Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "The main changes in this cycle were:

  Kernel side changes:

   - A couple of x86/cpu cleanups and changes were grandfathered in due
     to patch dependencies. These clean up the set of CPU model/family
     matching macros with a consistent namespace and C99 initializer
     style.

   - A bunch of updates to various low level PMU drivers:
       * AMD Family 19h L3 uncore PMU
       * Intel Tiger Lake uncore support
       * misc fixes to LBR TOS sampling

   - optprobe fixes

   - perf/cgroup: optimize cgroup event sched-in processing

   - misc cleanups and fixes

  Tooling side changes are to:

   - perf {annotate,expr,record,report,stat,test}
   - perl scripting
   - libapi, libperf and libtraceevent
   - vendor events on Intel and S390, ARM cs-etm
   - Intel PT updates
   - Documentation changes and updates to core facilities
   - misc cleanups, fixes and other enhancements"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (89 commits)
  cpufreq/intel_pstate: Fix wrong macro conversion
  x86/cpu: Cleanup the now unused CPU match macros
  hwrng: via_rng: Convert to new X86 CPU match macros
  crypto: Convert to new CPU match macros
  ASoC: Intel: Convert to new X86 CPU match macros
  powercap/intel_rapl: Convert to new X86 CPU match macros
  PCI: intel-mid: Convert to new X86 CPU match macros
  mmc: sdhci-acpi: Convert to new X86 CPU match macros
  intel_idle: Convert to new X86 CPU match macros
  extcon: axp288: Convert to new X86 CPU match macros
  thermal: Convert to new X86 CPU match macros
  hwmon: Convert to new X86 CPU match macros
  platform/x86: Convert to new CPU match macros
  EDAC: Convert to new X86 CPU match macros
  cpufreq: Convert to new X86 CPU match macros
  ACPI: Convert to new X86 CPU match macros
  x86/platform: Convert to new CPU match macros
  x86/kernel: Convert to new CPU match macros
  x86/kvm: Convert to new CPU match macros
  x86/perf/events: Convert to new CPU match macros
  ...
commit 9b82f05f86
@@ -518,6 +518,7 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw)
 		}
 	}
 	cpuhw->bhrb_stack.nr = u_index;
+	cpuhw->bhrb_stack.hw_idx = -1ULL;
 	return;
 }
@@ -1875,7 +1875,6 @@ config X86_SMAP
 
 config X86_UMIP
 	def_bool y
-	depends on CPU_SUP_INTEL || CPU_SUP_AMD
 	prompt "User Mode Instruction Prevention" if EXPERT
 	---help---
 	  User Mode Instruction Prevention (UMIP) is a security feature in
@@ -1064,7 +1064,7 @@ static struct aead_alg aesni_aeads[0];
 static struct simd_aead_alg *aesni_simd_aeads[ARRAY_SIZE(aesni_aeads)];
 
 static const struct x86_cpu_id aesni_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_AES),
+	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
@@ -170,7 +170,7 @@ static struct shash_alg alg = {
 };
 
 static const struct x86_cpu_id crc32pclmul_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
+	X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, crc32pclmul_cpu_id);
@@ -221,7 +221,7 @@ static struct shash_alg alg = {
 };
 
 static const struct x86_cpu_id crc32c_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_XMM4_2),
+	X86_MATCH_FEATURE(X86_FEATURE_XMM4_2, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, crc32c_cpu_id);
@@ -114,7 +114,7 @@ static struct shash_alg alg = {
 };
 
 static const struct x86_cpu_id crct10dif_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
+	X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
@@ -313,7 +313,7 @@ static struct ahash_alg ghash_async_alg = {
 };
 
 static const struct x86_cpu_id pcmul_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ), /* Pickle-Mickle-Duck */
+	X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), /* Pickle-Mickle-Duck */
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id);
@@ -259,7 +259,7 @@ static int power_cpu_init(unsigned int cpu)
 }
 
 static const struct x86_cpu_id cpu_match[] = {
-	{ .vendor = X86_VENDOR_AMD, .family = 0x15 },
+	X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
 	{},
 };
 
@@ -180,6 +180,31 @@ static void amd_uncore_del(struct perf_event *event, int flags)
 	hwc->idx = -1;
 }
 
+/*
+ * Convert logical CPU number to L3 PMC Config ThreadMask format
+ */
+static u64 l3_thread_slice_mask(int cpu)
+{
+	u64 thread_mask, core = topology_core_id(cpu);
+	unsigned int shift, thread = 0;
+
+	if (topology_smt_supported() && !topology_is_primary_thread(cpu))
+		thread = 1;
+
+	if (boot_cpu_data.x86 <= 0x18) {
+		shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread;
+		thread_mask = BIT_ULL(shift);
+
+		return AMD64_L3_SLICE_MASK | thread_mask;
+	}
+
+	core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK;
+	shift = AMD64_L3_THREAD_SHIFT + thread;
+	thread_mask = BIT_ULL(shift);
+
+	return AMD64_L3_EN_ALL_SLICES | core | thread_mask;
+}
+
 static int amd_uncore_event_init(struct perf_event *event)
 {
 	struct amd_uncore *uncore;
@@ -203,18 +228,11 @@ static int amd_uncore_event_init(struct perf_event *event)
 		return -EINVAL;
 
 	/*
-	 * SliceMask and ThreadMask need to be set for certain L3 events in
-	 * Family 17h. For other events, the two fields do not affect the count.
+	 * SliceMask and ThreadMask need to be set for certain L3 events.
+	 * For other events, the two fields do not affect the count.
 	 */
-	if (l3_mask && is_llc_event(event)) {
-		int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);
-
-		if (smp_num_siblings > 1)
-			thread += cpu_data(event->cpu).apicid & 1;
-
-		hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
-				AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
-	}
+	if (l3_mask && is_llc_event(event))
+		hwc->config |= l3_thread_slice_mask(event->cpu);
 
 	uncore = event_to_amd_uncore(event);
 	if (!uncore)
@@ -520,9 +538,9 @@ static int __init amd_uncore_init(void)
 	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
 		return -ENODEV;
 
-	if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) {
+	if (boot_cpu_data.x86 >= 0x17) {
 		/*
-		 * For F17h or F18h, the Northbridge counters are
+		 * For F17h and above, the Northbridge counters are
		 * repurposed as Data Fabric counters. Also, L3
		 * counters are supported too. The PMUs are exported
		 * based on family as either L2 or L3 and NB or DF.
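Aside on the new helper above: the following self-contained sketch reproduces the same bit arithmetic outside the kernel so the two encodings can be compared by eye. The constants mirror the AMD64_L3_* definitions this series also touches in perf_event.h; the inputs (core 5, sibling SMT thread) are hypothetical and purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))
#define L3_SLICE_SHIFT		48
#define L3_SLICE_MASK		(0xFULL << L3_SLICE_SHIFT)
#define L3_THREAD_SHIFT		56
#define L3_EN_ALL_SLICES	BIT_ULL(46)
#define L3_COREID_SHIFT		42
#define L3_COREID_MASK		(0x7ULL << L3_COREID_SHIFT)

/* Same math as l3_thread_slice_mask(), parameters passed in directly. */
static uint64_t thread_slice_mask(unsigned int family, uint64_t core,
				  unsigned int thread)
{
	unsigned int shift;

	if (family <= 0x18) {
		/* Fam 17h/18h: two ThreadMask bits per core, cores taken mod 4 */
		shift = L3_THREAD_SHIFT + 2 * (core % 4) + thread;
		return L3_SLICE_MASK | BIT_ULL(shift);
	}

	/* Fam 19h: dedicated CoreId field plus one ThreadMask bit per thread */
	core = (core << L3_COREID_SHIFT) & L3_COREID_MASK;
	shift = L3_THREAD_SHIFT + thread;
	return L3_EN_ALL_SLICES | core | BIT_ULL(shift);
}

int main(void)
{
	printf("F17h core 5 thread 1: %#llx\n",
	       (unsigned long long)thread_slice_mask(0x17, 5, 1));
	printf("F19h core 5 thread 1: %#llx\n",
	       (unsigned long long)thread_slice_mask(0x19, 5, 1));
	return 0;
}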
@@ -1945,6 +1945,14 @@ static __initconst const u64 knl_hw_cache_extra_regs
  * intel_bts events don't coexist with intel PMU's BTS events because of
  * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
  * disabled around intel PMU's event batching etc, only inside the PMI handler.
+ *
+ * Avoid PEBS_ENABLE MSR access in PMIs.
+ * The GLOBAL_CTRL has been disabled. All the counters do not count anymore.
+ * It doesn't matter if the PEBS is enabled or not.
+ * Usually, the PEBS status are not changed in PMIs. It's unnecessary to
+ * access PEBS_ENABLE MSR in disable_all()/enable_all().
+ * However, there are some cases which may change PEBS status, e.g. PMI
+ * throttle. The PEBS_ENABLE should be updated where the status changes.
  */
 static void __intel_pmu_disable_all(void)
 {
@@ -1954,13 +1962,12 @@ static void __intel_pmu_disable_all(void)
 
 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
 		intel_pmu_disable_bts();
-
-	intel_pmu_pebs_disable_all();
 }
 
 static void intel_pmu_disable_all(void)
 {
 	__intel_pmu_disable_all();
+	intel_pmu_pebs_disable_all();
 	intel_pmu_lbr_disable_all();
 }
 
@@ -1968,7 +1975,6 @@ static void __intel_pmu_enable_all(int added, bool pmi)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 
-	intel_pmu_pebs_enable_all();
 	intel_pmu_lbr_enable_all(pmi);
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
 	       x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
@@ -1986,6 +1992,7 @@ static void __intel_pmu_enable_all(int added, bool pmi)
 
 static void intel_pmu_enable_all(int added)
 {
+	intel_pmu_pebs_enable_all();
 	__intel_pmu_enable_all(added, false);
 }
 
@@ -2374,9 +2381,21 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 	 * PEBS overflow sets bit 62 in the global status register
 	 */
 	if (__test_and_clear_bit(62, (unsigned long *)&status)) {
+		u64 pebs_enabled = cpuc->pebs_enabled;
+
 		handled++;
 		x86_pmu.drain_pebs(regs);
 		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+
+		/*
+		 * PMI throttle may be triggered, which stops the PEBS event.
+		 * Although cpuc->pebs_enabled is updated accordingly, the
+		 * MSR_IA32_PEBS_ENABLE is not updated. Because the
+		 * cpuc->enabled has been forced to 0 in PMI.
+		 * Update the MSR if pebs_enabled is changed.
+		 */
+		if (pebs_enabled != cpuc->pebs_enabled)
+			wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
 	}
 
 	/*
@@ -594,63 +594,60 @@ static const struct cstate_model glm_cstates __initconst = {
 };
 
 
-#define X86_CSTATES_MODEL(model, states)				\
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
-
 static const struct x86_cpu_id intel_cstates_match[] __initconst = {
-	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhm_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhm_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL,   snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X, snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_G, snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_L, hswult_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hswult_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT,   slm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT_D, slm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,      slm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&slm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D,	&slm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&slm_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL,   snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_D, snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_G, snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X, snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_L, snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE,   snb_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&snb_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&snb_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_L,  hswult_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE,    hswult_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE_L, hswult_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE,   hswult_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&hswult_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&hswult_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&hswult_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&hswult_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_L, cnl_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&cnl_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT,      glm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D,    glm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT_D,     glm_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT,       glm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&glm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&glm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&glm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&glm_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	&glm_cstates),
 
-	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L,   icl_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE,     icl_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_TIGERLAKE_L, icl_cstates),
-	X86_CSTATES_MODEL(INTEL_FAM6_TIGERLAKE,   icl_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&icl_cstates),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&icl_cstates),
 	{ },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -585,6 +585,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
 		cpuc->lbr_entries[i].reserved	= 0;
 	}
 	cpuc->lbr_stack.nr = i;
+	cpuc->lbr_stack.hw_idx = tos;
 }
 
 /*
@@ -680,6 +681,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 		out++;
 	}
 	cpuc->lbr_stack.nr = out;
+	cpuc->lbr_stack.hw_idx = tos;
 }
 
 void intel_pmu_lbr_read(void)
@@ -1120,6 +1122,13 @@ void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
 	int i;
 
 	cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
+
+	/* Cannot get TOS for large PEBS */
+	if (cpuc->n_pebs == cpuc->n_large_pebs)
+		cpuc->lbr_stack.hw_idx = -1ULL;
+	else
+		cpuc->lbr_stack.hw_idx = intel_pmu_lbr_tos();
+
 	for (i = 0; i < x86_pmu.lbr_nr; i++) {
 		u64 info = lbr->lbr[i].info;
 		struct perf_branch_entry *e = &cpuc->lbr_entries[i];
@@ -668,9 +668,6 @@ static int __init init_rapl_pmus(void)
 	return 0;
 }
 
-#define X86_RAPL_MODEL_MATCH(model, init)	\
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
-
 static struct rapl_model model_snb = {
 	.events		= BIT(PERF_RAPL_PP0) |
 			  BIT(PERF_RAPL_PKG) |
@@ -716,36 +713,35 @@ static struct rapl_model model_skl = {
 };
 
 static const struct x86_cpu_id rapl_model_match[] __initconst = {
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,	model_snb),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,	model_snbep),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,	model_snb),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,	model_snbep),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL,	model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,	model_hsx),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_L,	model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_G,	model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL,	model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_G,	model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	model_hsx),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_D,	model_hsx),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	model_knl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,	model_knl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	model_hsx),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_L,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT,	model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_D, model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, model_hsw),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_L,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_COMETLAKE_L,	model_skl),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_COMETLAKE,	model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&model_snb),
+	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&model_snbep),
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&model_snb),
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&model_snbep),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&model_hsw),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&model_hsx),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&model_hsw),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&model_hsw),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&model_hsw),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&model_hsw),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&model_hsx),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&model_hsx),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&model_knl),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&model_knl),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&model_hsx),
+	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L,	&model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&model_hsw),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&model_hsw),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS,	&model_hsw),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,		&model_skl),
+	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,		&model_skl),
 	{},
 };
 
 MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
 
 static int __init rapl_pmu_init(void)
@@ -1392,10 +1392,6 @@ err:
 	return ret;
 }
 
-
-#define X86_UNCORE_MODEL_MATCH(model, init)	\
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
-
 struct intel_uncore_init_fun {
 	void	(*cpu_init)(void);
 	int	(*pci_init)(void);
@@ -1470,6 +1466,16 @@ static const struct intel_uncore_init_fun icl_uncore_init __initconst = {
 	.pci_init = skl_uncore_pci_init,
 };
 
+static const struct intel_uncore_init_fun tgl_uncore_init __initconst = {
+	.cpu_init = icl_uncore_cpu_init,
+	.mmio_init = tgl_uncore_mmio_init,
+};
+
+static const struct intel_uncore_init_fun tgl_l_uncore_init __initconst = {
+	.cpu_init = icl_uncore_cpu_init,
+	.mmio_init = tgl_l_uncore_mmio_init,
+};
+
 static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
 	.cpu_init = snr_uncore_cpu_init,
 	.pci_init = snr_uncore_pci_init,
@@ -1477,38 +1483,39 @@ static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
 };
 
 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,	  nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,	  nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,	  nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,	  nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,	  snb_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,	  ivb_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_L,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_G,	  hsw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL,	  bdw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_G,	  bdw_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,  snbep_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,	  nhmex_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,	  nhmex_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,	  ivbep_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,	  hswep_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  bdx_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_D,	  bdx_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM,	  knl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE,	  skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_L,	  skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	  skx_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE_L,	  skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_KABYLAKE,	  skl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_L,	  icl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE_NNPI,	  icl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ICELAKE,	  icl_uncore_init),
-	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_ATOM_TREMONT_D, snr_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP,		&nhm_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM,		&nhm_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE,		&nhm_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP,		&nhm_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE,		&snb_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE,		&ivb_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL,		&hsw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L,		&hsw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G,		&hsw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL,		&bdw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,		&bdw_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X,	&snbep_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX,		&nhmex_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX,		&nhmex_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,		&ivbep_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,		&hswep_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,		&bdx_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,		&bdx_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,	&knl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,	&knl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,		&skx_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE,		&skl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		&icl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI,	&icl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,		&icl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		&tgl_l_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		&tgl_uncore_init),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	&snr_uncore_init),
 	{},
 };
 
 MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match);
 
 static int __init intel_uncore_init(void)
@@ -154,6 +154,7 @@ struct freerunning_counters {
 	unsigned int box_offset;
 	unsigned int num_counters;
 	unsigned int bits;
+	unsigned *box_offsets;
 };
 
 struct pci2phy_map {
@@ -310,7 +311,9 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
 
 	return pmu->type->freerunning[type].counter_base +
 	       pmu->type->freerunning[type].counter_offset * idx +
-	       pmu->type->freerunning[type].box_offset * pmu->pmu_idx;
+	       (pmu->type->freerunning[type].box_offsets ?
+	        pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
+	        pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
 }
 
 static inline
@@ -527,6 +530,8 @@ void snb_uncore_cpu_init(void);
 void nhm_uncore_cpu_init(void);
 void skl_uncore_cpu_init(void);
 void icl_uncore_cpu_init(void);
+void tgl_uncore_mmio_init(void);
+void tgl_l_uncore_mmio_init(void);
 int snb_pci2phy_map_init(int devid);
 
 /* uncore_snbep.c */
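The box_offsets hook added above lets a platform supply an explicit per-box offset table instead of a uniform stride. A rough standalone restatement of the selection logic, with field names shortened and no claim to match the kernel structures exactly:

#include <stdint.h>

struct freerunning {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;	/* uniform stride: the old scheme */
	unsigned int *box_offsets;	/* explicit table: the new scheme */
};

static unsigned int counter_addr(const struct freerunning *fr,
				 unsigned int idx, unsigned int box)
{
	/* An explicit table wins; otherwise fall back to base + stride. */
	unsigned int box_off = fr->box_offsets ? fr->box_offsets[box]
					       : fr->box_offset * box;

	return fr->counter_base + fr->counter_offset * idx + box_off;
}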
@@ -44,6 +44,11 @@
 #define PCI_DEVICE_ID_INTEL_WHL_UD_IMC		0x3e35
 #define PCI_DEVICE_ID_INTEL_ICL_U_IMC		0x8a02
 #define PCI_DEVICE_ID_INTEL_ICL_U2_IMC		0x8a12
+#define PCI_DEVICE_ID_INTEL_TGL_U1_IMC		0x9a02
+#define PCI_DEVICE_ID_INTEL_TGL_U2_IMC		0x9a04
+#define PCI_DEVICE_ID_INTEL_TGL_U3_IMC		0x9a12
+#define PCI_DEVICE_ID_INTEL_TGL_U4_IMC		0x9a14
+#define PCI_DEVICE_ID_INTEL_TGL_H_IMC		0x9a36
 
 
 /* SNB event control */
@@ -1002,3 +1007,157 @@ void nhm_uncore_cpu_init(void)
 }
 
 /* end of Nehalem uncore support */
+
+/* Tiger Lake MMIO uncore support */
+
+static const struct pci_device_id tgl_uncore_pci_ids[] = {
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U1_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U2_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U3_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_U4_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+	{ /* end: all zeroes */ }
+};
+
+enum perf_tgl_uncore_imc_freerunning_types {
+	TGL_MMIO_UNCORE_IMC_DATA_TOTAL,
+	TGL_MMIO_UNCORE_IMC_DATA_READ,
+	TGL_MMIO_UNCORE_IMC_DATA_WRITE,
+	TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
+};
+
+static struct freerunning_counters tgl_l_uncore_imc_freerunning[] = {
+	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0x5040, 0x0, 0x0, 1, 64 },
+	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0x5058, 0x0, 0x0, 1, 64 },
+	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0x50A0, 0x0, 0x0, 1, 64 },
+};
+
+static struct freerunning_counters tgl_uncore_imc_freerunning[] = {
+	[TGL_MMIO_UNCORE_IMC_DATA_TOTAL]	= { 0xd840, 0x0, 0x0, 1, 64 },
+	[TGL_MMIO_UNCORE_IMC_DATA_READ]		= { 0xd858, 0x0, 0x0, 1, 64 },
+	[TGL_MMIO_UNCORE_IMC_DATA_WRITE]	= { 0xd8A0, 0x0, 0x0, 1, 64 },
+};
+
+static struct uncore_event_desc tgl_uncore_imc_events[] = {
+	INTEL_UNCORE_EVENT_DESC(data_total,		"event=0xff,umask=0x10"),
+	INTEL_UNCORE_EVENT_DESC(data_total.scale,	"6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(data_total.unit,	"MiB"),
+
+	INTEL_UNCORE_EVENT_DESC(data_read,		"event=0xff,umask=0x20"),
+	INTEL_UNCORE_EVENT_DESC(data_read.scale,	"6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(data_read.unit,		"MiB"),
+
+	INTEL_UNCORE_EVENT_DESC(data_write,		"event=0xff,umask=0x30"),
+	INTEL_UNCORE_EVENT_DESC(data_write.scale,	"6.103515625e-5"),
+	INTEL_UNCORE_EVENT_DESC(data_write.unit,	"MiB"),
+
+	{ /* end: all zeroes */ }
+};
+
+static struct pci_dev *tgl_uncore_get_mc_dev(void)
+{
+	const struct pci_device_id *ids = tgl_uncore_pci_ids;
+	struct pci_dev *mc_dev = NULL;
+
+	while (ids && ids->vendor) {
+		mc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, ids->device, NULL);
+		if (mc_dev)
+			return mc_dev;
+		ids++;
+	}
+
+	return mc_dev;
+}
+
+#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET		0x10000
+
+static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+{
+	struct pci_dev *pdev = tgl_uncore_get_mc_dev();
+	struct intel_uncore_pmu *pmu = box->pmu;
+	resource_size_t addr;
+	u32 mch_bar;
+
+	if (!pdev) {
+		pr_warn("perf uncore: Cannot find matched IMC device.\n");
+		return;
+	}
+
+	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET, &mch_bar);
+	/* MCHBAR is disabled */
+	if (!(mch_bar & BIT(0))) {
+		pr_warn("perf uncore: MCHBAR is disabled. Failed to map IMC free-running counters.\n");
+		return;
+	}
+	mch_bar &= ~BIT(0);
+	addr = (resource_size_t)(mch_bar + TGL_UNCORE_MMIO_IMC_MEM_OFFSET * pmu->pmu_idx);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+	pci_read_config_dword(pdev, SNB_UNCORE_PCI_IMC_BAR_OFFSET + 4, &mch_bar);
+	addr |= ((resource_size_t)mch_bar << 32);
+#endif
+
+	box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
+}
+
+static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
+	.init_box	= tgl_uncore_imc_freerunning_init_box,
+	.exit_box	= uncore_mmio_exit_box,
+	.read_counter	= uncore_mmio_read_counter,
+	.hw_config	= uncore_freerunning_hw_config,
+};
+
+static struct attribute *tgl_uncore_imc_formats_attr[] = {
+	&format_attr_event.attr,
+	&format_attr_umask.attr,
+	NULL
+};
+
+static const struct attribute_group tgl_uncore_imc_format_group = {
+	.name = "format",
+	.attrs = tgl_uncore_imc_formats_attr,
+};
+
+static struct intel_uncore_type tgl_uncore_imc_free_running = {
+	.name			= "imc_free_running",
+	.num_counters		= 3,
+	.num_boxes		= 2,
+	.num_freerunning_types	= TGL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+	.freerunning		= tgl_uncore_imc_freerunning,
+	.ops			= &tgl_uncore_imc_freerunning_ops,
+	.event_descs		= tgl_uncore_imc_events,
+	.format_group		= &tgl_uncore_imc_format_group,
+};
+
+static struct intel_uncore_type *tgl_mmio_uncores[] = {
+	&tgl_uncore_imc_free_running,
+	NULL
+};
+
+void tgl_l_uncore_mmio_init(void)
+{
+	tgl_uncore_imc_free_running.freerunning = tgl_l_uncore_imc_freerunning;
+	uncore_mmio_uncores = tgl_mmio_uncores;
+}
+
+void tgl_uncore_mmio_init(void)
+{
+	uncore_mmio_uncores = tgl_mmio_uncores;
+}
+
+/* end of Tiger Lake MMIO uncore support */
@@ -4380,10 +4380,10 @@ static struct pci_dev *snr_uncore_get_mc_dev(int id)
 	return mc_dev;
 }
 
-static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
+static void __snr_uncore_mmio_init_box(struct intel_uncore_box *box,
+				       unsigned int box_ctl, int mem_offset)
 {
 	struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
-	unsigned int box_ctl = uncore_mmio_box_ctl(box);
 	resource_size_t addr;
 	u32 pci_dword;
 
@@ -4393,7 +4393,7 @@ static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
 	pci_read_config_dword(pdev, SNR_IMC_MMIO_BASE_OFFSET, &pci_dword);
 	addr = (pci_dword & SNR_IMC_MMIO_BASE_MASK) << 23;
 
-	pci_read_config_dword(pdev, SNR_IMC_MMIO_MEM0_OFFSET, &pci_dword);
+	pci_read_config_dword(pdev, mem_offset, &pci_dword);
 	addr |= (pci_dword & SNR_IMC_MMIO_MEM0_MASK) << 12;
 
 	addr += box_ctl;
@@ -4405,6 +4405,12 @@ static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
 	writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
 }
 
+static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
+{
+	__snr_uncore_mmio_init_box(box, uncore_mmio_box_ctl(box),
+				   SNR_IMC_MMIO_MEM0_OFFSET);
+}
+
 static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
 {
 	u32 config;
@@ -12,7 +12,6 @@ struct amd_nb_bus_dev_range {
 	u8 dev_limit;
 };
 
-extern const struct pci_device_id amd_nb_misc_ids[];
 extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 
 extern bool early_is_amd_nb(u32 value);
@@ -5,9 +5,139 @@
 /*
  * Declare drivers belonging to specific x86 CPUs
  * Similar in spirit to pci_device_id and related PCI functions
+ *
+ * The wildcard initializers are in mod_devicetable.h because
+ * file2alias needs them. Sigh.
  */
 
 #include <linux/mod_devicetable.h>
+/* Get the INTEL_FAM* model defines */
+#include <asm/intel-family.h>
+/* And the X86_VENDOR_* ones */
+#include <asm/processor.h>
+
+/* Centaur FAM6 models */
+#define X86_CENTAUR_FAM6_C7_A		0xa
+#define X86_CENTAUR_FAM6_C7_D		0xd
+#define X86_CENTAUR_FAM6_NANO		0xf
+
+/**
+ * X86_MATCH_VENDOR_FAM_MODEL_FEATURE - Base macro for CPU matching
+ * @_vendor:	The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ *		The name is expanded to X86_VENDOR_@_vendor
+ * @_family:	The family number or X86_FAMILY_ANY
+ * @_model:	The model number, model constant or X86_MODEL_ANY
+ * @_feature:	A X86_FEATURE bit or X86_FEATURE_ANY
+ * @_data:	Driver specific data or NULL. The internal storage
+ *		format is unsigned long. The supplied value, pointer
+ *		etc. is casted to unsigned long internally.
+ *
+ * Use only if you need all selectors. Otherwise use one of the shorter
+ * macros of the X86_MATCH_* family. If there is no matching shorthand
+ * macro, consider to add one. If you really need to wrap one of the macros
+ * into another macro at the usage site for good reasons, then please
+ * start this local macro with X86_MATCH to allow easy grepping.
+ */
+#define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(_vendor, _family, _model,	\
+					   _feature, _data) {		\
+	.vendor		= X86_VENDOR_##_vendor,				\
+	.family		= _family,					\
+	.model		= _model,					\
+	.feature	= _feature,					\
+	.driver_data	= (unsigned long) _data				\
+}
+
+/**
+ * X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature
+ * @vendor:	The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ *		The name is expanded to X86_VENDOR_@vendor
+ * @family:	The family number or X86_FAMILY_ANY
+ * @feature:	A X86_FEATURE bit
+ * @data:	Driver specific data or NULL. The internal storage
+ *		format is unsigned long. The supplied value, pointer
+ *		etc. is casted to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data)	\
+	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family,		\
+					   X86_MODEL_ANY, feature, data)
+
+/**
+ * X86_MATCH_VENDOR_FEATURE - Macro for matching vendor and CPU feature
+ * @vendor:	The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ *		The name is expanded to X86_VENDOR_@vendor
+ * @feature:	A X86_FEATURE bit
+ * @data:	Driver specific data or NULL. The internal storage
+ *		format is unsigned long. The supplied value, pointer
+ *		etc. is casted to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FEATURE(vendor, feature, data)			\
+	X86_MATCH_VENDOR_FAM_FEATURE(vendor, X86_FAMILY_ANY, feature, data)
+
+/**
+ * X86_MATCH_FEATURE - Macro for matching a CPU feature
+ * @feature:	A X86_FEATURE bit
+ * @data:	Driver specific data or NULL. The internal storage
+ *		format is unsigned long. The supplied value, pointer
+ *		etc. is casted to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_FEATURE(feature, data)				\
+	X86_MATCH_VENDOR_FEATURE(ANY, feature, data)
+
+/**
+ * X86_MATCH_VENDOR_FAM_MODEL - Match vendor, family and model
+ * @vendor:	The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ *		The name is expanded to X86_VENDOR_@vendor
+ * @family:	The family number or X86_FAMILY_ANY
+ * @model:	The model number, model constant or X86_MODEL_ANY
+ * @data:	Driver specific data or NULL. The internal storage
+ *		format is unsigned long. The supplied value, pointer
+ *		etc. is casted to unsigned long internally.
+ *
+ * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data)		\
+	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model,	\
+					   X86_FEATURE_ANY, data)
+
+/**
+ * X86_MATCH_VENDOR_FAM - Match vendor and family
+ * @vendor:	The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
+ *		The name is expanded to X86_VENDOR_@vendor
+ * @family:	The family number or X86_FAMILY_ANY
+ * @data:	Driver specific data or NULL. The internal storage
+ *		format is unsigned long. The supplied value, pointer
+ *		etc. is casted to unsigned long internally.
+ *
+ * All other missing arguments to X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
+ * set to wildcards.
+ */
+#define X86_MATCH_VENDOR_FAM(vendor, family, data)			\
+	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data)
+
+/**
+ * X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model
+ * @model:	The model name without the INTEL_FAM6_ prefix or ANY
+ *		The model name is expanded to INTEL_FAM6_@model internally
+ * @data:	Driver specific data or NULL. The internal storage
+ *		format is unsigned long. The supplied value, pointer
+ *		etc. is casted to unsigned long internally.
+ *
+ * The vendor is set to INTEL, the family to 6 and all other missing
+ * arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are set to wildcards.
+ *
+ * See X86_MATCH_VENDOR_FAM_MODEL_FEATURE() for further information.
+ */
+#define X86_MATCH_INTEL_FAM6_MODEL(model, data)				\
+	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)
 
 /*
  * Match specific microcode revisions.
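For readers unfamiliar with how this family of macros is consumed: a driver declares a table and hands it to x86_match_cpu(), the lookup helper from arch/x86/kernel/cpu/match.c. The sketch below is hypothetical (the table entries and the skl_data/zen_data names are invented for illustration), not part of this diff:

/* Hypothetical driver snippet illustrating the new macros. */
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

static const struct x86_cpu_id mydrv_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &skl_data),	/* one Intel model */
	X86_MATCH_VENDOR_FAM(AMD, 0x17, &zen_data),	/* whole AMD family */
	X86_MATCH_FEATURE(X86_FEATURE_AES, NULL),	/* any CPU with AES */
	{}
};
MODULE_DEVICE_TABLE(x86cpu, mydrv_cpu_ids);

static int __init mydrv_init(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(mydrv_cpu_ids);

	if (!id)
		return -ENODEV;
	/* id->driver_data carries the per-entry pointer, if any */
	return 0;
}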
@@ -217,7 +217,7 @@
 #define X86_FEATURE_IBRS		( 7*32+25) /* Indirect Branch Restricted Speculation */
 #define X86_FEATURE_IBPB		( 7*32+26) /* Indirect Branch Prediction Barrier */
 #define X86_FEATURE_STIBP		( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_ZEN			( 7*32+28) /* "" CPU is AMD family 0x17 or above (Zen) */
 #define X86_FEATURE_L1TF_PTEINV		( 7*32+29) /* "" L1TF workaround PTE inversion */
 #define X86_FEATURE_IBRS_ENHANCED	( 7*32+30) /* Enhanced IBRS */
 #define X86_FEATURE_MSR_IA32_FEAT_CTL	( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
@@ -35,6 +35,9 @@
  * The #define line may optionally include a comment including platform names.
  */
 
+/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
+#define INTEL_FAM6_ANY			X86_MODEL_ANY
+
 #define INTEL_FAM6_CORE_YONAH		0x0E
 
 #define INTEL_FAM6_CORE2_MEROM		0x0F
@@ -118,17 +121,7 @@
 #define INTEL_FAM6_XEON_PHI_KNL		0x57 /* Knights Landing */
 #define INTEL_FAM6_XEON_PHI_KNM		0x85 /* Knights Mill */
 
-/* Useful macros */
-#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data)	\
-{								\
-	.vendor		= X86_VENDOR_INTEL,			\
-	.family		= _family,				\
-	.model		= _model,				\
-	.feature	= X86_FEATURE_ANY,			\
-	.driver_data	= (kernel_ulong_t)&_driver_data		\
-}
-
-#define INTEL_CPU_FAM6(_model, _driver_data)			\
-	INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)
+/* Family 5 */
+#define INTEL_FAM5_QUARK_X1000	0x09 /* Quark X1000 SoC */
 
 #endif /* _ASM_X86_INTEL_FAMILY_H */
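The INTEL_FAM6_ANY alias added above exists only so the new shorthand can express a whole-family wildcard. A hedged one-liner showing what that enables (the defaults table name is made up):

/* Matches any Intel family 6 CPU; typically used as a catch-all entry. */
static const struct x86_cpu_id fam6_any[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ANY, &generic_defaults),
	{}
};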
@@ -36,6 +36,7 @@ typedef u8 kprobe_opcode_t;
 
 /* optinsn template addresses */
 extern __visible kprobe_opcode_t optprobe_template_entry[];
+extern __visible kprobe_opcode_t optprobe_template_clac[];
 extern __visible kprobe_opcode_t optprobe_template_val[];
 extern __visible kprobe_opcode_t optprobe_template_call[];
 extern __visible kprobe_opcode_t optprobe_template_end[];
@@ -50,11 +50,22 @@
 
 #define AMD64_L3_SLICE_SHIFT				48
 #define AMD64_L3_SLICE_MASK				\
-	((0xFULL) << AMD64_L3_SLICE_SHIFT)
+	(0xFULL << AMD64_L3_SLICE_SHIFT)
+#define AMD64_L3_SLICEID_MASK				\
+	(0x7ULL << AMD64_L3_SLICE_SHIFT)
 
 #define AMD64_L3_THREAD_SHIFT				56
 #define AMD64_L3_THREAD_MASK				\
-	((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+	(0xFFULL << AMD64_L3_THREAD_SHIFT)
+#define AMD64_L3_F19H_THREAD_MASK			\
+	(0x3ULL << AMD64_L3_THREAD_SHIFT)
+
+#define AMD64_L3_EN_ALL_CORES				BIT_ULL(47)
+#define AMD64_L3_EN_ALL_SLICES				BIT_ULL(46)
+
+#define AMD64_L3_COREID_SHIFT				42
+#define AMD64_L3_COREID_MASK				\
+	(0x7ULL << AMD64_L3_COREID_SHIFT)
 
 #define X86_RAW_EVENT_MASK		\
 	(ARCH_PERFMON_EVENTSEL_EVENT |	\
@@ -36,10 +36,9 @@ static const struct pci_device_id amd_root_ids[] = {
 	{}
 };
 
-
 #define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704
 
-const struct pci_device_id amd_nb_misc_ids[] = {
+static const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
@@ -56,7 +55,6 @@ const struct pci_device_id amd_nb_misc_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
 	{}
 };
-EXPORT_SYMBOL_GPL(amd_nb_misc_ids);
 
 static const struct pci_device_id amd_nb_link_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
@@ -546,12 +546,6 @@ static struct clock_event_device lapic_clockevent = {
 };
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
-#define DEADLINE_MODEL_MATCH_FUNC(model, func)	\
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&func }
-
-#define DEADLINE_MODEL_MATCH_REV(model, rev)	\
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev }
-
 static u32 hsx_deadline_rev(void)
 {
 	switch (boot_cpu_data.x86_stepping) {
@@ -588,23 +582,23 @@ static u32 skx_deadline_rev(void)
 }
 
 static const struct x86_cpu_id deadline_match[] = {
-	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X,	hsx_deadline_rev),
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X,	0x0b000020),
-	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_D,	bdx_deadline_rev),
-	DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X,	skx_deadline_rev),
+	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_X,		&hsx_deadline_rev),
+	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X,	0x0b000020),
+	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_D,	&bdx_deadline_rev),
+	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_X,		&skx_deadline_rev),
 
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL,		0x22),
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_L,	0x20),
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_G,	0x17),
+	X86_MATCH_INTEL_FAM6_MODEL( HASWELL,		0x22),
+	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L,		0x20),
+	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G,		0x17),
 
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL,	0x25),
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_G,	0x17),
+	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL,		0x25),
+	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G,	0x17),
 
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_L,	0xb2),
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE,		0xb2),
+	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L,		0xb2),
+	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE,		0xb2),
 
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_L,	0x52),
-	DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE,		0x52),
+	X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L,		0x52),
+	X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE,		0x52),
 
 	{},
 };
@@ -955,7 +955,8 @@ static void init_amd(struct cpuinfo_x86 *c)
 	case 0x12: init_amd_ln(c); break;
 	case 0x15: init_amd_bd(c); break;
 	case 0x16: init_amd_jg(c); break;
-	case 0x17: init_amd_zn(c); break;
+	case 0x17: fallthrough;
+	case 0x19: init_amd_zn(c); break;
 	}
 
 	/*
@@ -1008,8 +1008,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define NO_ITLB_MULTIHIT	BIT(7)
 #define NO_SPECTRE_V2		BIT(8)
 
-#define VULNWL(_vendor, _family, _model, _whitelist)	\
-	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+#define VULNWL(vendor, family, model, whitelist)	\
+	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
 
 #define VULNWL_INTEL(model, whitelist)		\
 	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
@@ -16,12 +16,17 @@
 * respective wildcard entries.
 *
 * A typical table entry would be to match a specific CPU
- * { X86_VENDOR_INTEL, 6, 0x12 }
- * or to match a specific CPU feature
- * { X86_FEATURE_MATCH(X86_FEATURE_FOOBAR) }
+ *
+ * X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_BROADWELL,
+ *				      X86_FEATURE_ANY, NULL);
 *
 * Fields can be wildcarded with %X86_VENDOR_ANY, %X86_FAMILY_ANY,
- * %X86_MODEL_ANY, %X86_FEATURE_ANY or 0 (except for vendor)
+ * %X86_MODEL_ANY, %X86_FEATURE_ANY (except for vendor)
+ *
+ * asm/cpu_device_id.h contains a set of useful macros which are shortcuts
+ * for various common selections. The above can be shortened to:
+ *
+ * X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, NULL);
 *
 * Arrays used to match for this should also be declared using
 * MODULE_DEVICE_TABLE(x86cpu, ...)
@@ -71,6 +71,21 @@ found:
 	return (unsigned long)buf;
 }
 
+static void synthesize_clac(kprobe_opcode_t *addr)
+{
+	/*
+	 * Can't be static_cpu_has() due to how objtool treats this feature bit.
+	 * This isn't a fast path anyway.
+	 */
+	if (!boot_cpu_has(X86_FEATURE_SMAP))
+		return;
+
+	/* Replace the NOP3 with CLAC */
+	addr[0] = 0x0f;
+	addr[1] = 0x01;
+	addr[2] = 0xca;
+}
+
 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
 static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 {
@@ -92,6 +107,9 @@ asm (
 			/* We don't bother saving the ss register */
 			"	pushq %rsp\n"
 			"	pushfq\n"
+			".global optprobe_template_clac\n"
+			"optprobe_template_clac:\n"
+			ASM_NOP3
 			SAVE_REGS_STRING
 			"	movq %rsp, %rsi\n"
 			".global optprobe_template_val\n"
@@ -111,6 +129,9 @@ asm (
 #else /* CONFIG_X86_32 */
 			"	pushl %esp\n"
 			"	pushfl\n"
+			".global optprobe_template_clac\n"
+			"optprobe_template_clac:\n"
+			ASM_NOP3
 			SAVE_REGS_STRING
 			"	movl %esp, %edx\n"
 			".global optprobe_template_val\n"
@@ -134,6 +155,8 @@ asm (
 void optprobe_template_func(void);
 STACK_FRAME_NON_STANDARD(optprobe_template_func);
 
+#define TMPL_CLAC_IDX \
+	((long)optprobe_template_clac - (long)optprobe_template_entry)
 #define TMPL_MOVE_IDX \
 	((long)optprobe_template_val - (long)optprobe_template_entry)
 #define TMPL_CALL_IDX \
@@ -389,6 +412,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
 	op->optinsn.size = ret;
 	len = TMPL_END_IDX + op->optinsn.size;
 
+	synthesize_clac(buf + TMPL_CLAC_IDX);
+
 	/* Set probe information */
 	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
 
@@ -466,7 +466,7 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 */
 
 static const struct x86_cpu_id snc_cpu[] = {
-	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
+	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
 	{}
 };
 
@@ -63,13 +63,13 @@ static const struct freq_desc freq_desc_lgm = {
 };
 
 static const struct x86_cpu_id tsc_msr_cpu_ids[] = {
-	INTEL_CPU_FAM6(ATOM_SALTWELL_MID,	freq_desc_pnw),
-	INTEL_CPU_FAM6(ATOM_SALTWELL_TABLET,	freq_desc_clv),
-	INTEL_CPU_FAM6(ATOM_SILVERMONT,		freq_desc_byt),
-	INTEL_CPU_FAM6(ATOM_SILVERMONT_MID,	freq_desc_tng),
-	INTEL_CPU_FAM6(ATOM_AIRMONT,		freq_desc_cht),
-	INTEL_CPU_FAM6(ATOM_AIRMONT_MID,	freq_desc_ann),
-	INTEL_CPU_FAM6(ATOM_AIRMONT_NP,		freq_desc_lgm),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID,	&freq_desc_pnw),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_TABLET,&freq_desc_clv),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT,	&freq_desc_byt),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID,	&freq_desc_tng),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT,	&freq_desc_cht),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID,	&freq_desc_ann),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_NP,	&freq_desc_lgm),
 	{}
 };
 
@@ -48,6 +48,7 @@
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
 #include <asm/spec-ctrl.h>
+#include <asm/cpu_device_id.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -59,7 +60,7 @@ MODULE_LICENSE("GPL");
 
 #ifdef MODULE
 static const struct x86_cpu_id svm_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_SVM),
+	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
@@ -31,6 +31,7 @@
 #include <asm/apic.h>
 #include <asm/asm.h>
 #include <asm/cpu.h>
+#include <asm/cpu_device_id.h>
 #include <asm/debugreg.h>
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
@@ -66,7 +67,7 @@ MODULE_LICENSE("GPL");
 
 #ifdef MODULE
 static const struct x86_cpu_id vmx_cpu_id[] = {
-	X86_FEATURE_MATCH(X86_FEATURE_VMX),
+	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
@@ -117,17 +117,16 @@ static void punit_dbgfs_unregister(void)
 	debugfs_remove_recursive(punit_dbg_file);
 }
 
-#define ICPU(model, drv_data) \
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT,\
-	  (kernel_ulong_t)&drv_data }
+#define X86_MATCH(model, data)						 \
+	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+					   X86_FEATURE_MWAIT, data)
 
 static const struct x86_cpu_id intel_punit_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT,	punit_device_byt),
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID,	punit_device_tng),
-	ICPU(INTEL_FAM6_ATOM_AIRMONT,		punit_device_cht),
+	X86_MATCH(ATOM_SILVERMONT,		&punit_device_byt),
+	X86_MATCH(ATOM_SILVERMONT_MID,		&punit_device_tng),
+	X86_MATCH(ATOM_AIRMONT,			&punit_device_cht),
 	{}
 };
 
 MODULE_DEVICE_TABLE(x86cpu, intel_punit_cpu_ids);
 
 static int __init punit_atom_debug_init(void)
@@ -663,12 +663,9 @@ static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
 	return 1;
 }
 
-#define ICPU(family, model, quirk_handler) \
-	{ X86_VENDOR_INTEL, family, model, X86_FEATURE_ANY, \
-	  (unsigned long)&quirk_handler }
-
 static const struct x86_cpu_id efi_capsule_quirk_ids[] = {
-	ICPU(5, 9, qrk_capsule_setup_info),	/* Intel Quark X1000 */
+	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000,
+				   &qrk_capsule_setup_info),
 	{ }
 };
 
@@ -60,11 +60,8 @@ static struct bt_sfi_data tng_bt_sfi_data __initdata = {
 	.setup	= tng_bt_sfi_setup,
 };
 
-#define ICPU(model, ddata)	\
-	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (kernel_ulong_t)&ddata }
-
 static const struct x86_cpu_id bt_sfi_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID, tng_bt_sfi_data),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &tng_bt_sfi_data),
 	{}
 };
 
@@ -569,7 +569,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 }
 
 static const struct x86_cpu_id imr_ids[] __initconst = {
-	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
+	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
 	{}
 };
 
@@ -105,7 +105,7 @@ static void __init imr_self_test(void)
 }
 
 static const struct x86_cpu_id imr_ids[] __initconst = {
-	{ X86_VENDOR_INTEL, 5, 9 },	/* Intel Quark SoC X1000. */
+	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
 	{}
 };
 
@@ -475,20 +475,8 @@ static int msr_save_cpuid_features(const struct x86_cpu_id *c)
 }
 
 static const struct x86_cpu_id msr_save_cpu_table[] = {
-	{
-		.vendor = X86_VENDOR_AMD,
-		.family = 0x15,
-		.model = X86_MODEL_ANY,
-		.feature = X86_FEATURE_ANY,
-		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
-	},
-	{
-		.vendor = X86_VENDOR_AMD,
-		.family = 0x16,
-		.model = X86_MODEL_ANY,
-		.feature = X86_FEATURE_ANY,
-		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
-	},
+	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
+	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
 	{}
 };
 
@@ -306,11 +306,9 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
 	.setup = lpss_deassert_reset,
 };
 
-#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
-
 static const struct x86_cpu_id lpss_cpu_ids[] = {
-	ICPU(INTEL_FAM6_ATOM_SILVERMONT),	/* Valleyview, Bay Trail */
-	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
+	X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
 	{}
 };
 
@ -37,7 +37,7 @@ struct always_present_id {
|
||||
const char *uid;
|
||||
};
|
||||
|
||||
#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
|
||||
#define X86_MATCH(model) X86_MATCH_INTEL_FAM6_MODEL(model, NULL)
|
||||
|
||||
#define ENTRY(hid, uid, cpu_models, dmi...) { \
|
||||
{ { hid, }, {} }, \
|
||||
@ -51,29 +51,29 @@ static const struct always_present_id always_present_ids[] = {
|
||||
* Bay / Cherry Trail PWM directly poked by GPU driver in win10,
|
||||
* but Linux uses a separate PWM driver, harmless if not used.
|
||||
*/
|
||||
ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT), {}),
|
||||
ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
|
||||
ENTRY("80860F09", "1", X86_MATCH(ATOM_SILVERMONT), {}),
|
||||
ENTRY("80862288", "1", X86_MATCH(ATOM_AIRMONT), {}),
|
||||
|
||||
/* Lenovo Yoga Book uses PWM2 for keyboard backlight control */
|
||||
ENTRY("80862289", "2", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
|
||||
ENTRY("80862289", "2", X86_MATCH(ATOM_AIRMONT), {
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
|
||||
}),
|
||||
/*
|
||||
* The INT0002 device is necessary to clear wakeup interrupt sources
|
||||
* on Cherry Trail devices, without it we get nobody cared IRQ msgs.
|
||||
*/
|
||||
ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}),
|
||||
ENTRY("INT0002", "1", X86_MATCH(ATOM_AIRMONT), {}),
|
||||
/*
|
||||
* On the Dell Venue 11 Pro 7130 and 7139, the DSDT hides
|
||||
* the touchscreen ACPI device until a certain time
|
||||
* after _SB.PCI0.GFX0.LCD.LCD1._ON gets called has passed
|
||||
* *and* _STA has been called at least 3 times since.
|
||||
*/
|
||||
ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), {
|
||||
ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"),
|
||||
}),
|
||||
ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_L), {
|
||||
ENTRY("SYNA7500", "1", X86_MATCH(HASWELL_L), {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7139"),
|
||||
}),
|
||||
@ -89,19 +89,19 @@ static const struct always_present_id always_present_ids[] = {
|
||||
* was copy-pasted from the GPD win, so it has a disabled KIOX000A
|
||||
* node which we should not enable, thus we also check the BIOS date.
|
||||
*/
|
||||
ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
|
||||
ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
|
||||
DMI_MATCH(DMI_BIOS_DATE, "02/21/2017")
|
||||
}),
|
||||
ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
|
||||
ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
|
||||
DMI_MATCH(DMI_BIOS_DATE, "03/20/2017")
|
||||
}),
|
||||
ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {
|
||||
ENTRY("KIOX000A", "1", X86_MATCH(ATOM_AIRMONT), {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
|
||||
|
@ -775,7 +775,7 @@ int __init agp_amd64_init(void)
}

/* First check that we have at least one AMD64 NB */
if (!pci_dev_present(amd_nb_misc_ids)) {
if (!amd_nb_num()) {
pci_unregister_driver(&agp_amd64_pci_driver);
return -ENODEV;
}

@ -209,20 +209,19 @@ static int __init mod_init(void)
out:
return err;
}
module_init(mod_init);

static void __exit mod_exit(void)
{
hwrng_unregister(&via_rng);
}

module_init(mod_init);
module_exit(mod_exit);

static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);

MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);

@ -30,6 +30,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@ -991,8 +992,8 @@ late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
X86_FEATURE_MATCH(X86_FEATURE_ACPI),
X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
X86_MATCH_FEATURE(X86_FEATURE_ACPI, NULL),
X86_MATCH_FEATURE(X86_FEATURE_HW_PSTATE, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

@ -18,6 +18,7 @@

#include <asm/msr.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

#include "cpufreq_ondemand.h"

@ -144,7 +145,7 @@ static void __exit amd_freq_sensitivity_exit(void)
module_exit(amd_freq_sensitivity_exit);

static const struct x86_cpu_id amd_freq_sensitivity_ids[] = {
X86_FEATURE_MATCH(X86_FEATURE_PROC_FEEDBACK),
X86_MATCH_FEATURE(X86_FEATURE_PROC_FEEDBACK, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, amd_freq_sensitivity_ids);

@ -385,7 +385,7 @@ static struct cpufreq_driver eps_driver = {
/* This driver will work only on Centaur C7 processors with
* Enhanced SpeedStep/PowerSaver registers */
static const struct x86_cpu_id eps_cpu_id[] = {
{ X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_EST },
X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_EST, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, eps_cpu_id);

@ -198,7 +198,7 @@ static struct cpufreq_driver elanfreq_driver = {
};

static const struct x86_cpu_id elan_id[] = {
{ X86_VENDOR_AMD, 4, 10, },
X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 10, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, elan_id);

@ -1908,51 +1908,51 @@ static const struct pstate_funcs knl_funcs = {
.get_val = core_get_val,
};

#define ICPU(model, policy) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
(unsigned long)&policy }
#define X86_MATCH(model, policy) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
X86_FEATURE_APERFMPERF, &policy)

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
ICPU(INTEL_FAM6_SANDYBRIDGE, core_funcs),
ICPU(INTEL_FAM6_SANDYBRIDGE_X, core_funcs),
ICPU(INTEL_FAM6_ATOM_SILVERMONT, silvermont_funcs),
ICPU(INTEL_FAM6_IVYBRIDGE, core_funcs),
ICPU(INTEL_FAM6_HASWELL, core_funcs),
ICPU(INTEL_FAM6_BROADWELL, core_funcs),
ICPU(INTEL_FAM6_IVYBRIDGE_X, core_funcs),
ICPU(INTEL_FAM6_HASWELL_X, core_funcs),
ICPU(INTEL_FAM6_HASWELL_L, core_funcs),
ICPU(INTEL_FAM6_HASWELL_G, core_funcs),
ICPU(INTEL_FAM6_BROADWELL_G, core_funcs),
ICPU(INTEL_FAM6_ATOM_AIRMONT, airmont_funcs),
ICPU(INTEL_FAM6_SKYLAKE_L, core_funcs),
ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
ICPU(INTEL_FAM6_SKYLAKE, core_funcs),
ICPU(INTEL_FAM6_BROADWELL_D, core_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs),
ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs),
ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs),
ICPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, core_funcs),
ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
X86_MATCH(SANDYBRIDGE, core_funcs),
X86_MATCH(SANDYBRIDGE_X, core_funcs),
X86_MATCH(ATOM_SILVERMONT, silvermont_funcs),
X86_MATCH(IVYBRIDGE, core_funcs),
X86_MATCH(HASWELL, core_funcs),
X86_MATCH(BROADWELL, core_funcs),
X86_MATCH(IVYBRIDGE_X, core_funcs),
X86_MATCH(HASWELL_X, core_funcs),
X86_MATCH(HASWELL_L, core_funcs),
X86_MATCH(HASWELL_G, core_funcs),
X86_MATCH(BROADWELL_G, core_funcs),
X86_MATCH(ATOM_AIRMONT, airmont_funcs),
X86_MATCH(SKYLAKE_L, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE, core_funcs),
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(XEON_PHI_KNL, knl_funcs),
X86_MATCH(XEON_PHI_KNM, knl_funcs),
X86_MATCH(ATOM_GOLDMONT, core_funcs),
X86_MATCH(ATOM_GOLDMONT_PLUS, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
ICPU(INTEL_FAM6_BROADWELL_D, core_funcs),
ICPU(INTEL_FAM6_BROADWELL_X, core_funcs),
ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
{}
};

static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
ICPU(INTEL_FAM6_KABYLAKE, core_funcs),
X86_MATCH(KABYLAKE, core_funcs),
{}
};

static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs),
ICPU(INTEL_FAM6_SKYLAKE, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
X86_MATCH(SKYLAKE, core_funcs),
{}
};

@ -2725,13 +2725,14 @@ static inline void intel_pstate_request_control_from_smm(void) {}

#define INTEL_PSTATE_HWP_BROADWELL 0x01

#define ICPU_HWP(model, hwp_mode) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
#define X86_MATCH_HWP(model, hwp_mode) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
X86_FEATURE_HWP, hwp_mode)

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
ICPU_HWP(INTEL_FAM6_BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
ICPU_HWP(X86_MODEL_ANY, 0),
X86_MATCH_HWP(BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
X86_MATCH_HWP(BROADWELL_D, INTEL_PSTATE_HWP_BROADWELL),
X86_MATCH_HWP(ANY, 0),
{}
};

@ -910,7 +910,7 @@ static struct cpufreq_driver longhaul_driver = {
};

static const struct x86_cpu_id longhaul_id[] = {
{ X86_VENDOR_CENTAUR, 6 },
X86_MATCH_VENDOR_FAM(CENTAUR, 6, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, longhaul_id);

@ -281,8 +281,7 @@ static struct cpufreq_driver longrun_driver = {
};

static const struct x86_cpu_id longrun_ids[] = {
{ X86_VENDOR_TRANSMETA, X86_FAMILY_ANY, X86_MODEL_ANY,
X86_FEATURE_LONGRUN },
X86_MATCH_VENDOR_FEATURE(TRANSMETA, X86_FEATURE_LONGRUN, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, longrun_ids);

@ -231,7 +231,7 @@ static struct cpufreq_driver p4clockmod_driver = {
};

static const struct x86_cpu_id cpufreq_p4_id[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ACC },
X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_ACC, NULL),
{}
};

@ -258,8 +258,8 @@ static struct cpufreq_driver powernow_k6_driver = {
};

static const struct x86_cpu_id powernow_k6_ids[] = {
{ X86_VENDOR_AMD, 5, 12 },
{ X86_VENDOR_AMD, 5, 13 },
X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 12, NULL),
X86_MATCH_VENDOR_FAM_MODEL(AMD, 5, 13, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k6_ids);

@ -109,7 +109,7 @@ static int check_fsb(unsigned int fsbspeed)
}

static const struct x86_cpu_id powernow_k7_cpuids[] = {
{ X86_VENDOR_AMD, 6, },
X86_MATCH_VENDOR_FAM(AMD, 6, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k7_cpuids);

@ -452,7 +452,7 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,

static const struct x86_cpu_id powernow_k8_ids[] = {
/* IO based frequency switching */
{ X86_VENDOR_AMD, 0xf },
X86_MATCH_VENDOR_FAM(AMD, 0xf, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);

@ -95,7 +95,7 @@ static struct cpufreq_driver sc520_freq_driver = {
};

static const struct x86_cpu_id sc520_ids[] = {
{ X86_VENDOR_AMD, 4, 9 },
X86_MATCH_VENDOR_FAM_MODEL(AMD, 4, 9, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, sc520_ids);

@ -520,18 +520,12 @@ static struct cpufreq_driver centrino_driver = {
* or ASCII model IDs.
*/
static const struct x86_cpu_id centrino_ids[] = {
{ X86_VENDOR_INTEL, 6, 9, X86_FEATURE_EST },
{ X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
{ X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
{ X86_VENDOR_INTEL, 6, 13, X86_FEATURE_EST },
{ X86_VENDOR_INTEL, 15, 3, X86_FEATURE_EST },
{ X86_VENDOR_INTEL, 15, 4, X86_FEATURE_EST },
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 9, X86_FEATURE_EST, NULL),
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, 13, X86_FEATURE_EST, NULL),
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 3, X86_FEATURE_EST, NULL),
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15, 4, X86_FEATURE_EST, NULL),
{}
};
#if 0
/* Autoload or not? Do not for now. */
MODULE_DEVICE_TABLE(x86cpu, centrino_ids);
#endif

/**
* centrino_init - initializes the Enhanced SpeedStep CPUFreq driver

@ -319,15 +319,11 @@ static struct cpufreq_driver speedstep_driver = {
};

static const struct x86_cpu_id ss_smi_ids[] = {
{ X86_VENDOR_INTEL, 6, 0xb, },
{ X86_VENDOR_INTEL, 6, 0x8, },
{ X86_VENDOR_INTEL, 15, 2 },
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0),
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0),
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0),
{}
};
#if 0
/* Autoload or not? Do not for now. */
MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids);
#endif

/**
* speedstep_init - initializes the SpeedStep CPUFreq driver

@ -299,15 +299,11 @@ static struct cpufreq_driver speedstep_driver = {
};

static const struct x86_cpu_id ss_smi_ids[] = {
{ X86_VENDOR_INTEL, 6, 0xb, },
{ X86_VENDOR_INTEL, 6, 0x8, },
{ X86_VENDOR_INTEL, 15, 2 },
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0x8, 0),
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, 0xb, 0),
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 15, 0x2, 0),
{}
};
#if 0
/* Not auto loaded currently */
MODULE_DEVICE_TABLE(x86cpu, ss_smi_ids);
#endif

/**
* speedstep_init - initializes the SpeedStep CPUFreq driver

@ -474,7 +474,7 @@ static struct skcipher_alg cbc_aes_alg = {
};

static const struct x86_cpu_id padlock_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_XCRYPT),
X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

@ -490,7 +490,7 @@ static struct shash_alg sha256_alg_nano = {
};

static const struct x86_cpu_id padlock_sha_ids[] = {
X86_FEATURE_MATCH(X86_FEATURE_PHE),
X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);

@ -3626,13 +3626,13 @@ static void setup_pci_device(void)
}

static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
X86_MATCH_VENDOR_FAM(AMD, 0x0F, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x10, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x15, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x16, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
X86_MATCH_VENDOR_FAM(HYGON, 0x18, NULL),
X86_MATCH_VENDOR_FAM(AMD, 0x19, NULL),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);

@ -123,10 +123,10 @@ static int i10nm_get_all_munits(void)
}

static const struct x86_cpu_id i10nm_cpuids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_TREMONT_D, 0, 0 },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_X, 0, 0 },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ICELAKE_D, 0, 0 },
{ }
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);

@ -1537,8 +1537,8 @@ static struct dunit_ops dnv_ops = {
};

static const struct x86_cpu_id pnd2_cpuids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops },
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT_D, 0, (kernel_ulong_t)&dnv_ops },
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &apl_ops),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &dnv_ops),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);

@ -3420,13 +3420,13 @@ fail0:
}

static const struct x86_cpu_id sbridge_cpuids[] = {
INTEL_CPU_FAM6(SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table),
INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table),
INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table),
INTEL_CPU_FAM6(BROADWELL_D, pci_dev_descr_broadwell_table),
INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table),
INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &pci_dev_descr_ibridge_table),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &pci_dev_descr_haswell_table),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &pci_dev_descr_broadwell_table),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &pci_dev_descr_broadwell_table),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &pci_dev_descr_knl_table),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);

@ -158,7 +158,7 @@ fail:
}

static const struct x86_cpu_id skx_cpuids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 },
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);

@ -107,7 +107,7 @@ struct axp288_extcon_info {
};

static const struct x86_cpu_id cherry_trail_cpu_ids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT, X86_FEATURE_ANY },
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};

@ -709,7 +709,7 @@ static int coretemp_cpu_offline(unsigned int cpu)
return 0;
}
static const struct x86_cpu_id __initconst coretemp_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_DTHERM, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);

@ -270,10 +270,10 @@ static int via_cputemp_down_prep(unsigned int cpu)
}

static const struct x86_cpu_id __initconst cputemp_ids[] = {
{ X86_VENDOR_CENTAUR, 6, 0xa, }, /* C7 A */
{ X86_VENDOR_CENTAUR, 6, 0xd, }, /* C7 D */
{ X86_VENDOR_CENTAUR, 6, 0xf, }, /* Nano */
{ X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, },
X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL),
X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL),
X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL),
X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);

@ -1079,51 +1079,48 @@ static const struct idle_cpu idle_cpu_dnv __initconst = {
};

static const struct x86_cpu_id intel_idle_ids[] __initconst = {
INTEL_CPU_FAM6(NEHALEM_EP, idle_cpu_nhx),
INTEL_CPU_FAM6(NEHALEM, idle_cpu_nehalem),
INTEL_CPU_FAM6(NEHALEM_G, idle_cpu_nehalem),
INTEL_CPU_FAM6(WESTMERE, idle_cpu_nehalem),
INTEL_CPU_FAM6(WESTMERE_EP, idle_cpu_nhx),
INTEL_CPU_FAM6(NEHALEM_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(ATOM_BONNELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_BONNELL_MID, idle_cpu_lincroft),
INTEL_CPU_FAM6(WESTMERE_EX, idle_cpu_nhx),
INTEL_CPU_FAM6(SANDYBRIDGE, idle_cpu_snb),
INTEL_CPU_FAM6(SANDYBRIDGE_X, idle_cpu_snx),
INTEL_CPU_FAM6(ATOM_SALTWELL, idle_cpu_atom),
INTEL_CPU_FAM6(ATOM_SILVERMONT, idle_cpu_byt),
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, idle_cpu_tangier),
INTEL_CPU_FAM6(ATOM_AIRMONT, idle_cpu_cht),
INTEL_CPU_FAM6(IVYBRIDGE, idle_cpu_ivb),
INTEL_CPU_FAM6(IVYBRIDGE_X, idle_cpu_ivt),
INTEL_CPU_FAM6(HASWELL, idle_cpu_hsw),
INTEL_CPU_FAM6(HASWELL_X, idle_cpu_hsx),
INTEL_CPU_FAM6(HASWELL_L, idle_cpu_hsw),
INTEL_CPU_FAM6(HASWELL_G, idle_cpu_hsw),
INTEL_CPU_FAM6(ATOM_SILVERMONT_D, idle_cpu_avn),
INTEL_CPU_FAM6(BROADWELL, idle_cpu_bdw),
INTEL_CPU_FAM6(BROADWELL_G, idle_cpu_bdw),
INTEL_CPU_FAM6(BROADWELL_X, idle_cpu_bdx),
INTEL_CPU_FAM6(BROADWELL_D, idle_cpu_bdx),
INTEL_CPU_FAM6(SKYLAKE_L, idle_cpu_skl),
INTEL_CPU_FAM6(SKYLAKE, idle_cpu_skl),
INTEL_CPU_FAM6(KABYLAKE_L, idle_cpu_skl),
INTEL_CPU_FAM6(KABYLAKE, idle_cpu_skl),
INTEL_CPU_FAM6(SKYLAKE_X, idle_cpu_skx),
INTEL_CPU_FAM6(XEON_PHI_KNL, idle_cpu_knl),
INTEL_CPU_FAM6(XEON_PHI_KNM, idle_cpu_knl),
INTEL_CPU_FAM6(ATOM_GOLDMONT, idle_cpu_bxt),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, idle_cpu_bxt),
INTEL_CPU_FAM6(ATOM_GOLDMONT_D, idle_cpu_dnv),
INTEL_CPU_FAM6(ATOM_TREMONT_D, idle_cpu_dnv),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_G, &idle_cpu_nehalem),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE, &idle_cpu_nehalem),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EX, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL, &idle_cpu_atom),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_BONNELL_MID, &idle_cpu_lincroft),
X86_MATCH_INTEL_FAM6_MODEL(WESTMERE_EX, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &idle_cpu_snb),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &idle_cpu_snx),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL, &idle_cpu_atom),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &idle_cpu_byt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &idle_cpu_tangier),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &idle_cpu_cht),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &idle_cpu_ivb),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &idle_cpu_ivt),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &idle_cpu_hsw),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &idle_cpu_hsx),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &idle_cpu_hsw),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &idle_cpu_hsw),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, &idle_cpu_avn),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &idle_cpu_bdw),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &idle_cpu_bdw),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &idle_cpu_bdx),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &idle_cpu_bdx),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &idle_cpu_skl),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &idle_cpu_skl),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &idle_cpu_skl),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &idle_cpu_skl),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_dnv),
{}
};

#define INTEL_CPU_FAM6_MWAIT \
{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_MWAIT, 0 }

static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
INTEL_CPU_FAM6_MWAIT,
X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
{}
};

@ -242,7 +242,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
static bool sdhci_acpi_byt(void)
{
static const struct x86_cpu_id byt[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT },
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
{}
};

@ -252,7 +252,7 @@ static bool sdhci_acpi_byt(void)
static bool sdhci_acpi_cht(void)
{
static const struct x86_cpu_id cht[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};

@ -55,15 +55,13 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = {
.need_resume = mid_pci_need_resume,
};

#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

/*
* This table should be in sync with the one in
* arch/x86/platform/intel-mid/pwr.c.
*/
static const struct x86_cpu_id lpss_cpu_ids[] = {
ICPU(INTEL_FAM6_ATOM_SALTWELL_MID),
ICPU(INTEL_FAM6_ATOM_SILVERMONT_MID),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
{}
};

@ -358,15 +358,13 @@ static struct notifier_block uncore_pm_nb = {
.notifier_call = uncore_pm_notify,
};

#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_G),
ICPU(INTEL_FAM6_BROADWELL_X),
ICPU(INTEL_FAM6_BROADWELL_D),
ICPU(INTEL_FAM6_SKYLAKE_X),
ICPU(INTEL_FAM6_ICELAKE_X),
ICPU(INTEL_FAM6_ICELAKE_D),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, NULL),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, NULL),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
{}
};

@ -148,8 +148,8 @@ static struct irq_chip int0002_cht_irqchip = {
};

static const struct x86_cpu_id int0002_cpu_ids[] = {
INTEL_CPU_FAM6(ATOM_SILVERMONT, int0002_byt_irqchip), /* Valleyview, Bay Trail */
INTEL_CPU_FAM6(ATOM_AIRMONT, int0002_cht_irqchip), /* Braswell, Cherry Trail */
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &int0002_byt_irqchip),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &int0002_cht_irqchip),
{}
};

@ -113,8 +113,8 @@ static const struct mid_pb_ddata mrfld_ddata = {
};

static const struct x86_cpu_id mid_pb_cpu_ids[] = {
INTEL_CPU_FAM6(ATOM_SALTWELL_MID, mfld_ddata),
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, mrfld_ddata),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, &mfld_ddata),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &mrfld_ddata),
{}
};

@ -871,18 +871,18 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
#endif /* CONFIG_DEBUG_FS */

static const struct x86_cpu_id intel_pmc_core_ids[] = {
INTEL_CPU_FAM6(SKYLAKE_L, spt_reg_map),
INTEL_CPU_FAM6(SKYLAKE, spt_reg_map),
INTEL_CPU_FAM6(KABYLAKE_L, spt_reg_map),
INTEL_CPU_FAM6(KABYLAKE, spt_reg_map),
INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &spt_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &spt_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &spt_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &spt_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &cnp_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &icl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &cnp_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &cnp_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_reg_map),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &tgl_reg_map),
{}
};

@ -38,14 +38,14 @@ static struct platform_device pmc_core_device = {
* other list may grow, but this list should not.
*/
static const struct x86_cpu_id intel_pmc_core_platform_ids[] = {
INTEL_CPU_FAM6(SKYLAKE_L, pmc_core_device),
INTEL_CPU_FAM6(SKYLAKE, pmc_core_device),
INTEL_CPU_FAM6(KABYLAKE_L, pmc_core_device),
INTEL_CPU_FAM6(KABYLAKE, pmc_core_device),
INTEL_CPU_FAM6(CANNONLAKE_L, pmc_core_device),
INTEL_CPU_FAM6(ICELAKE_L, pmc_core_device),
INTEL_CPU_FAM6(COMETLAKE, pmc_core_device),
INTEL_CPU_FAM6(COMETLAKE_L, pmc_core_device),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &pmc_core_device),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &pmc_core_device),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &pmc_core_device),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &pmc_core_device),
X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &pmc_core_device),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &pmc_core_device),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &pmc_core_device),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &pmc_core_device),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_platform_ids);

@ -160,10 +160,8 @@ static struct notifier_block isst_pm_nb = {
.notifier_call = isst_pm_notify,
};

#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id isst_if_cpu_ids[] = {
ICPU(INTEL_FAM6_SKYLAKE_X),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, isst_if_cpu_ids);

@ -308,11 +308,10 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
};

static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
INTEL_CPU_FAM6(ATOM_GOLDMONT, telem_apl_debugfs_conf),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, telem_apl_debugfs_conf),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_debugfs_conf),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_apl_debugfs_conf),
{}
};

MODULE_DEVICE_TABLE(x86cpu, telemetry_debugfs_cpu_ids);

static int telemetry_debugfs_check_evts(void)

@ -67,9 +67,6 @@
#define TELEM_CLEAR_VERBOSITY_BITS(x) ((x) &= ~(BIT(27) | BIT(28)))
#define TELEM_SET_VERBOSITY_BITS(x, y) ((x) |= ((y) << 27))

#define TELEM_CPU(model, data) \
{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data }

enum telemetry_action {
TELEM_UPDATE = 0,
TELEM_ADD,
@ -183,8 +180,8 @@ static struct telemetry_plt_config telem_glk_config = {
};

static const struct x86_cpu_id telemetry_cpu_ids[] = {
TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT_PLUS, telem_glk_config),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &telem_apl_config),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &telem_glk_config),
{}
};

@ -113,11 +113,9 @@ static int itmt_legacy_cpu_online(unsigned int cpu)
return 0;
}

#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
ICPU(INTEL_FAM6_BROADWELL_X),
ICPU(INTEL_FAM6_SKYLAKE_X),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
{}
};

@ -951,52 +951,51 @@ static const struct rapl_defaults rapl_defaults_cht = {
};

static const struct x86_cpu_id rapl_ids[] __initconst = {
INTEL_CPU_FAM6(SANDYBRIDGE, rapl_defaults_core),
INTEL_CPU_FAM6(SANDYBRIDGE_X, rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &rapl_defaults_core),

INTEL_CPU_FAM6(IVYBRIDGE, rapl_defaults_core),
INTEL_CPU_FAM6(IVYBRIDGE_X, rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &rapl_defaults_core),

INTEL_CPU_FAM6(HASWELL, rapl_defaults_core),
INTEL_CPU_FAM6(HASWELL_L, rapl_defaults_core),
INTEL_CPU_FAM6(HASWELL_G, rapl_defaults_core),
INTEL_CPU_FAM6(HASWELL_X, rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &rapl_defaults_hsw_server),

INTEL_CPU_FAM6(BROADWELL, rapl_defaults_core),
INTEL_CPU_FAM6(BROADWELL_G, rapl_defaults_core),
INTEL_CPU_FAM6(BROADWELL_D, rapl_defaults_core),
INTEL_CPU_FAM6(BROADWELL_X, rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &rapl_defaults_hsw_server),

INTEL_CPU_FAM6(SKYLAKE, rapl_defaults_core),
INTEL_CPU_FAM6(SKYLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(SKYLAKE_X, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(KABYLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(KABYLAKE, rapl_defaults_core),
INTEL_CPU_FAM6(CANNONLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE_NNPI, rapl_defaults_core),
INTEL_CPU_FAM6(ICELAKE_X, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(ICELAKE_D, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(COMETLAKE_L, rapl_defaults_core),
INTEL_CPU_FAM6(COMETLAKE, rapl_defaults_core),
INTEL_CPU_FAM6(TIGERLAKE_L, rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),

INTEL_CPU_FAM6(ATOM_SILVERMONT, rapl_defaults_byt),
INTEL_CPU_FAM6(ATOM_AIRMONT, rapl_defaults_cht),
INTEL_CPU_FAM6(ATOM_SILVERMONT_MID, rapl_defaults_tng),
INTEL_CPU_FAM6(ATOM_AIRMONT_MID, rapl_defaults_ann),
INTEL_CPU_FAM6(ATOM_GOLDMONT, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_PLUS, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_GOLDMONT_D, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_TREMONT_D, rapl_defaults_core),
INTEL_CPU_FAM6(ATOM_TREMONT_L, rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &rapl_defaults_tng),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &rapl_defaults_ann),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &rapl_defaults_core),

INTEL_CPU_FAM6(XEON_PHI_KNL, rapl_defaults_hsw_server),
INTEL_CPU_FAM6(XEON_PHI_KNM, rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &rapl_defaults_hsw_server),
{}
};

MODULE_DEVICE_TABLE(x86cpu, rapl_ids);

/* Read once for all raw primitive data for domains */

@ -651,7 +651,7 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
};

static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_MWAIT, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);

@ -64,9 +64,6 @@
#include <asm/cpu_device_id.h>
#include <asm/iosf_mbi.h>

#define X86_FAMILY_QUARK 0x5
#define X86_MODEL_QUARK_X1000 0x9

/* DTS reset is programmed via QRK_MBI_UNIT_SOC */
#define QRK_DTS_REG_OFFSET_RESET 0x34
#define QRK_DTS_RESET_BIT BIT(0)
@ -433,7 +430,7 @@ err_ret:
}

static const struct x86_cpu_id qrk_thermal_ids[] __initconst = {
{ X86_VENDOR_INTEL, X86_FAMILY_QUARK, X86_MODEL_QUARK_X1000 },
X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, qrk_thermal_ids);

@ -36,8 +36,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
}

static const struct x86_cpu_id soc_thermal_ids[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, 0,
BYT_SOC_DTS_APIC_IRQ},
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, BYT_SOC_DTS_APIC_IRQ),
{}
};
MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);

@ -478,7 +478,7 @@ static int pkg_thermal_cpu_online(unsigned int cpu)
}

static const struct x86_cpu_id __initconst pkg_temp_thermal_ids[] = {
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_PTS },
X86_MATCH_VENDOR_FEATURE(INTEL, X86_FEATURE_PTS, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, pkg_temp_thermal_ids);

include/linux/min_heap.h
@ -0,0 +1,134 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIN_HEAP_H
#define _LINUX_MIN_HEAP_H

#include <linux/bug.h>
#include <linux/string.h>
#include <linux/types.h>

/**
* struct min_heap - Data structure to hold a min-heap.
* @data: Start of array holding the heap elements.
* @nr: Number of elements currently in the heap.
* @size: Maximum number of elements that can be held in current storage.
*/
struct min_heap {
void *data;
int nr;
int size;
};

/**
* struct min_heap_callbacks - Data/functions to customise the min_heap.
* @elem_size: The size of each element in bytes.
* @less: Partial order function for this heap.
* @swp: Swap elements function.
*/
struct min_heap_callbacks {
int elem_size;
bool (*less)(const void *lhs, const void *rhs);
void (*swp)(void *lhs, void *rhs);
};

/* Sift the element at pos down the heap. */
static __always_inline
void min_heapify(struct min_heap *heap, int pos,
const struct min_heap_callbacks *func)
{
void *left, *right, *parent, *smallest;
void *data = heap->data;

for (;;) {
if (pos * 2 + 1 >= heap->nr)
break;

left = data + ((pos * 2 + 1) * func->elem_size);
parent = data + (pos * func->elem_size);
smallest = parent;
if (func->less(left, smallest))
smallest = left;

if (pos * 2 + 2 < heap->nr) {
right = data + ((pos * 2 + 2) * func->elem_size);
if (func->less(right, smallest))
smallest = right;
}
if (smallest == parent)
break;
func->swp(smallest, parent);
if (smallest == left)
pos = (pos * 2) + 1;
else
pos = (pos * 2) + 2;
}
}

/* Floyd's approach to heapification that is O(nr). */
static __always_inline
void min_heapify_all(struct min_heap *heap,
const struct min_heap_callbacks *func)
{
int i;

for (i = heap->nr / 2; i >= 0; i--)
min_heapify(heap, i, func);
}

/* Remove minimum element from the heap, O(log2(nr)). */
static __always_inline
void min_heap_pop(struct min_heap *heap,
const struct min_heap_callbacks *func)
{
void *data = heap->data;

if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
return;

/* Place last element at the root (position 0) and then sift down. */
heap->nr--;
memcpy(data, data + (heap->nr * func->elem_size), func->elem_size);
min_heapify(heap, 0, func);
}

/*
* Remove the minimum element and then push the given element. The
* implementation performs 1 sift (O(log2(nr))) and is therefore more
* efficient than a pop followed by a push that does 2.
*/
static __always_inline
void min_heap_pop_push(struct min_heap *heap,
const void *element,
const struct min_heap_callbacks *func)
{
memcpy(heap->data, element, func->elem_size);
min_heapify(heap, 0, func);
}

/* Push an element on to the heap, O(log2(nr)). */
static __always_inline
void min_heap_push(struct min_heap *heap, const void *element,
const struct min_heap_callbacks *func)
{
void *data = heap->data;
void *child, *parent;
int pos;

if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
return;

/* Place at the end of data. */
pos = heap->nr;
memcpy(data + (pos * func->elem_size), element, func->elem_size);
heap->nr++;

/* Sift child at pos up. */
for (; pos > 0; pos = (pos - 1) / 2) {
child = data + (pos * func->elem_size);
parent = data + ((pos - 1) / 2) * func->elem_size;
if (func->less(parent, child))
break;
func->swp(parent, child);
}
}

#endif /* _LINUX_MIN_HEAP_H */

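(A minimal usage sketch for the header above, not taken from the tree; it assumes <linux/kernel.h> for ARRAY_SIZE() and the demo_* names are illustrative. The callbacks operate on raw bytes, so elem_size and the comparison/swap functions must agree on the element type.)

#include <linux/kernel.h>
#include <linux/min_heap.h>

static bool demo_less(const void *lhs, const void *rhs)
{
	return *(const int *)lhs < *(const int *)rhs;
}

static void demo_swp(void *lhs, void *rhs)
{
	int tmp = *(int *)lhs;

	*(int *)lhs = *(int *)rhs;
	*(int *)rhs = tmp;
}

static const struct min_heap_callbacks demo_cb = {
	.elem_size = sizeof(int),
	.less = demo_less,
	.swp = demo_swp,
};

static void demo(void)
{
	int values[] = { 5, 2, 8, 1 };
	struct min_heap heap = {
		.data = values,
		.nr = ARRAY_SIZE(values),
		.size = ARRAY_SIZE(values),
	};

	min_heapify_all(&heap, &demo_cb);	/* values[0] becomes 1 */
	min_heap_pop(&heap, &demo_cb);		/* drops 1; heap.nr is now 3 */
}
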
@ -667,9 +667,7 @@ struct x86_cpu_id {
kernel_ulong_t driver_data;
};

#define X86_FEATURE_MATCH(x) \
{ X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x }

/* Wild cards for x86_cpu_id::vendor, family, model and feature */
#define X86_VENDOR_ANY 0xffff
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0

@ -93,14 +93,26 @@ struct perf_raw_record {
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
* hw_idx: The low level index of raw branch records
* for the most recent branch.
* -1ULL means invalid/unknown.
*
* Note that nr can vary from sample to sample
* branches (to, from) are stored from most recent
* to least recent, i.e., entries[0] contains the most
* recent branch.
* The entries[] is an abstraction of raw branch records,
* which may not be stored in age order in HW, e.g. Intel LBR.
* The hw_idx is to expose the low level index of raw
* branch record for the most recent branch aka entries[0].
* The hw_idx index is between -1 (unknown) and max depth,
* which can be retrieved in /sys/devices/cpu/caps/branches.
* For the architectures whose raw branch records are
* already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
__u64 nr;
__u64 hw_idx;
struct perf_branch_entry entries[0];
};

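(A hedged userspace sketch of opting in to the new field; only PERF_SAMPLE_BRANCH_HW_INDEX itself comes from this series, the surrounding perf_event_attr setup is ordinary perf_event_open() boilerplate and the function name is hypothetical.)

#include <linux/perf_event.h>
#include <string.h>

static void demo_attr_setup(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_type = PERF_SAMPLE_BRANCH_STACK;
	/* Ask for the raw LBR index alongside the branch records. */
	attr->branch_sample_type = PERF_SAMPLE_BRANCH_ANY |
				   PERF_SAMPLE_BRANCH_HW_INDEX;
}
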
@ -850,6 +862,13 @@ struct perf_cpu_context {
int sched_cb_usage;

int online;
/*
* Per-CPU storage for iterators used in visit_groups_merge. The default
* storage is of size 2 to hold the CPU and any CPU event iterators.
*/
int heap_size;
struct perf_event **heap;
struct perf_event *heap_default[2];
};

struct perf_output_handle {

@ -181,6 +181,8 @@ enum perf_branch_sample_type_shift {

PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */

PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */

PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};

@ -208,6 +210,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_TYPE_SAVE =
1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

@ -853,7 +857,9 @@ enum perf_event_type {
* char data[size];}&& PERF_SAMPLE_RAW
*
* { u64 nr;
* { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
* { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
* { u64 from, to, flags } lbr[nr];
* } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER

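(To make the amended record layout concrete, a hedged sketch of stepping through the branch-stack portion of a sample, assuming both PERF_SAMPLE_BRANCH_STACK and the new HW_INDEX bit were requested; the cursor handling is illustrative, not from this series.)

#include <linux/perf_event.h>
#include <string.h>

static const unsigned char *demo_read_branch_stack(const unsigned char *p)
{
	__u64 nr, hw_idx;

	memcpy(&nr, p, sizeof(nr));		/* u64 nr */
	p += sizeof(nr);
	memcpy(&hw_idx, p, sizeof(hw_idx));	/* u64 hw_idx, -1ULL if unknown */
	p += sizeof(hw_idx);
	/* nr records of struct perf_branch_entry { from, to, flags } follow. */
	p += nr * sizeof(struct perf_branch_entry);
	return p;
}
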
@ -49,6 +49,7 @@
#include <linux/sched/mm.h>
#include <linux/proc_ns.h>
#include <linux/mount.h>
#include <linux/min_heap.h>

#include "internal.h"

@ -891,6 +892,47 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
rcu_read_unlock();
}

static int perf_cgroup_ensure_storage(struct perf_event *event,
struct cgroup_subsys_state *css)
{
struct perf_cpu_context *cpuctx;
struct perf_event **storage;
int cpu, heap_size, ret = 0;

/*
* Allow storage to have sufficient space for an iterator for each
* possibly nested cgroup plus an iterator for events with no cgroup.
*/
for (heap_size = 1; css; css = css->parent)
heap_size++;

for_each_possible_cpu(cpu) {
cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
if (heap_size <= cpuctx->heap_size)
continue;

storage = kmalloc_node(heap_size * sizeof(struct perf_event *),
GFP_KERNEL, cpu_to_node(cpu));
if (!storage) {
ret = -ENOMEM;
break;
}

raw_spin_lock_irq(&cpuctx->ctx.lock);
if (cpuctx->heap_size < heap_size) {
swap(cpuctx->heap, storage);
if (storage == cpuctx->heap_default)
storage = NULL;
cpuctx->heap_size = heap_size;
}
raw_spin_unlock_irq(&cpuctx->ctx.lock);

kfree(storage);
}

return ret;
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
struct perf_event_attr *attr,
struct perf_event *group_leader)
@ -910,6 +952,10 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
goto out;
}

ret = perf_cgroup_ensure_storage(event, css);
if (ret)
goto out;

cgrp = container_of(css, struct perf_cgroup, css);
event->cgrp = cgrp;

@ -1531,6 +1577,30 @@ perf_event_groups_less(struct perf_event *left, struct perf_event *right)
if (left->cpu > right->cpu)
return false;

#ifdef CONFIG_CGROUP_PERF
if (left->cgrp != right->cgrp) {
if (!left->cgrp || !left->cgrp->css.cgroup) {
/*
* Left has no cgroup but right does, no cgroups come
* first.
*/
return true;
}
if (!right->cgrp || !right->cgrp->css.cgroup) {
/*
* Right has no cgroup but left does, no cgroups come
* first.
*/
return false;
}
/* Two dissimilar cgroups, order by id. */
if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id)
return true;

return false;
}
#endif

if (left->group_index < right->group_index)
return true;
if (left->group_index > right->group_index)
@ -1610,25 +1680,48 @@ del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
}

/*
* Get the leftmost event in the @cpu subtree.
* Get the leftmost event in the cpu/cgroup subtree.
*/
static struct perf_event *
perf_event_groups_first(struct perf_event_groups *groups, int cpu)
perf_event_groups_first(struct perf_event_groups *groups, int cpu,
struct cgroup *cgrp)
{
struct perf_event *node_event = NULL, *match = NULL;
struct rb_node *node = groups->tree.rb_node;
#ifdef CONFIG_CGROUP_PERF
u64 node_cgrp_id, cgrp_id = 0;

if (cgrp)
cgrp_id = cgrp->kn->id;
#endif

while (node) {
node_event = container_of(node, struct perf_event, group_node);

if (cpu < node_event->cpu) {
node = node->rb_left;
} else if (cpu > node_event->cpu) {
node = node->rb_right;
} else {
match = node_event;
node = node->rb_left;
continue;
}
if (cpu > node_event->cpu) {
node = node->rb_right;
continue;
}
#ifdef CONFIG_CGROUP_PERF
node_cgrp_id = 0;
if (node_event->cgrp && node_event->cgrp->css.cgroup)
node_cgrp_id = node_event->cgrp->css.cgroup->kn->id;

if (cgrp_id < node_cgrp_id) {
node = node->rb_left;
continue;
}
if (cgrp_id > node_cgrp_id) {
node = node->rb_right;
continue;
}
#endif
match = node_event;
node = node->rb_left;
}

return match;
@ -1641,12 +1734,26 @@ static struct perf_event *
perf_event_groups_next(struct perf_event *event)
{
struct perf_event *next;
#ifdef CONFIG_CGROUP_PERF
u64 curr_cgrp_id = 0;
u64 next_cgrp_id = 0;
#endif

next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node);
if (next && next->cpu == event->cpu)
return next;
if (next == NULL || next->cpu != event->cpu)
return NULL;

return NULL;
#ifdef CONFIG_CGROUP_PERF
if (event->cgrp && event->cgrp->css.cgroup)
curr_cgrp_id = event->cgrp->css.cgroup->kn->id;

if (next->cgrp && next->cgrp->css.cgroup)
next_cgrp_id = next->cgrp->css.cgroup->kn->id;

if (curr_cgrp_id != next_cgrp_id)
return NULL;
#endif
return next;
}

/*
@ -1986,6 +2093,12 @@ static int perf_get_aux_event(struct perf_event *event,
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline struct list_head *get_event_list(struct perf_event *event)
|
||||
{
|
||||
struct perf_event_context *ctx = event->ctx;
|
||||
return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active;
|
||||
}
|
||||
|
||||
static void perf_group_detach(struct perf_event *event)
|
||||
{
|
||||
struct perf_event *sibling, *tmp;
|
||||
@ -2028,12 +2141,8 @@ static void perf_group_detach(struct perf_event *event)
|
||||
if (!RB_EMPTY_NODE(&event->group_node)) {
|
||||
add_event_to_groups(sibling, event->ctx);
|
||||
|
||||
if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
|
||||
struct list_head *list = sibling->attr.pinned ?
|
||||
&ctx->pinned_active : &ctx->flexible_active;
|
||||
|
||||
list_add_tail(&sibling->active_list, list);
|
||||
}
|
||||
if (sibling->state == PERF_EVENT_STATE_ACTIVE)
|
||||
list_add_tail(&sibling->active_list, get_event_list(sibling));
|
||||
}
|
||||
|
||||
WARN_ON_ONCE(sibling->ctx != event->ctx);
|
||||
@ -2182,6 +2291,7 @@ __perf_remove_from_context(struct perf_event *event,
|
||||
|
||||
if (!ctx->nr_events && ctx->is_active) {
|
||||
ctx->is_active = 0;
|
||||
ctx->rotate_necessary = 0;
|
||||
if (ctx->task) {
|
||||
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
|
||||
cpuctx->task_ctx = NULL;
|
||||
@ -2350,6 +2460,8 @@ event_sched_in(struct perf_event *event,
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
WARN_ON_ONCE(event->ctx != ctx);
|
||||
|
||||
lockdep_assert_held(&ctx->lock);
|
||||
|
||||
if (event->state <= PERF_EVENT_STATE_OFF)
|
||||
@ -3077,12 +3189,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
|
||||
if (!ctx->nr_active || !(is_active & EVENT_ALL))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If we had been multiplexing, no rotations are necessary, now no events
|
||||
* are active.
|
||||
*/
|
||||
ctx->rotate_necessary = 0;
|
||||
|
||||
perf_pmu_disable(ctx->pmu);
|
||||
if (is_active & EVENT_PINNED) {
|
||||
list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
|
||||
@ -3092,6 +3198,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
|
||||
if (is_active & EVENT_FLEXIBLE) {
|
||||
list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
|
||||
group_sched_out(event, cpuctx, ctx);
|
||||
|
||||
/*
|
||||
* Since we cleared EVENT_FLEXIBLE, also clear
|
||||
* rotate_necessary, is will be reset by
|
||||
* ctx_flexible_sched_in() when needed.
|
||||
*/
        ctx->rotate_necessary = 0;
    }
    perf_pmu_enable(ctx->pmu);
}
@ -3388,46 +3501,103 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
    ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}

static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
            int (*func)(struct perf_event *, void *), void *data)
static bool perf_less_group_idx(const void *l, const void *r)
{
    struct perf_event **evt, *evt1, *evt2;
    const struct perf_event *le = l, *re = r;

    return le->group_index < re->group_index;
}

static void swap_ptr(void *l, void *r)
{
    void **lp = l, **rp = r;

    swap(*lp, *rp);
}

static const struct min_heap_callbacks perf_min_heap = {
    .elem_size = sizeof(struct perf_event *),
    .less = perf_less_group_idx,
    .swp = swap_ptr,
};

static void __heap_add(struct min_heap *heap, struct perf_event *event)
{
    struct perf_event **itrs = heap->data;

    if (event) {
        itrs[heap->nr] = event;
        heap->nr++;
    }
}

static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
                struct perf_event_groups *groups, int cpu,
                int (*func)(struct perf_event *, void *),
                void *data)
{
#ifdef CONFIG_CGROUP_PERF
    struct cgroup_subsys_state *css = NULL;
#endif
    /* Space for per CPU and/or any CPU event iterators. */
    struct perf_event *itrs[2];
    struct min_heap event_heap;
    struct perf_event **evt;
    int ret;

    evt1 = perf_event_groups_first(groups, -1);
    evt2 = perf_event_groups_first(groups, cpu);
    if (cpuctx) {
        event_heap = (struct min_heap){
            .data = cpuctx->heap,
            .nr = 0,
            .size = cpuctx->heap_size,
        };

        while (evt1 || evt2) {
            if (evt1 && evt2) {
                if (evt1->group_index < evt2->group_index)
                    evt = &evt1;
                else
                    evt = &evt2;
            } else if (evt1) {
                evt = &evt1;
            } else {
                evt = &evt2;
            }
        lockdep_assert_held(&cpuctx->ctx.lock);

#ifdef CONFIG_CGROUP_PERF
        if (cpuctx->cgrp)
            css = &cpuctx->cgrp->css;
#endif
    } else {
        event_heap = (struct min_heap){
            .data = itrs,
            .nr = 0,
            .size = ARRAY_SIZE(itrs),
        };
        /* Events not within a CPU context may be on any CPU. */
        __heap_add(&event_heap, perf_event_groups_first(groups, -1, NULL));
    }
    evt = event_heap.data;

    __heap_add(&event_heap, perf_event_groups_first(groups, cpu, NULL));

#ifdef CONFIG_CGROUP_PERF
    for (; css; css = css->parent)
        __heap_add(&event_heap, perf_event_groups_first(groups, cpu, css->cgroup));
#endif

    min_heapify_all(&event_heap, &perf_min_heap);

    while (event_heap.nr) {
        ret = func(*evt, data);
        if (ret)
            return ret;

        *evt = perf_event_groups_next(*evt);
        if (*evt)
            min_heapify(&event_heap, 0, &perf_min_heap);
        else
            min_heap_pop(&event_heap, &perf_min_heap);
    }

    return 0;
}
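
The new visit_groups_merge() above is a k-way merge: each heap slot holds an iterator into one rb-tree of events (the any-CPU tree, the per-CPU tree, and one tree per cgroup ancestor), keyed by group_index; it repeatedly visits the smallest head, advances that iterator, and restores the heap invariant. Below is a minimal standalone sketch of the same pattern with plain sorted arrays in place of the event trees; every name in it (struct iter, sift_down, and so on) is illustrative only, not kernel API. In the kernel version the iterator storage comes from cpuctx->heap, which the skip_type: hunk further down preallocates.

/*
 * Standalone illustration (not part of this commit): k-way merge of
 * sorted sources through a binary min-heap, mirroring the pattern
 * used by visit_groups_merge() above.
 */
#include <stdio.h>
#include <stddef.h>

struct iter { const int *pos, *end; };	/* one sorted source */

static void sift_down(struct iter *heap, size_t nr, size_t i)
{
    for (;;) {
        size_t l = 2 * i + 1, r = l + 1, min = i;
        struct iter tmp;

        if (l < nr && *heap[l].pos < *heap[min].pos)
            min = l;
        if (r < nr && *heap[r].pos < *heap[min].pos)
            min = r;
        if (min == i)
            return;
        tmp = heap[i]; heap[i] = heap[min]; heap[min] = tmp;
        i = min;
    }
}

int main(void)
{
    static const int a[] = { 1, 4, 9 }, b[] = { 2, 3, 10 }, c[] = { 5 };
    struct iter heap[] = { { a, a + 3 }, { b, b + 3 }, { c, c + 1 } };
    size_t nr = 3, i;

    for (i = nr / 2; i-- > 0;)		/* cf. min_heapify_all() */
        sift_down(heap, nr, i);

    while (nr) {
        printf("%d ", *heap[0].pos);	/* cf. func(*evt, data) */
        if (++heap[0].pos < heap[0].end) {
            sift_down(heap, nr, 0);	/* cf. min_heapify() */
        } else {
            heap[0] = heap[--nr];	/* cf. min_heap_pop() */
            sift_down(heap, nr, 0);
        }
    }
    printf("\n");			/* prints: 1 2 3 4 5 9 10 */
    return 0;
}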

struct sched_in_data {
    struct perf_event_context *ctx;
    struct perf_cpu_context *cpuctx;
    int can_add_hw;
};

static int pinned_sched_in(struct perf_event *event, void *data)
static int merge_sched_in(struct perf_event *event, void *data)
{
    struct sched_in_data *sid = data;
    struct perf_event_context *ctx = event->ctx;
    struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
    int *can_add_hw = data;

    if (event->state <= PERF_EVENT_STATE_OFF)
        return 0;
@ -3435,39 +3605,17 @@ static int pinned_sched_in(struct perf_event *event, void *data)
    if (!event_filter_match(event))
        return 0;

    if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
        if (!group_sched_in(event, sid->cpuctx, sid->ctx))
            list_add_tail(&event->active_list, &sid->ctx->pinned_active);
    if (group_can_go_on(event, cpuctx, *can_add_hw)) {
        if (!group_sched_in(event, cpuctx, ctx))
            list_add_tail(&event->active_list, get_event_list(event));
    }

    /*
     * If this pinned group hasn't been scheduled,
     * put it in error state.
     */
    if (event->state == PERF_EVENT_STATE_INACTIVE)
        perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
    if (event->state == PERF_EVENT_STATE_INACTIVE) {
        if (event->attr.pinned)
            perf_event_set_state(event, PERF_EVENT_STATE_ERROR);

    return 0;
}

static int flexible_sched_in(struct perf_event *event, void *data)
{
    struct sched_in_data *sid = data;

    if (event->state <= PERF_EVENT_STATE_OFF)
        return 0;

    if (!event_filter_match(event))
        return 0;

    if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
        int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
        if (ret) {
            sid->can_add_hw = 0;
            sid->ctx->rotate_necessary = 1;
            return 0;
        }
        list_add_tail(&event->active_list, &sid->ctx->flexible_active);
        *can_add_hw = 0;
        ctx->rotate_necessary = 1;
    }

    return 0;
@ -3477,30 +3625,28 @@ static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
            struct perf_cpu_context *cpuctx)
{
    struct sched_in_data sid = {
        .ctx = ctx,
        .cpuctx = cpuctx,
        .can_add_hw = 1,
    };
    int can_add_hw = 1;

    visit_groups_merge(&ctx->pinned_groups,
    if (ctx != &cpuctx->ctx)
        cpuctx = NULL;

    visit_groups_merge(cpuctx, &ctx->pinned_groups,
               smp_processor_id(),
               pinned_sched_in, &sid);
               merge_sched_in, &can_add_hw);
}

static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
              struct perf_cpu_context *cpuctx)
{
    struct sched_in_data sid = {
        .ctx = ctx,
        .cpuctx = cpuctx,
        .can_add_hw = 1,
    };
    int can_add_hw = 1;

    visit_groups_merge(&ctx->flexible_groups,
    if (ctx != &cpuctx->ctx)
        cpuctx = NULL;

    visit_groups_merge(cpuctx, &ctx->flexible_groups,
               smp_processor_id(),
               flexible_sched_in, &sid);
               merge_sched_in, &can_add_hw);
}

static void
@ -3841,6 +3987,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
                typeof(*event), group_node);
    }

    /*
     * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
     * finds there are unschedulable events, it will set it again.
     */
    ctx->rotate_necessary = 0;

    return event;
}

@ -6555,6 +6707,11 @@ static void perf_output_read(struct perf_output_handle *handle,
        perf_output_read_one(handle, event, enabled, running);
}

static inline bool perf_sample_save_hw_index(struct perf_event *event)
{
    return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX;
}

void perf_output_sample(struct perf_output_handle *handle,
            struct perf_event_header *header,
            struct perf_sample_data *data,
@ -6643,6 +6800,8 @@ void perf_output_sample(struct perf_output_handle *handle,
                  * sizeof(struct perf_branch_entry);

            perf_output_put(handle, data->br_stack->nr);
            if (perf_sample_save_hw_index(event))
                perf_output_put(handle, data->br_stack->hw_idx);
            perf_output_copy(handle, data->br_stack->entries, size);
        } else {
            /*
@ -6836,6 +6995,9 @@ void perf_prepare_sample(struct perf_event_header *header,
    if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
        int size = sizeof(u64); /* nr */
        if (data->br_stack) {
            if (perf_sample_save_hw_index(event))
                size += sizeof(u64);

            size += data->br_stack->nr
                  * sizeof(struct perf_branch_entry);
        }
@ -10349,6 +10511,9 @@ skip_type:
        cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);

        __perf_mux_hrtimer_init(cpuctx, cpu);

        cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default);
        cpuctx->heap = cpuctx->heap_default;
    }

got_cpu_context:
@ -10794,12 +10959,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
    if (!has_branch_stack(event))
        event->attr.branch_sample_type = 0;

    if (cgroup_fd != -1) {
        err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
        if (err)
            goto err_ns;
    }

    pmu = perf_init_event(event);
    if (IS_ERR(pmu)) {
        err = PTR_ERR(pmu);
@ -10821,6 +10980,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        goto err_pmu;
    }

    if (cgroup_fd != -1) {
        err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader);
        if (err)
            goto err_pmu;
    }

    err = exclusive_event_init(event);
    if (err)
        goto err_pmu;
@ -10881,12 +11046,12 @@ err_per_task:
    exclusive_event_destroy(event);

err_pmu:
    if (is_cgroup_event(event))
        perf_detach_cgroup(event);
    if (event->destroy)
        event->destroy(event);
    module_put(pmu->module);
err_ns:
    if (is_cgroup_event(event))
        perf_detach_cgroup(event);
    if (event->ns)
        put_pid_ns(event->ns);
    if (event->hw.target)

@ -1786,6 +1786,16 @@ config TEST_LIST_SORT

	  If unsure, say N.

config TEST_MIN_HEAP
	tristate "Min heap test"
	depends on DEBUG_KERNEL || m
	help
	  Enable this to turn on min heap function tests. This test is
	  executed only once during system boot (so affects only boot time),
	  or at module load time.

	  If unsure, say N.

config TEST_SORT
	tristate "Array-based sort test"
	depends on DEBUG_KERNEL || m

@ -67,6 +67,7 @@ CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o
obj-$(CONFIG_TEST_LKM) += test_module.o
obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o
obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o

194 lib/test_min_heap.c Normal file
@ -0,0 +1,194 @@
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "min_heap_test: " fmt

/*
 * Test cases for the min max heap.
 */

#include <linux/log2.h>
#include <linux/min_heap.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>

static __init bool less_than(const void *lhs, const void *rhs)
{
    return *(int *)lhs < *(int *)rhs;
}

static __init bool greater_than(const void *lhs, const void *rhs)
{
    return *(int *)lhs > *(int *)rhs;
}

static __init void swap_ints(void *lhs, void *rhs)
{
    int temp = *(int *)lhs;

    *(int *)lhs = *(int *)rhs;
    *(int *)rhs = temp;
}

static __init int pop_verify_heap(bool min_heap,
                struct min_heap *heap,
                const struct min_heap_callbacks *funcs)
{
    int *values = heap->data;
    int err = 0;
    int last;

    last = values[0];
    min_heap_pop(heap, funcs);
    while (heap->nr > 0) {
        if (min_heap) {
            if (last > values[0]) {
                pr_err("error: expected %d <= %d\n", last,
                    values[0]);
                err++;
            }
        } else {
            if (last < values[0]) {
                pr_err("error: expected %d >= %d\n", last,
                    values[0]);
                err++;
            }
        }
        last = values[0];
        min_heap_pop(heap, funcs);
    }
    return err;
}

static __init int test_heapify_all(bool min_heap)
{
    int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0,
             -3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
    struct min_heap heap = {
        .data = values,
        .nr = ARRAY_SIZE(values),
        .size = ARRAY_SIZE(values),
    };
    struct min_heap_callbacks funcs = {
        .elem_size = sizeof(int),
        .less = min_heap ? less_than : greater_than,
        .swp = swap_ints,
    };
    int i, err;

    /* Test with known set of values. */
    min_heapify_all(&heap, &funcs);
    err = pop_verify_heap(min_heap, &heap, &funcs);


    /* Test with randomly generated values. */
    heap.nr = ARRAY_SIZE(values);
    for (i = 0; i < heap.nr; i++)
        values[i] = get_random_int();

    min_heapify_all(&heap, &funcs);
    err += pop_verify_heap(min_heap, &heap, &funcs);

    return err;
}

static __init int test_heap_push(bool min_heap)
{
    const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
                 -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
    int values[ARRAY_SIZE(data)];
    struct min_heap heap = {
        .data = values,
        .nr = 0,
        .size = ARRAY_SIZE(values),
    };
    struct min_heap_callbacks funcs = {
        .elem_size = sizeof(int),
        .less = min_heap ? less_than : greater_than,
        .swp = swap_ints,
    };
    int i, temp, err;

    /* Test with known set of values copied from data. */
    for (i = 0; i < ARRAY_SIZE(data); i++)
        min_heap_push(&heap, &data[i], &funcs);

    err = pop_verify_heap(min_heap, &heap, &funcs);

    /* Test with randomly generated values. */
    while (heap.nr < heap.size) {
        temp = get_random_int();
        min_heap_push(&heap, &temp, &funcs);
    }
    err += pop_verify_heap(min_heap, &heap, &funcs);

    return err;
}

static __init int test_heap_pop_push(bool min_heap)
{
    const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0,
                 -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF };
    int values[ARRAY_SIZE(data)];
    struct min_heap heap = {
        .data = values,
        .nr = 0,
        .size = ARRAY_SIZE(values),
    };
    struct min_heap_callbacks funcs = {
        .elem_size = sizeof(int),
        .less = min_heap ? less_than : greater_than,
        .swp = swap_ints,
    };
    int i, temp, err;

    /* Fill values with data to pop and replace. */
    temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
    for (i = 0; i < ARRAY_SIZE(data); i++)
        min_heap_push(&heap, &temp, &funcs);

    /* Test with known set of values copied from data. */
    for (i = 0; i < ARRAY_SIZE(data); i++)
        min_heap_pop_push(&heap, &data[i], &funcs);

    err = pop_verify_heap(min_heap, &heap, &funcs);

    heap.nr = 0;
    for (i = 0; i < ARRAY_SIZE(data); i++)
        min_heap_push(&heap, &temp, &funcs);

    /* Test with randomly generated values. */
    for (i = 0; i < ARRAY_SIZE(data); i++) {
        temp = get_random_int();
        min_heap_pop_push(&heap, &temp, &funcs);
    }
    err += pop_verify_heap(min_heap, &heap, &funcs);

    return err;
}

static int __init test_min_heap_init(void)
{
    int err = 0;

    err += test_heapify_all(true);
    err += test_heapify_all(false);
    err += test_heap_push(true);
    err += test_heap_push(false);
    err += test_heap_pop_push(true);
    err += test_heap_pop_push(false);
    if (err) {
        pr_err("test failed with %d errors\n", err);
        return -EINVAL;
    }
    pr_info("test passed\n");
    return 0;
}
module_init(test_min_heap_init);

static void __exit test_min_heap_exit(void)
{
    /* do nothing */
}
module_exit(test_min_heap_exit);

MODULE_LICENSE("GPL");

@ -15,13 +15,11 @@
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>

#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

#define SOC_INTEL_IS_CPU(soc, type)                     \
static inline bool soc_intel_is_##soc(void)             \
{                                                       \
    static const struct x86_cpu_id soc##_cpu_ids[] = {  \
        ICPU(type),                                     \
        X86_MATCH_INTEL_FAM6_MODEL(type, NULL),         \
        {}                                              \
    };                                                  \
    const struct x86_cpu_id *id;                        \
@ -32,11 +30,11 @@ static inline bool soc_intel_is_##soc(void) \
    return false;                                       \
}

SOC_INTEL_IS_CPU(byt, INTEL_FAM6_ATOM_SILVERMONT);
SOC_INTEL_IS_CPU(cht, INTEL_FAM6_ATOM_AIRMONT);
SOC_INTEL_IS_CPU(apl, INTEL_FAM6_ATOM_GOLDMONT);
SOC_INTEL_IS_CPU(glk, INTEL_FAM6_ATOM_GOLDMONT_PLUS);
SOC_INTEL_IS_CPU(cml, INTEL_FAM6_KABYLAKE_L);
SOC_INTEL_IS_CPU(byt, ATOM_SILVERMONT);
SOC_INTEL_IS_CPU(cht, ATOM_AIRMONT);
SOC_INTEL_IS_CPU(apl, ATOM_GOLDMONT);
SOC_INTEL_IS_CPU(glk, ATOM_GOLDMONT_PLUS);
SOC_INTEL_IS_CPU(cml, KABYLAKE_L);

static inline bool soc_intel_is_byt_cr(struct platform_device *pdev)
{
@ -181,6 +181,8 @@ enum perf_branch_sample_type_shift {

    PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */

    PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */

    PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};

@ -208,6 +210,8 @@ enum perf_branch_sample_type {
    PERF_SAMPLE_BRANCH_TYPE_SAVE =
        1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,

    PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,

    PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

@ -853,7 +857,9 @@ enum perf_event_type {
     *    char  data[size];}&& PERF_SAMPLE_RAW
     *
     *    { u64 nr;
     *      { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
     *      { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
     *      { u64 from, to, flags } lbr[nr];
     *    } && PERF_SAMPLE_BRANCH_STACK
     *
     *    { u64 abi; # enum perf_sample_regs_abi
     *      u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
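
For readers of the ABI change above: when PERF_SAMPLE_BRANCH_HW_INDEX is set in attr.branch_sample_type, one extra u64 (hw_idx, the low-level index of the raw branch records) is emitted between nr and the lbr[] entries of a PERF_SAMPLE_BRANCH_STACK sample. A hedged userspace parsing sketch follows; struct branch_entry, parse_branch_stack(), and the fabricated payload are illustrative only, not part of the ABI headers.

#include <stdint.h>
#include <stdio.h>

struct branch_entry { uint64_t from, to, flags; };	/* cf. perf_branch_entry */

/* Parse the branch-stack payload of a sample; with HW_INDEX one extra
 * u64 (hw_idx) sits between nr and the lbr[] array. */
static const uint64_t *parse_branch_stack(const uint64_t *p, int has_hw_idx)
{
    uint64_t nr = *p++;
    int64_t hw_idx = has_hw_idx ? (int64_t)*p++ : -1;
    const struct branch_entry *lbr = (const struct branch_entry *)p;
    uint64_t i;

    printf("nr=%llu hw_idx=%lld\n",
           (unsigned long long)nr, (long long)hw_idx);
    for (i = 0; i < nr; i++)
        printf("  %#llx -> %#llx\n",
               (unsigned long long)lbr[i].from,
               (unsigned long long)lbr[i].to);
    return (const uint64_t *)(lbr + nr);	/* rest of the sample */
}

int main(void)
{
    /* A fabricated two-entry payload laid out as the new format emits
     * it: nr, hw_idx, then {from, to, flags} triples. */
    uint64_t payload[] = { 2, 14,
                           0x400100, 0x400200, 0,
                           0x400300, 0x400400, 0 };

    parse_branch_stack(payload, /*has_hw_idx=*/1);
    return 0;
}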

@ -1,2 +1,3 @@
libapi-y += fs.o
libapi-y += tracing_path.o
libapi-y += cgroup.o

67 tools/lib/api/fs/cgroup.c Normal file
@ -0,0 +1,67 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/stringify.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "fs.h"

int cgroupfs_find_mountpoint(char *buf, size_t maxlen, const char *subsys)
{
    FILE *fp;
    char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
    char path_v1[PATH_MAX + 1], path_v2[PATH_MAX + 2], *path;
    char *token, *saved_ptr = NULL;

    fp = fopen("/proc/mounts", "r");
    if (!fp)
        return -1;

    /*
     * In order to handle a split hierarchy, we need to scan /proc/mounts
     * and inspect every cgroupfs mount point to find one that has the
     * perf_event subsystem.
     */
    path_v1[0] = '\0';
    path_v2[0] = '\0';

    while (fscanf(fp, "%*s %"__stringify(PATH_MAX)"s %"__stringify(PATH_MAX)"s %"
                __stringify(PATH_MAX)"s %*d %*d\n",
                mountpoint, type, tokens) == 3) {

        if (!path_v1[0] && !strcmp(type, "cgroup")) {

            token = strtok_r(tokens, ",", &saved_ptr);

            while (token != NULL) {
                if (subsys && !strcmp(token, subsys)) {
                    strcpy(path_v1, mountpoint);
                    break;
                }
                token = strtok_r(NULL, ",", &saved_ptr);
            }
        }

        if (!path_v2[0] && !strcmp(type, "cgroup2"))
            strcpy(path_v2, mountpoint);

        if (path_v1[0] && path_v2[0])
            break;
    }
    fclose(fp);

    if (path_v1[0])
        path = path_v1;
    else if (path_v2[0])
        path = path_v2;
    else
        return -1;

    if (strlen(path) < maxlen) {
        strcpy(buf, path);
        return 0;
    }
    return -1;
}
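
A hedged usage sketch for the new helper: it prefers a cgroup v1 mount carrying the requested controller and falls back to a unified cgroup2 mount, returning 0 on success and -1 otherwise. The calling program below and its linkage against libapi (so that "fs.h" resolves) are assumptions, not part of this commit.

/* Illustrative caller, assuming a build linked against libapi. */
#include <stdio.h>
#include <limits.h>
#include "fs.h"

int main(void)
{
    char mnt[PATH_MAX];

    if (cgroupfs_find_mountpoint(mnt, sizeof(mnt), "perf_event")) {
        fprintf(stderr, "no cgroupfs mount with perf_event found\n");
        return 1;
    }
    printf("perf_event cgroup mounted at %s\n", mnt);
    return 0;
}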

@ -28,6 +28,8 @@ FS(bpf_fs)

#undef FS


int cgroupfs_find_mountpoint(char *buf, size_t maxlen, const char *subsys);

int filename__read_int(const char *filename, int *value);
int filename__read_ull(const char *filename, unsigned long long *value);
int filename__read_xll(const char *filename, unsigned long long *value);

83 tools/lib/perf/Documentation/examples/counting.c Normal file
@ -0,0 +1,83 @@
#include <linux/perf_event.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/mmap.h>
#include <perf/core.h>
#include <perf/event.h>
#include <stdio.h>
#include <unistd.h>

static int libperf_print(enum libperf_print_level level,
             const char *fmt, va_list ap)
{
    return vfprintf(stderr, fmt, ap);
}

int main(int argc, char **argv)
{
    int count = 100000, err = 0;
    struct perf_evlist *evlist;
    struct perf_evsel *evsel;
    struct perf_thread_map *threads;
    struct perf_counts_values counts;

    struct perf_event_attr attr1 = {
        .type        = PERF_TYPE_SOFTWARE,
        .config      = PERF_COUNT_SW_CPU_CLOCK,
        .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING,
        .disabled    = 1,
    };
    struct perf_event_attr attr2 = {
        .type        = PERF_TYPE_SOFTWARE,
        .config      = PERF_COUNT_SW_TASK_CLOCK,
        .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED|PERF_FORMAT_TOTAL_TIME_RUNNING,
        .disabled    = 1,
    };

    libperf_init(libperf_print);

    threads = perf_thread_map__new_dummy();
    if (!threads) {
        fprintf(stderr, "failed to create threads\n");
        return -1;
    }
    perf_thread_map__set_pid(threads, 0, 0);

    evlist = perf_evlist__new();
    if (!evlist) {
        fprintf(stderr, "failed to create evlist\n");
        goto out_threads;
    }

    evsel = perf_evsel__new(&attr1);
    if (!evsel) {
        fprintf(stderr, "failed to create evsel1\n");
        goto out_evlist;
    }
    perf_evlist__add(evlist, evsel);

    evsel = perf_evsel__new(&attr2);
    if (!evsel) {
        fprintf(stderr, "failed to create evsel2\n");
        goto out_evlist;
    }
    perf_evlist__add(evlist, evsel);

    perf_evlist__set_maps(evlist, NULL, threads);

    err = perf_evlist__open(evlist);
    if (err) {
        fprintf(stderr, "failed to open evsel\n");
        goto out_evlist;
    }

    perf_evlist__enable(evlist);
    while (count--);
    perf_evlist__disable(evlist);

    perf_evlist__for_each_evsel(evlist, evsel) {
        perf_evsel__read(evsel, 0, 0, &counts);
        fprintf(stdout, "count %llu, enabled %llu, run %llu\n",
            counts.val, counts.ena, counts.run);
    }

    perf_evlist__close(evlist);

out_evlist:
    perf_evlist__delete(evlist);
out_threads:
    perf_thread_map__put(threads);
    return err;
}
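
The new counting.c example opens two disabled software counters (CPU clock and task clock) on the calling process, enables them around a short busy loop, and reads each counter back; the enabled and running times it prints next to the raw count are what callers typically use to scale results when the kernel has had to multiplex events.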

@ -5541,7 +5541,7 @@ static void print_event_time(struct tep_handle *tep, struct trace_seq *s,
    if (p10 > 1 && p10 < time)
        trace_seq_printf(s, "%5llu.%0*llu", time / p10, prec, time % p10);
    else
        trace_seq_printf(s, "%12llu\n", time);
        trace_seq_printf(s, "%12llu", time);
}

struct print_event_type {
Some files were not shown because too many files have changed in this diff.