x86/cpu/topology: Get rid of cpuinfo::x86_max_cores
Now that __num_cores_per_package and __num_threads_per_package are available, cpuinfo::x86_max_cores and the related math all over the place can be replaced with the ready to consume data.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Michael Kelley <mhklinux@outlook.com>
Tested-by: Sohil Mehta <sohil.mehta@intel.com>
Link: https://lore.kernel.org/r/20240213210253.176147806@linutronix.de
parent fd43b8ae76
commit 89b0f15f40
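For orientation, a minimal sketch of the accessors this commit switches to, assuming they are trivial readers of the per-package counts introduced by the parent commits; the shapes below are illustrative, not copied from the tree:

	/* Illustrative only: assumed shape of the replacement accessors. */
	extern unsigned int __num_cores_per_package;
	extern unsigned int __num_threads_per_package;

	static inline unsigned int topology_num_cores_per_package(void)
	{
		return __num_cores_per_package;
	}

	static inline unsigned int topology_num_threads_per_package(void)
	{
		return __num_threads_per_package;
	}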
@@ -47,17 +47,21 @@ AMD nomenclature for package is 'Node'.
 
 Package-related topology information in the kernel:
 
-  - cpuinfo_x86.x86_max_cores:
+  - topology_num_threads_per_package()
 
-    The number of cores in a package. This information is retrieved via CPUID.
+    The number of threads in a package.
 
-  - cpuinfo_x86.x86_max_dies:
+  - topology_num_cores_per_package()
 
-    The number of dies in a package. This information is retrieved via CPUID.
+    The number of cores in a package.
+
+  - topology_max_dies_per_package()
+
+    The maximum number of dies in a package.
 
   - cpuinfo_x86.topo.die_id:
 
-    The physical ID of the die. This information is retrieved via CPUID.
+    The physical ID of the die.
 
   - cpuinfo_x86.topo.pkg_id:
@@ -96,16 +100,6 @@ are SMT- or CMT-type threads.
 AMDs nomenclature for a CMT core is "Compute Unit". The kernel always uses
 "core".
 
-Core-related topology information in the kernel:
-
-  - smp_num_siblings:
-
-    The number of threads in a core. The number of threads in a package can be
-    calculated by::
-
-	threads_per_package = cpuinfo_x86.x86_max_cores * smp_num_siblings
-
-
 Threads
 =======
 
 A thread is a single scheduling unit. It's the equivalent to a logical Linux
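The derived formula removed above is superseded by a direct query. A short usage sketch in the documentation's own style (accessor names as documented; the local variable names are illustrative)::

	/* Previously: threads_per_package = cpuinfo_x86.x86_max_cores * smp_num_siblings */
	unsigned int threads_per_package = topology_num_threads_per_package();
	unsigned int cores_per_package = topology_num_cores_per_package();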
@@ -1221,8 +1221,8 @@ void nhmex_uncore_cpu_init(void)
 		uncore_nhmex = true;
 	else
 		nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events;
-	if (nhmex_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
-		nhmex_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	if (nhmex_uncore_cbox.num_boxes > topology_num_cores_per_package())
+		nhmex_uncore_cbox.num_boxes = topology_num_cores_per_package();
 	uncore_msr_uncores = nhmex_msr_uncores;
 }
 /* end of Nehalem-EX uncore support */
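Each uncore change below is the same clamp: the number of C-box PMU instances is bounded by the per-package core count instead of the removed cpuinfo field. An illustrative helper capturing the pattern (invented for this note, not part of the commit):

	/* Illustrative only: clamp a box count to the per-package core count. */
	static void uncore_clamp_num_boxes(struct intel_uncore_type *type)
	{
		if (type->num_boxes > topology_num_cores_per_package())
			type->num_boxes = topology_num_cores_per_package();
	}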
@@ -364,8 +364,8 @@ static struct intel_uncore_type *snb_msr_uncores[] = {
 void snb_uncore_cpu_init(void)
 {
 	uncore_msr_uncores = snb_msr_uncores;
-	if (snb_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
-		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	if (snb_uncore_cbox.num_boxes > topology_num_cores_per_package())
+		snb_uncore_cbox.num_boxes = topology_num_cores_per_package();
 }
 
 static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
@@ -428,8 +428,8 @@ static struct intel_uncore_type *skl_msr_uncores[] = {
 void skl_uncore_cpu_init(void)
 {
 	uncore_msr_uncores = skl_msr_uncores;
-	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
-		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	if (skl_uncore_cbox.num_boxes > topology_num_cores_per_package())
+		skl_uncore_cbox.num_boxes = topology_num_cores_per_package();
 	snb_uncore_arb.ops = &skl_uncore_msr_ops;
 }
 
@@ -1172,8 +1172,8 @@ static struct intel_uncore_type *snbep_msr_uncores[] = {
 
 void snbep_uncore_cpu_init(void)
 {
-	if (snbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
-		snbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	if (snbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
+		snbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
 	uncore_msr_uncores = snbep_msr_uncores;
 }
 
@@ -1845,8 +1845,8 @@ static struct intel_uncore_type *ivbep_msr_uncores[] = {
 
 void ivbep_uncore_cpu_init(void)
 {
-	if (ivbep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
-		ivbep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	if (ivbep_uncore_cbox.num_boxes > topology_num_cores_per_package())
+		ivbep_uncore_cbox.num_boxes = topology_num_cores_per_package();
 	uncore_msr_uncores = ivbep_msr_uncores;
 }
 
@@ -2917,8 +2917,8 @@ static bool hswep_has_limit_sbox(unsigned int device)
 
 void hswep_uncore_cpu_init(void)
 {
-	if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
-		hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	if (hswep_uncore_cbox.num_boxes > topology_num_cores_per_package())
+		hswep_uncore_cbox.num_boxes = topology_num_cores_per_package();
 
 	/* Detect 6-8 core systems with only two SBOXes */
 	if (hswep_has_limit_sbox(HSWEP_PCU_DID))
@@ -3280,8 +3280,8 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
 
 void bdx_uncore_cpu_init(void)
 {
-	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
-		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	if (bdx_uncore_cbox.num_boxes > topology_num_cores_per_package())
+		bdx_uncore_cbox.num_boxes = topology_num_cores_per_package();
 	uncore_msr_uncores = bdx_msr_uncores;
 
 	/* Detect systems with no SBOXes */
@@ -149,8 +149,6 @@ struct cpuinfo_x86 {
 	unsigned long		loops_per_jiffy;
 	/* protected processor identification number */
 	u64			ppin;
-	/* cpuid returned max cores value: */
-	u16			x86_max_cores;
 	u16			x86_clflush_size;
 	/* number of cores as seen by the OS: */
 	u16			booted_cores;
@@ -301,7 +301,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 	eax->split.type = types[leaf];
 	eax->split.level = levels[leaf];
 	eax->split.num_threads_sharing = 0;
-	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
+	eax->split.num_cores_on_die = topology_num_cores_per_package();
 
 
 	if (assoc == 0xffff)
@@ -1738,7 +1738,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_model = c->x86_stepping = 0;	/* So far unknown... */
 	c->x86_vendor_id[0] = '\0';		/* Unset */
 	c->x86_model_id[0] = '\0';		/* Unset */
-	c->x86_max_cores = 1;
 #ifdef CONFIG_X86_64
 	c->x86_clflush_size = 64;
 	c->x86_phys_bits = 36;
@@ -28,7 +28,8 @@ static int cpu_debug_show(struct seq_file *m, void *p)
 	seq_printf(m, "l2c_id: %u\n", c->topo.l2c_id);
 	seq_printf(m, "amd_node_id: %u\n", c->topo.amd_node_id);
 	seq_printf(m, "amd_nodes_per_pkg: %u\n", topology_amd_nodes_per_pkg());
-	seq_printf(m, "max_cores: %u\n", c->x86_max_cores);
+	seq_printf(m, "num_threads: %u\n", __num_threads_per_package);
+	seq_printf(m, "num_cores: %u\n", __num_cores_per_package);
 	seq_printf(m, "max_dies_per_pkg: %u\n", __max_dies_per_package);
 	seq_printf(m, "max_threads_per_core:%u\n", __max_threads_per_core);
 	return 0;
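On a hypothetical part with 8 cores and SMT2, the affected lines of the per-CPU debugfs dump would then read roughly as follows (values invented for illustration; the layout follows the seq_printf() format strings above):

	num_threads: 16
	num_cores: 8
	max_dies_per_pkg: 1
	max_threads_per_core:2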
@@ -430,10 +430,9 @@ static void trigger_thr_int(void *info)
 
 static u32 get_nbc_for_node(int node_id)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
 	u32 cores_per_node;
 
-	cores_per_node = (c->x86_max_cores * __max_threads_per_core) / topology_amd_nodes_per_pkg();
+	cores_per_node = topology_num_threads_per_package() / topology_amd_nodes_per_pkg();
 	return cores_per_node * node_id;
 }
 
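The rewritten helper computes the same base value, since cores-per-package times threads-per-core is the per-package thread count. For a hypothetical two-node package exposing 32 threads, cores_per_node = 32 / 2 = 16, so get_nbc_for_node(1) returns 16 (numbers invented for illustration).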
@@ -641,7 +641,7 @@ static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c)
 {
 	u64 llc_size = c->x86_cache_size * 1024ULL;
 
-	do_div(llc_size, c->x86_max_cores);
+	do_div(llc_size, topology_num_cores_per_package());
 	llc_size_per_core = (unsigned int)llc_size;
 }
 
@@ -155,9 +155,6 @@ static void topo_set_ids(struct topo_scan *tscan)
 	c->topo.core_id = (apicid & topo_domain_mask(TOPO_PKG_DOMAIN)) >>
 		x86_topo_system.dom_shifts[TOPO_SMT_DOMAIN];
 
-	/* Maximum number of cores on this package */
-	c->x86_max_cores = topology_unit_count(apicid, TOPO_CORE_DOMAIN, TOPO_PKG_DOMAIN);
-
 	c->topo.amd_node_id = tscan->amd_node_id;
 
 	if (c->x86_vendor == X86_VENDOR_AMD)
@@ -564,7 +564,7 @@ static void __init build_sched_topology(void)
 void set_cpu_sibling_map(int cpu)
 {
 	bool has_smt = __max_threads_per_core > 1;
-	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
+	bool has_mp = has_smt || topology_num_cores_per_package() > 1;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct cpuinfo_x86 *o;
 	int i, threads;
@@ -451,7 +451,7 @@ static int vangogh_init_smc_tables(struct smu_context *smu)
 
 #ifdef CONFIG_X86
 	/* AMD x86 APU only */
-	smu->cpu_core_num = boot_cpu_data.x86_max_cores;
+	smu->cpu_core_num = topology_num_cores_per_package();
 #else
 	smu->cpu_core_num = 4;
 #endif
@@ -209,7 +209,7 @@ static ssize_t power1_average_show(struct device *dev,
 	 * With the new x86 topology modelling, x86_max_cores is the
 	 * compute unit number.
 	 */
-	cu_num = boot_cpu_data.x86_max_cores;
+	cu_num = topology_num_cores_per_package();
 
 	ret = read_registers(data);
 	if (ret)