mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-18 03:06:43 +00:00
4b92d4add5
DEFINE_SMP_CALL_CACHE_FUNCTION() was useful before the CPU hotplug rework to ensure that the cache-related functions were called on the upcoming CPU, because the notifier itself could run on any online CPU. The hotplug state machine now guarantees that the callbacks are invoked on the upcoming CPU, so there is no need for this SMP function call obfuscation. That indirection was missed when the hotplug notifiers were converted.

This also solves the problem of ARM64 init_cache_level() invoking ACPI functions which take a semaphore in that context. That is invalid because SMP function calls run with interrupts disabled. Running it just from the callback, in the context of the CPU hotplug thread, solves this.

Fixes: 8571890e1513 ("arm64: Add support for ACPI based firmware tables")
Reported-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Guenter Roeck <linux@roeck-us.net>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/871r69ersb.ffs@tglx
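For context on the removed indirection: the macro generated a wrapper that bounced the arch callback through smp_call_function_single() so it ran on the target CPU. A minimal before/after sketch follows; the _init_cache_level wrapper name, the __init_cache_level hook name, and the macro body are paraphrased from the pattern described above, not quoted from the tree:

/*
 * Old pattern (removed): run the arch hook on the target CPU via an
 * IPI. IPI handlers run with interrupts disabled, so taking a
 * semaphore there (as the ARM64 ACPI path does) is invalid.
 */
static void _init_cache_level(void *ret)	/* hypothetical wrapper name */
{
	*(int *)ret = __init_cache_level(smp_processor_id());
}

int init_cache_level(unsigned int cpu)
{
	int ret;

	smp_call_function_single(cpu, _init_cache_level, &ret, true);
	return ret;
}

/*
 * New pattern: the hotplug state machine invokes the cacheinfo
 * callback on the upcoming CPU, in the context of the CPU hotplug
 * thread, so init_cache_level(cpu) is called directly and may sleep.
 */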
117 lines
2.7 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * MIPS cacheinfo support
 */
#include <linux/cacheinfo.h>

/* Populates leaf and increments to next leaf */
#define populate_cache(cache, leaf, c_level, c_type)		\
do {								\
	leaf->type = c_type;					\
	leaf->level = c_level;					\
	leaf->coherency_line_size = c->cache.linesz;		\
	leaf->number_of_sets = c->cache.sets;			\
	leaf->ways_of_associativity = c->cache.ways;		\
	leaf->size = c->cache.linesz * c->cache.sets *		\
		c->cache.ways;					\
	leaf++;							\
} while (0)

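/*
 * Count the cache levels and leaves present on this CPU so the generic
 * cacheinfo core can size this_cpu_ci->info_list before the leaves are
 * populated. Returns -ENOENT when the cache description is unusable.
 */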
int init_cache_level(unsigned int cpu)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	int levels = 0, leaves = 0;

	/*
	 * If Dcache is not set, we assume the cache structures
	 * are not properly initialized.
	 */
	if (c->dcache.waysize)
		levels += 1;
	else
		return -ENOENT;

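	/* A split L1 (I + D) contributes two leaves, a unified L1 just one */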
	leaves += (c->icache.waysize) ? 2 : 1;

	if (c->vcache.waysize) {
		levels++;
		leaves++;
	}

	if (c->scache.waysize) {
		levels++;
		leaves++;
	}

	if (c->tcache.waysize) {
		levels++;
		leaves++;
	}

	this_cpu_ci->num_levels = levels;
	this_cpu_ci->num_leaves = leaves;
	return 0;
}

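/* Mark every CPU that is a sibling of @cpu (same core) in @cpu_map */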
static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
{
	int cpu1;

	for_each_possible_cpu(cpu1)
		if (cpus_are_siblings(cpu, cpu1))
			cpumask_set_cpu(cpu1, cpu_map);
}

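/* Mark every CPU in the same cluster as @cpu in @cpu_map */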
static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
{
	int cpu1;
	int cluster = cpu_cluster(&cpu_data[cpu]);

	for_each_possible_cpu(cpu1)
		if (cpu_cluster(&cpu_data[cpu1]) == cluster)
			cpumask_set_cpu(cpu1, cpu_map);
}

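/*
 * Fill the info_list sized by init_cache_level(): one leaf per cache,
 * walking upwards from L1, with each shared_cpu_map reflecting how far
 * that cache is shared (per core for I/D/V caches, per cluster for S).
 */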
int populate_cache_leaves(unsigned int cpu)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	int level = 1;

	if (c->icache.waysize) {
		/* I/D caches are per core */
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
		level++;
	} else {
		populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	if (c->vcache.waysize) {
		/* Vcache is per core as well */
		fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
		populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	if (c->scache.waysize) {
		/* Scache is per cluster */
		fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
		populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
		level++;
	}

	if (c->tcache.waysize)
		populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);

	this_cpu_ci->cpu_map_populated = true;

	return 0;
}