arch_numa: switch over to numa_memblks
Until now arch_numa was directly translating firmware NUMA information
to memblock. Using numa_memblks as an intermediate step has a few
advantages:

* alignment with the more battle-tested x86 implementation
* availability of NUMA emulation
* maintaining node information for not-yet-populated memory

Adjust a few places in numa_memblks to compile with 32-bit phys_addr_t,
replace the current numa_add_memblk() and __node_distance()
functionality in arch_numa with the implementation based on
numa_memblks, and add the functions required by numa_emulation.

[rppt@kernel.org: fix section mismatch]
Link: https://lkml.kernel.org/r/ZrO6cExVz1He_yPn@kernel.org
[rppt@kernel.org: PFN_PHYS() translation is unnecessary here]
Link: https://lkml.kernel.org/r/Zs2T5wkSYO9MGcab@kernel.org
Link: https://lkml.kernel.org/r/20240807064110.1003856-25-rppt@kernel.org
Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Tested-by: Zi Yan <ziy@nvidia.com> # for x86_64 and arm64
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Tested-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> [arm64 + CXL via QEMU]
Acked-by: Dan Williams <dan.j.williams@intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiaxun Yang <jiaxun.yang@flygoat.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Rob Herring (Arm) <robh@kernel.org>
Cc: Samuel Holland <samuel.holland@sifive.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 7e488677a5
commit 767507654c
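The shape of the change, as a minimal sketch (illustrative only:
arch_firmware_numa_init() is a hypothetical stand-in for the arch's real
parser, e.g. the devicetree or ACPI SRAT walk). The arch callback still
reports ranges with numa_add_memblk(), but that call now resolves to the
shared mm/numa_memblks.c implementation, which records the range in
numa_meminfo; numa_memblks_init() later applies the collected info to
memblock:

    /* sketch, not part of the patch */
    static int __init arch_firmware_numa_init(void)   /* hypothetical name */
    {
            /* one call per firmware affinity entry; 0 on success */
            return numa_add_memblk(0 /* nid */, 0x80000000ULL, 0x100000000ULL);
    }

    /* arch_numa's numa_init() now delegates to the shared code: */
    ret = numa_memblks_init(arch_firmware_numa_init, false);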
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -226,6 +226,7 @@ config GENERIC_ARCH_TOPOLOGY
 
 config GENERIC_ARCH_NUMA
         bool
+        select NUMA_MEMBLKS
         help
           Enable support for generic NUMA implementation. Currently, RISC-V
           and ARM64 use it.
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -12,14 +12,12 @@
 #include <linux/memblock.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/numa_memblks.h>
 
 #include <asm/sections.h>
 
-nodemask_t numa_nodes_parsed __initdata;
 static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
 
-static int numa_distance_cnt;
-static u8 *numa_distance;
 bool numa_off;
 
 static __init int numa_parse_early_param(char *opt)
@@ -28,6 +26,8 @@ static __init int numa_parse_early_param(char *opt)
                 return -EINVAL;
         if (str_has_prefix(opt, "off"))
                 numa_off = true;
+        if (!strncmp(opt, "fake=", 5))
+                return numa_emu_cmdline(opt + 5);
 
         return 0;
 }
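The new "fake=" branch hooks arch_numa into the NUMA emulation code now
shared through numa_memblks. arch_numa registers this parser as the
"numa" early parameter, so the option is spelled the same way as on x86;
an illustrative kernel command line:

    numa=fake=4     # carve the machine into four emulated nodes
    numa=off        # disable NUMA, as before

Everything after "fake=" is handed unparsed to numa_emu_cmdline().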
@@ -59,6 +59,7 @@ EXPORT_SYMBOL(cpumask_of_node);
 
 #endif
 
+#ifndef CONFIG_NUMA_EMU
 static void numa_update_cpu(unsigned int cpu, bool remove)
 {
         int nid = cpu_to_node(cpu);
@@ -81,6 +82,7 @@ void numa_remove_cpu(unsigned int cpu)
 {
         numa_update_cpu(cpu, true);
 }
+#endif
 
 void numa_clear_node(unsigned int cpu)
 {
@@ -142,7 +144,7 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(__per_cpu_offset);
 
-int __init early_cpu_to_node(int cpu)
+int early_cpu_to_node(int cpu)
 {
         return cpu_to_node_map[cpu];
 }
@@ -187,30 +189,6 @@ void __init setup_per_cpu_areas(void)
 }
 #endif
 
-/**
- * numa_add_memblk() - Set node id to memblk
- * @nid: NUMA node ID of the new memblk
- * @start: Start address of the new memblk
- * @end: End address of the new memblk
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int __init numa_add_memblk(int nid, u64 start, u64 end)
-{
-        int ret;
-
-        ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
-        if (ret < 0) {
-                pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n",
-                        start, (end - 1), nid);
-                return ret;
-        }
-
-        node_set(nid, numa_nodes_parsed);
-        return ret;
-}
-
 /*
  * Initialize NODE_DATA for a node on the local memory
  */
@@ -226,116 +204,9 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
         NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
 }
 
-/*
- * numa_free_distance
- *
- * The current table is freed.
- */
-void __init numa_free_distance(void)
-{
-        size_t size;
-
-        if (!numa_distance)
-                return;
-
-        size = numa_distance_cnt * numa_distance_cnt *
-                sizeof(numa_distance[0]);
-
-        memblock_free(numa_distance, size);
-        numa_distance_cnt = 0;
-        numa_distance = NULL;
-}
-
-/*
- * Create a new NUMA distance table.
- */
-static int __init numa_alloc_distance(void)
-{
-        size_t size;
-        int i, j;
-
-        size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]);
-        numa_distance = memblock_alloc(size, PAGE_SIZE);
-        if (WARN_ON(!numa_distance))
-                return -ENOMEM;
-
-        numa_distance_cnt = nr_node_ids;
-
-        /* fill with the default distances */
-        for (i = 0; i < numa_distance_cnt; i++)
-                for (j = 0; j < numa_distance_cnt; j++)
-                        numa_distance[i * numa_distance_cnt + j] = i == j ?
-                                LOCAL_DISTANCE : REMOTE_DISTANCE;
-
-        pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt);
-
-        return 0;
-}
-
-/**
- * numa_set_distance() - Set inter node NUMA distance from node to node.
- * @from: the 'from' node to set distance
- * @to: the 'to' node to set distance
- * @distance: NUMA distance
- *
- * Set the distance from node @from to @to to @distance.
- * If distance table doesn't exist, a warning is printed.
- *
- * If @from or @to is higher than the highest known node or lower than zero
- * or @distance doesn't make sense, the call is ignored.
- */
-void __init numa_set_distance(int from, int to, int distance)
-{
-        if (!numa_distance) {
-                pr_warn_once("Warning: distance table not allocated yet\n");
-                return;
-        }
-
-        if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
-                        from < 0 || to < 0) {
-                pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n",
-                             from, to, distance);
-                return;
-        }
-
-        if ((u8)distance != distance ||
-            (from == to && distance != LOCAL_DISTANCE)) {
-                pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
-                             from, to, distance);
-                return;
-        }
-
-        numa_distance[from * numa_distance_cnt + to] = distance;
-}
-
-/*
- * Return NUMA distance @from to @to
- */
-int __node_distance(int from, int to)
-{
-        if (from >= numa_distance_cnt || to >= numa_distance_cnt)
-                return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
-        return numa_distance[from * numa_distance_cnt + to];
-}
-EXPORT_SYMBOL(__node_distance);
-
 static int __init numa_register_nodes(void)
 {
         int nid;
-        struct memblock_region *mblk;
-
-        /* Check that valid nid is set to memblks */
-        for_each_mem_region(mblk) {
-                int mblk_nid = memblock_get_region_node(mblk);
-                phys_addr_t start = mblk->base;
-                phys_addr_t end = mblk->base + mblk->size - 1;
-
-                if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
-                        pr_warn("Warning: invalid memblk node %d [mem %pap-%pap]\n",
-                                mblk_nid, &start, &end);
-                        return -EINVAL;
-                }
-        }
 
         /* Finally register nodes. */
         for_each_node_mask(nid, numa_nodes_parsed) {
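The distance-table code removed above is not lost: numa_memblks carries
an equivalent table with the same flat, row-major layout, so
__node_distance() keeps its semantics. A minimal sketch of the lookup
both versions implement (distance_lookup() is an illustrative helper,
not a kernel API):

    /* N x N byte matrix flattened row-major; unknown nodes get defaults */
    static u8 distance_lookup(const u8 *table, int cnt, int from, int to)
    {
            if (from >= cnt || to >= cnt)
                    return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
            return table[from * cnt + to];
    }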
@@ -360,11 +231,7 @@ static int __init numa_init(int (*init_func)(void))
         nodes_clear(node_possible_map);
         nodes_clear(node_online_map);
 
-        ret = numa_alloc_distance();
-        if (ret < 0)
-                return ret;
-
-        ret = init_func();
+        ret = numa_memblks_init(init_func, /* memblock_force_top_down */ false);
         if (ret < 0)
                 goto out_free_distance;
 
@@ -382,7 +249,7 @@ static int __init numa_init(int (*init_func)(void))
 
         return 0;
 out_free_distance:
-        numa_free_distance();
+        numa_reset_distance();
         return ret;
 }
 
@@ -412,6 +279,7 @@ static int __init dummy_numa_init(void)
                 pr_err("NUMA init failed\n");
                 return ret;
         }
+        node_set(0, numa_nodes_parsed);
 
         numa_off = true;
         return 0;
@@ -454,3 +322,54 @@ void __init arch_numa_init(void)
 
         numa_init(dummy_numa_init);
 }
+
+#ifdef CONFIG_NUMA_EMU
+void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
+                                        unsigned int nr_emu_nids)
+{
+        int i, j;
+
+        /*
+         * Transform cpu_to_node_map table to use emulated nids by
+         * reverse-mapping phys_nid. The maps should always exist but fall
+         * back to zero just in case.
+         */
+        for (i = 0; i < ARRAY_SIZE(cpu_to_node_map); i++) {
+                if (cpu_to_node_map[i] == NUMA_NO_NODE)
+                        continue;
+                for (j = 0; j < nr_emu_nids; j++)
+                        if (cpu_to_node_map[i] == emu_nid_to_phys[j])
+                                break;
+                cpu_to_node_map[i] = j < nr_emu_nids ? j : 0;
+        }
+}
+
+u64 __init numa_emu_dma_end(void)
+{
+        return memblock_start_of_DRAM() + SZ_4G;
+}
+
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable)
+{
+        struct cpumask *mask;
+
+        if (node == NUMA_NO_NODE)
+                return;
+
+        mask = node_to_cpumask_map[node];
+        if (!cpumask_available(mask)) {
+                pr_err("node_to_cpumask_map[%i] NULL\n", node);
+                dump_stack();
+                return;
+        }
+
+        if (enable)
+                cpumask_set_cpu(cpu, mask);
+        else
+                cpumask_clear_cpu(cpu, mask);
+
+        pr_debug("%s cpu %d node %d: mask now %*pbl\n",
+                 enable ? "numa_add_cpu" : "numa_remove_cpu",
+                 cpu, node, cpumask_pr_args(mask));
+}
+#endif /* CONFIG_NUMA_EMU */
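A worked example of the reverse mapping in numa_emu_update_cpu_to_node()
above, with illustrative values: given emu_nid_to_phys[] = { 0, 0, 1, 1 },
i.e. four emulated nodes backed by two physical ones, a CPU whose
firmware node was 1 first matches at j == 2, so its cpu_to_node_map[]
entry becomes 2; a CPU whose node appears nowhere in the table falls
back to emulated node 0.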
--- a/include/asm-generic/numa.h
+++ b/include/asm-generic/numa.h
@@ -32,10 +32,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
 
 void __init arch_numa_init(void);
 int __init numa_add_memblk(int nodeid, u64 start, u64 end);
-void __init numa_set_distance(int from, int to, int distance);
-void __init numa_free_distance(void);
 void __init early_map_cpu_to_node(unsigned int cpu, int nid);
-int __init early_cpu_to_node(int cpu);
+int early_cpu_to_node(int cpu);
 void numa_store_cpu_info(unsigned int cpu);
 void numa_add_cpu(unsigned int cpu);
 void numa_remove_cpu(unsigned int cpu);
@@ -51,4 +49,8 @@ static inline int early_cpu_to_node(int cpu) { return 0; }
 
 #endif /* CONFIG_NUMA */
 
+#ifdef CONFIG_NUMA_EMU
+void debug_cpumask_set_cpu(unsigned int cpu, int node, bool enable);
+#endif
+
 #endif /* __ASM_GENERIC_NUMA_H */
--- a/mm/numa_memblks.c
+++ b/mm/numa_memblks.c
@@ -405,9 +405,12 @@ static int __init numa_register_meminfo(struct numa_meminfo *mi)
                 unsigned long pfn_align = node_map_pfn_alignment();
 
                 if (pfn_align && pfn_align < PAGES_PER_SECTION) {
-                        pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
-                                PFN_PHYS(pfn_align) >> 20,
-                                PFN_PHYS(PAGES_PER_SECTION) >> 20);
+                        unsigned long node_align_mb = PFN_PHYS(pfn_align) >> 20;
+
+                        unsigned long sect_align_mb = PFN_PHYS(PAGES_PER_SECTION) >> 20;
+
+                        pr_warn("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
+                                node_align_mb, sect_align_mb);
                         return -EINVAL;
                 }
         }
@@ -418,18 +421,18 @@
 int __init numa_memblks_init(int (*init_func)(void),
                              bool memblock_force_top_down)
 {
+        phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
         int ret;
 
         nodes_clear(numa_nodes_parsed);
         nodes_clear(node_possible_map);
         nodes_clear(node_online_map);
         memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
-                                  NUMA_NO_NODE));
-        WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
+        WARN_ON(memblock_set_node(0, max_addr, &memblock.memory, NUMA_NO_NODE));
+        WARN_ON(memblock_set_node(0, max_addr, &memblock.reserved,
                                   NUMA_NO_NODE));
         /* In case that parsing SRAT failed. */
-        WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
+        WARN_ON(memblock_clear_hotplug(0, max_addr));
         numa_reset_distance();
 
         ret = init_func();
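The new max_addr local is what lets this file build when phys_addr_t is
32 bits wide: the cast truncates the constant once, explicitly, instead
of passing 64-bit literals into 32-bit parameters. An illustration (the
value follows from C's integer conversion rules):

    /* without PHYS_ADDR_T_64BIT, sizeof(phys_addr_t) == 4 */
    phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;   /* == 0xffffffff */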