2022-05-31 18:04:11 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/*
|
|
|
|
* acpi.c - Architecture-Specific Low-Level ACPI Boot Support
|
|
|
|
*
|
|
|
|
* Author: Jianmin Lv <lvjianmin@loongson.cn>
|
|
|
|
* Huacai Chen <chenhuacai@loongson.cn>
|
|
|
|
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/acpi.h>
|
2024-09-24 15:32:06 +08:00
|
|
|
#include <linux/efi-bgrt.h>
|
2022-05-31 18:04:11 +08:00
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/irqdomain.h>
|
|
|
|
#include <linux/memblock.h>
|
2022-12-10 22:40:05 +08:00
|
|
|
#include <linux/of_fdt.h>
|
2022-05-31 18:04:11 +08:00
|
|
|
#include <linux/serial_core.h>
|
|
|
|
#include <asm/io.h>
|
2022-05-31 18:04:12 +08:00
|
|
|
#include <asm/numa.h>
|
2022-05-31 18:04:11 +08:00
|
|
|
#include <asm/loongson.h>
|
|
|
|
|
|
|
|
int acpi_disabled;
|
|
|
|
EXPORT_SYMBOL(acpi_disabled);
|
|
|
|
int acpi_noirq;
|
|
|
|
int acpi_pci_disabled;
|
|
|
|
EXPORT_SYMBOL(acpi_pci_disabled);
|
|
|
|
int acpi_strict = 1; /* We have no workarounds on LoongArch */
|
|
|
|
int num_processors;
|
|
|
|
int disabled_cpus;
|
|
|
|
|
|
|
|
u64 acpi_saved_sp;
|
|
|
|
|
|
|
|
#define PREFIX "ACPI: "
|
|
|
|
|
LoongArch: Change acpi_core_pic[NR_CPUS] to acpi_core_pic[MAX_CORE_PIC]
With default config, the value of NR_CPUS is 64. When HW platform has
more then 64 cpus, system will crash on these platforms. MAX_CORE_PIC
is the maximum cpu number in MADT table (max physical number) which can
exceed the supported maximum cpu number (NR_CPUS, max logical number),
but kernel should not crash. Kernel should boot cpus with NR_CPUS, let
the remainder cpus stay in BIOS.
The potential crash reason is that the array acpi_core_pic[NR_CPUS] can
be overflowed when parsing MADT table, and it is obvious that CORE_PIC
should be corresponding to physical core rather than logical core, so it
is better to define the array as acpi_core_pic[MAX_CORE_PIC].
With the patch, system can boot up 64 vcpus with qemu parameter -smp 128,
otherwise system will crash with the following message.
[ 0.000000] CPU 0 Unable to handle kernel paging request at virtual address 0000420000004259, era == 90000000037a5f0c, ra == 90000000037a46ec
[ 0.000000] Oops[#1]:
[ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 6.8.0-rc2+ #192
[ 0.000000] Hardware name: QEMU QEMU Virtual Machine, BIOS unknown 2/2/2022
[ 0.000000] pc 90000000037a5f0c ra 90000000037a46ec tp 9000000003c90000 sp 9000000003c93d60
[ 0.000000] a0 0000000000000019 a1 9000000003d93bc0 a2 0000000000000000 a3 9000000003c93bd8
[ 0.000000] a4 9000000003c93a74 a5 9000000083c93a67 a6 9000000003c938f0 a7 0000000000000005
[ 0.000000] t0 0000420000004201 t1 0000000000000000 t2 0000000000000001 t3 0000000000000001
[ 0.000000] t4 0000000000000003 t5 0000000000000000 t6 0000000000000030 t7 0000000000000063
[ 0.000000] t8 0000000000000014 u0 ffffffffffffffff s9 0000000000000000 s0 9000000003caee98
[ 0.000000] s1 90000000041b0480 s2 9000000003c93da0 s3 9000000003c93d98 s4 9000000003c93d90
[ 0.000000] s5 9000000003caa000 s6 000000000a7fd000 s7 000000000f556b60 s8 000000000e0a4330
[ 0.000000] ra: 90000000037a46ec platform_init+0x214/0x250
[ 0.000000] ERA: 90000000037a5f0c efi_runtime_init+0x30/0x94
[ 0.000000] CRMD: 000000b0 (PLV0 -IE -DA +PG DACF=CC DACM=CC -WE)
[ 0.000000] PRMD: 00000000 (PPLV0 -PIE -PWE)
[ 0.000000] EUEN: 00000000 (-FPE -SXE -ASXE -BTE)
[ 0.000000] ECFG: 00070800 (LIE=11 VS=7)
[ 0.000000] ESTAT: 00010000 [PIL] (IS= ECode=1 EsubCode=0)
[ 0.000000] BADV: 0000420000004259
[ 0.000000] PRID: 0014c010 (Loongson-64bit, Loongson-3A5000)
[ 0.000000] Modules linked in:
[ 0.000000] Process swapper (pid: 0, threadinfo=(____ptrval____), task=(____ptrval____))
[ 0.000000] Stack : 9000000003c93a14 9000000003800898 90000000041844f8 90000000037a46ec
[ 0.000000] 000000000a7fd000 0000000008290000 0000000000000000 0000000000000000
[ 0.000000] 0000000000000000 0000000000000000 00000000019d8000 000000000f556b60
[ 0.000000] 000000000a7fd000 000000000f556b08 9000000003ca7700 9000000003800000
[ 0.000000] 9000000003c93e50 9000000003800898 9000000003800108 90000000037a484c
[ 0.000000] 000000000e0a4330 000000000f556b60 000000000a7fd000 000000000f556b08
[ 0.000000] 9000000003ca7700 9000000004184000 0000000000200000 000000000e02b018
[ 0.000000] 000000000a7fd000 90000000037a0790 9000000003800108 0000000000000000
[ 0.000000] 0000000000000000 000000000e0a4330 000000000f556b60 000000000a7fd000
[ 0.000000] 000000000f556b08 000000000eaae298 000000000eaa5040 0000000000200000
[ 0.000000] ...
[ 0.000000] Call Trace:
[ 0.000000] [<90000000037a5f0c>] efi_runtime_init+0x30/0x94
[ 0.000000] [<90000000037a46ec>] platform_init+0x214/0x250
[ 0.000000] [<90000000037a484c>] setup_arch+0x124/0x45c
[ 0.000000] [<90000000037a0790>] start_kernel+0x90/0x670
[ 0.000000] [<900000000378b0d8>] kernel_entry+0xd8/0xdc
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2024-02-06 12:32:05 +08:00
|
|
|
struct acpi_madt_core_pic acpi_core_pic[MAX_CORE_PIC];
|
2023-06-29 20:58:43 +08:00
|
|
|
|
2022-05-31 18:04:11 +08:00
|
|
|
/*
 * Early (boot-time) mapping of an ACPI table: hand the region to the
 * early_memremap() fixmap machinery. Torn down via __acpi_unmap_table().
 */
void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
{
	/* Refuse a NULL physical address or a zero-length window */
	if (phys == 0 || size == 0)
		return NULL;

	return early_memremap(phys, size);
}
|
|
|
|
/*
 * Undo an __acpi_map_table() mapping. Silently ignores mappings that
 * were never established (NULL map or zero size).
 */
void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
	if (map == NULL || size == 0)
		return;

	early_memunmap(map, size);
}
|
|
|
|
|
2022-09-02 22:33:42 +08:00
|
|
|
/*
 * Map a physical region for ACPICA: use a cached mapping when the range
 * is real RAM (known to memblock), an uncached I/O mapping otherwise.
 */
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	if (memblock_is_memory(phys))
		return ioremap_cache(phys, size);

	return ioremap(phys, size);
}
|
|
|
|
|
LoongArch: Fix the !CONFIG_SMP build
1, We assume arch/loongarch/include/asm/smp.h be included in include/
linux/smp.h is valid and the reverse inclusion isn't. So remove the
<linux/smp.h> in arch/loongarch/include/asm/smp.h.
2, arch/loongarch/include/asm/smp.h is only needed when CONFIG_SMP,
and setup.c include it only because it need plat_smp_setup(). So,
reorganize setup.c & smp.h, and then remove <asm/smp.h> in setup.c.
3, Fix cacheinfo.c and percpu.h build error by adding the missing header
files when !CONFIG_SMP.
4, Fix acpi.c build error by adding CONFIG_SMP guards.
5, Move irq_stat definition from smp.c to irq.c and fix its declaration.
6, Select CONFIG_SMP for CONFIG_NUMA, similar as other architectures do.
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2022-06-05 16:19:53 +08:00
|
|
|
#ifdef CONFIG_SMP
|
2024-11-12 16:35:36 +08:00
|
|
|
/*
 * Assign a logical CPU id to the physical core @id found in the MADT.
 *
 * Pass 1 handles enabled processors (counted in num_processors and marked
 * present); pass 2 handles disabled processors (counted in disabled_cpus
 * and marked possible only). The boot CPU always becomes logical CPU 0.
 *
 * Returns the logical CPU id, or -ENODEV once NR_CPUS processors have
 * been mapped (remaining cores stay with the BIOS).
 */
static int set_processor_mask(u32 id, u32 pass)
{
	int cpu = -1, cpuid = id;

	/* Hard cap: never map more than NR_CPUS logical processors */
	if (num_processors >= NR_CPUS) {
		pr_warn(PREFIX "nr_cpus limit of %i reached."
			" processor 0x%x ignored.\n", NR_CPUS, cpuid);

		return -ENODEV;
	}

	/* The boot processor is always logical CPU 0 */
	if (cpuid == loongson_sysconf.boot_cpu_id)
		cpu = 0;

	/*
	 * find_first_zero_bit() is used (rather than cpumask_next_zero())
	 * so the search is not cut short at nr_cpu_ids.
	 */
	switch (pass) {
	case 1: /* Pass 1 handle enabled processors */
		if (cpu < 0)
			cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
		num_processors++;
		set_cpu_present(cpu, true);
		break;
	case 2: /* Pass 2 handle disabled processors */
		if (cpu < 0)
			cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
		disabled_cpus++;
		break;
	default:
		return cpu;
	}

	/* Record the logical<->physical mapping for this CPU */
	set_cpu_possible(cpu, true);
	__cpu_number_map[cpuid] = cpu;
	__cpu_logical_map[cpu] = cpuid;

	return cpu;
}
|
LoongArch: Fix the !CONFIG_SMP build
1, We assume arch/loongarch/include/asm/smp.h be included in include/
linux/smp.h is valid and the reverse inclusion isn't. So remove the
<linux/smp.h> in arch/loongarch/include/asm/smp.h.
2, arch/loongarch/include/asm/smp.h is only needed when CONFIG_SMP,
and setup.c include it only because it need plat_smp_setup(). So,
reorganize setup.c & smp.h, and then remove <asm/smp.h> in setup.c.
3, Fix cacheinfo.c and percpu.h build error by adding the missing header
files when !CONFIG_SMP.
4, Fix acpi.c build error by adding CONFIG_SMP guards.
5, Move irq_stat definition from smp.c to irq.c and fix its declaration.
6, Select CONFIG_SMP for CONFIG_NUMA, similar as other architectures do.
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
2022-06-05 16:19:53 +08:00
|
|
|
#endif
|
2022-05-31 18:04:12 +08:00
|
|
|
|
2022-07-19 10:53:13 +08:00
|
|
|
/*
 * MADT CORE_PIC callback, pass 1: validate and print the entry, stash a
 * copy indexed by physical core id, and map enabled processors to
 * logical CPU ids.
 */
static int __init
acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);
#ifdef CONFIG_SMP
	/* acpi_core_pic[] is indexed by physical core id (up to MAX_CORE_PIC) */
	acpi_core_pic[processor->core_id] = *processor;
	if (processor->flags & ACPI_MADT_ENABLED)
		set_processor_mask(processor->core_id, 1);
#endif

	return 0;
}
|
|
|
|
|
2024-11-12 16:35:36 +08:00
|
|
|
/*
 * MADT CORE_PIC callback, pass 2: map processors the firmware marked
 * disabled (possible but not present, for later hotplug).
 */
static int __init
acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_core_pic *processor = NULL;

	processor = (struct acpi_madt_core_pic *)header;
	if (BAD_MADT_ENTRY(processor, end))
		return -EINVAL;

#ifdef CONFIG_SMP
	if (!(processor->flags & ACPI_MADT_ENABLED))
		set_processor_mask(processor->core_id, 2);
#endif

	return 0;
}
|
2022-07-19 10:53:13 +08:00
|
|
|
static int __init
|
|
|
|
acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
|
|
|
|
{
|
|
|
|
static int core = 0;
|
|
|
|
struct acpi_madt_eio_pic *eiointc = NULL;
|
|
|
|
|
|
|
|
eiointc = (struct acpi_madt_eio_pic *)header;
|
|
|
|
if (BAD_MADT_ENTRY(eiointc, end))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
core = eiointc->node * CORES_PER_EIO_NODE;
|
2024-01-17 12:43:08 +08:00
|
|
|
set_bit(core, loongson_sysconf.cores_io_master);
|
2022-07-19 10:53:13 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-05-31 18:04:11 +08:00
|
|
|
/*
 * Walk the MADT: reset the logical<->physical CPU maps, enumerate
 * CORE_PIC entries in two passes (enabled, then disabled processors),
 * record the EIO I/O-master cores, and publish the CPU count.
 */
static void __init acpi_process_madt(void)
{
#ifdef CONFIG_SMP
	int cpu;

	/* Start from a clean slate: no CPU is mapped yet */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		__cpu_number_map[cpu] = -1;
		__cpu_logical_map[cpu] = -1;
	}
#endif
	/* Pass 1: enabled processors */
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_p1_processor, MAX_CORE_PIC);

	/* Pass 2: disabled (hotpluggable) processors */
	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
			acpi_parse_p2_processor, MAX_CORE_PIC);

	/* Extended I/O interrupt controllers */
	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
			acpi_parse_eio_master, MAX_IO_PICS);

	loongson_sysconf.nr_cpus = num_processors;
}
|
|
|
|
|
2023-06-29 20:58:43 +08:00
|
|
|
int pptt_enabled;
|
|
|
|
|
|
|
|
/*
 * Fill in per-CPU core ids from the ACPI PPTT. For SMT processors the
 * core id is taken one level up in the topology tree. Sets pptt_enabled
 * on success; returns -ENOENT when the PPTT is absent or invalid.
 */
int __init parse_acpi_topology(void)
{
	int cpu, topology_id;

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0) {
			pr_warn("Invalid BIOS PPTT\n");
			return -ENOENT;
		}

		if (acpi_pptt_cpu_is_thread(cpu) > 0) {
			/* SMT thread: the core node is one level up */
			topology_id = find_acpi_cpu_topology(cpu, 1);
			if (topology_id < 0)
				return -ENOENT;
		}

		cpu_data[cpu].core = topology_id;
	}

	pptt_enabled = 1;

	return 0;
}
|
|
|
|
|
2022-12-10 22:40:15 +08:00
|
|
|
#ifndef CONFIG_SUSPEND
|
|
|
|
int (*acpi_suspend_lowlevel)(void);
|
|
|
|
#else
|
|
|
|
int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
|
|
|
|
#endif
|
|
|
|
|
2022-11-21 19:02:57 +08:00
|
|
|
/*
 * Boot-time ACPI setup: initialize the table parser, process the MADT,
 * configure the SPCR earlycon and (optionally) the BGRT boot graphics.
 * When ACPI is unusable, fall back to the FDT stdout path for earlycon.
 */
void __init acpi_boot_table_init(void)
{
	/*
	 * If acpi_disabled, bail out
	 */
	if (acpi_disabled)
		goto fdt_earlycon;

	/*
	 * Initialize the ACPI boot-time table parser.
	 */
	if (acpi_table_init()) {
		disable_acpi();
		goto fdt_earlycon;
	}

	loongson_sysconf.boot_cpu_id = read_csr_cpuid();

	/*
	 * Process the Multiple APIC Description Table (MADT), if present
	 */
	acpi_process_madt();

	/* Do not enable ACPI SPCR console by default */
	acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

	if (IS_ENABLED(CONFIG_ACPI_BGRT))
		acpi_table_parse(ACPI_SIG_BGRT, acpi_parse_bgrt);

	return;

fdt_earlycon:
	/* No usable ACPI: let the device tree provide the earlycon, if any */
	if (earlycon_acpi_spcr_enable)
		early_init_dt_scan_chosen_stdout();
}
|
|
|
|
|
2022-05-31 18:04:12 +08:00
|
|
|
#ifdef CONFIG_ACPI_NUMA
|
|
|
|
|
|
|
|
/* Map an ACPI proximity domain (PXM) to a NUMA node id */
static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
|
|
|
|
* I/O localities since SRAT does not list them. I/O localities are
|
|
|
|
* not supported at this point.
|
|
|
|
*/
|
|
|
|
unsigned int numa_distance_cnt;
|
|
|
|
|
|
|
|
/* Number of localities advertised by the firmware's SLIT */
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
	return slit->locality_count;
}
|
|
|
|
|
|
|
|
/*
 * Record the SLIT distance between two nodes. A distance must fit in a
 * u8, and a node's distance to itself must be LOCAL_DISTANCE; anything
 * else is rejected with a one-time warning.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	int fits_u8 = ((u8)distance == distance);
	int bad_self = (from == to) && (distance != LOCAL_DISTANCE);

	if (!fits_u8 || bad_self) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
				from, to, distance);
		return;
	}

	node_distances[from][to] = distance;
}
|
|
|
|
|
|
|
|
/* Callback for Proximity Domain -> CPUID mapping */
|
|
|
|
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
	int pxm, node;

	if (srat_disabled())
		return;
	/* Malformed entry: give up on the whole SRAT */
	if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}
	/* Ignore processors the firmware marked disabled */
	if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;
	pxm = pa->proximity_domain_lo;
	/* SRAT revision >= 2: the proximity domain is a full 32-bit value */
	if (acpi_srat_revision >= 2) {
		pxm |= (pa->proximity_domain_hi[0] << 8);
		pxm |= (pa->proximity_domain_hi[1] << 16);
		pxm |= (pa->proximity_domain_hi[2] << 24);
	}
	node = setup_node(pxm);
	if (node < 0) {
		pr_err("SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}

	/* Physical ids beyond NR_CPUS can never become logical CPUs */
	if (pa->apic_id >= CONFIG_NR_CPUS) {
		pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
				pxm, pa->apic_id, node);
		return;
	}

	early_numa_add_cpu(pa->apic_id, node);

	set_cpuid_to_node(pa->apic_id, node);
	node_set(node, numa_nodes_parsed);
	pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2022-05-31 18:04:11 +08:00
|
|
|
/* Reserve an ACPI-declared memory region so the kernel will not reuse it */
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
	memblock_reserve(addr, size);
}
|
2022-05-31 18:04:12 +08:00
|
|
|
|
|
|
|
#ifdef CONFIG_ACPI_HOTPLUG_CPU
|
|
|
|
|
|
|
|
#include <acpi/processor.h>
|
|
|
|
|
2022-05-31 18:04:12 +08:00
|
|
|
/*
 * Associate a hotplugged CPU with its NUMA node: update the cpuid->node
 * table, the parsed-node mask, and the per-node cpumask.
 */
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
	int nid;

	nid = acpi_get_node(handle);

	/*
	 * NOTE(review): when _PXM yields a valid node this replaces it with
	 * the early logical-CPU mapping; the condition reads inverted at
	 * first glance — confirm it is intentional.
	 */
	if (nid != NUMA_NO_NODE)
		nid = early_cpu_to_node(cpu);

	if (nid != NUMA_NO_NODE) {
		set_cpuid_to_node(physid, nid);
		node_set(nid, numa_nodes_parsed);
		set_cpu_numa_node(cpu, nid);
		cpumask_set_cpu(cpu, cpumask_of_node(nid));
	}
#endif
	return 0;
}
|
|
|
|
|
2022-05-31 18:04:12 +08:00
|
|
|
/*
 * ACPI CPU hotplug entry point: translate the physical id into the
 * logical CPU id established during MADT enumeration, mark the CPU
 * present, and attach it to its NUMA node. Returns 0 and stores the
 * logical id in *pcpu, or -ERANGE when no valid mapping exists.
 */
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
	int logical = cpu_number_map(physid);

	if (logical < 0 || logical >= nr_cpu_ids) {
		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
		return -ERANGE;
	}

	num_processors++;
	set_cpu_present(logical, true);
	acpi_map_cpu2node(handle, logical, physid);

	*pcpu = logical;

	return 0;
}
|
|
|
|
EXPORT_SYMBOL(acpi_map_cpu);
|
|
|
|
|
|
|
|
int acpi_unmap_cpu(int cpu)
|
|
|
|
{
|
2022-05-31 18:04:12 +08:00
|
|
|
#ifdef CONFIG_ACPI_NUMA
|
|
|
|
set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
|
|
|
|
#endif
|
2022-05-31 18:04:12 +08:00
|
|
|
set_cpu_present(cpu, false);
|
|
|
|
num_processors--;
|
|
|
|
|
|
|
|
pr_info("cpu%d hot remove!\n", cpu);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(acpi_unmap_cpu);
|
|
|
|
|
|
|
|
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
|