// SPDX-License-Identifier: GPL-2.0-only

/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 */
|
|
|
|
|
2023-09-19 11:36:01 +00:00
|
|
|
#define pr_fmt(fmt) "Hyper-V: " fmt
|
|
|
|
|
2018-09-18 22:29:50 +00:00
|
|
|
#include <linux/efi.h>
|
2017-01-18 23:45:02 +00:00
|
|
|
#include <linux/types.h>
|
2021-02-01 14:48:11 +00:00
|
|
|
#include <linux/bitfield.h>
|
2021-07-14 18:34:47 +00:00
|
|
|
#include <linux/io.h>
|
2018-01-24 13:23:33 +00:00
|
|
|
#include <asm/apic.h>
|
|
|
|
#include <asm/desc.h>
|
2023-11-11 08:37:47 +00:00
|
|
|
#include <asm/e820/api.h>
|
2022-06-14 01:45:53 +00:00
|
|
|
#include <asm/sev.h>
|
2023-07-22 04:51:16 +00:00
|
|
|
#include <asm/ibt.h>
|
2017-01-18 23:45:02 +00:00
|
|
|
#include <asm/hypervisor.h>
|
2018-03-20 14:02:05 +00:00
|
|
|
#include <asm/hyperv-tlfs.h>
|
2017-01-18 23:45:02 +00:00
|
|
|
#include <asm/mshyperv.h>
|
2020-05-21 20:05:43 +00:00
|
|
|
#include <asm/idtentry.h>
|
2023-08-18 10:29:13 +00:00
|
|
|
#include <asm/set_memory.h>
|
2020-12-22 06:55:41 +00:00
|
|
|
#include <linux/kexec.h>
|
2017-01-18 23:45:02 +00:00
|
|
|
#include <linux/version.h>
|
|
|
|
#include <linux/vmalloc.h>
|
|
|
|
#include <linux/mm.h>
|
2017-03-05 01:27:11 +00:00
|
|
|
#include <linux/hyperv.h>
|
2017-08-02 16:09:18 +00:00
|
|
|
#include <linux/slab.h>
|
2020-04-06 15:53:31 +00:00
|
|
|
#include <linux/kernel.h>
|
2017-08-02 16:09:18 +00:00
|
|
|
#include <linux/cpuhotplug.h>
|
2020-01-06 22:42:39 +00:00
|
|
|
#include <linux/syscore_ops.h>
|
2019-07-01 04:26:06 +00:00
|
|
|
#include <clocksource/hyperv_timer.h>
|
2021-02-03 15:04:26 +00:00
|
|
|
#include <linux/highmem.h>
|
2017-01-18 23:45:02 +00:00
|
|
|
|
2021-02-03 15:04:25 +00:00
|
|
|
/* ID of the partition this kernel runs in; ~0 until successfully queried. */
u64 hv_current_partition_id = ~0ull;
EXPORT_SYMBOL_GPL(hv_current_partition_id);

/* Guest page holding the Hyper-V hypercall trampoline; NULL when disabled. */
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);

/* Per-CPU pointer to the mapped GHCB page (SEV-SNP isolated VMs only). */
union hv_ghcb * __percpu *hv_ghcb_pg;

/* Storage to save the hypercall page temporarily for hibernation */
static void *hv_hypercall_pg_saved;

/* Per-CPU array of VP assist page pointers; NULL when the feature is unused. */
struct hv_vp_assist_page **hv_vp_assist_page;
EXPORT_SYMBOL_GPL(hv_vp_assist_page);
|
|
|
|
|
2021-10-25 12:21:06 +00:00
|
|
|
/*
 * Map this CPU's paravisor-allocated GHCB page and record it in the
 * per-CPU hv_ghcb_pg slot.
 *
 * Returns 0 on success or when no GHCB is needed (not an SNP VM with a
 * paravisor), -EINVAL if the per-CPU storage was never allocated, and
 * -ENOMEM if the mapping fails.
 */
static int hyperv_init_ghcb(void)
{
	u64 ghcb_gpa;
	void *ghcb_va;
	void **ghcb_base;

	/* GHCB pages are only used by SNP VMs running under a paravisor. */
	if (!ms_hyperv.paravisor_present || !hv_isolation_type_snp())
		return 0;

	/* The per-CPU slot must have been allocated during hyperv_init(). */
	if (!hv_ghcb_pg)
		return -EINVAL;

	/*
	 * GHCB page is allocated by paravisor. The address
	 * returned by MSR_AMD64_SEV_ES_GHCB is above shared
	 * memory boundary and map it here.
	 */
	rdmsrl(MSR_AMD64_SEV_ES_GHCB, ghcb_gpa);

	/* Mask out vTOM bit. ioremap_cache() maps decrypted */
	ghcb_gpa &= ~ms_hyperv.shared_gpa_boundary;
	ghcb_va = (void *)ioremap_cache(ghcb_gpa, HV_HYP_PAGE_SIZE);
	if (!ghcb_va)
		return -ENOMEM;

	ghcb_base = (void **)this_cpu_ptr(hv_ghcb_pg);
	*ghcb_base = ghcb_va;

	return 0;
}
|
|
|
|
|
2017-08-02 16:09:18 +00:00
|
|
|
/*
 * CPU online (and resume) callback: set up this CPU's Hyper-V state -
 * the common per-CPU pages, the VP assist page, and (on SNP) the GHCB
 * mapping. Counterpart of hv_cpu_die().
 */
static int hv_cpu_init(unsigned int cpu)
{
	union hv_vp_assist_msr_contents msr = { 0 };
	struct hv_vp_assist_page **hvp;
	int ret;

	ret = hv_common_cpu_init(cpu);
	if (ret)
		return ret;

	/* hv_vp_assist_page is NULL on TDX or when allocation failed. */
	if (!hv_vp_assist_page)
		return 0;

	hvp = &hv_vp_assist_page[cpu];
	if (hv_root_partition) {
		/*
		 * For root partition we get the hypervisor provided VP assist
		 * page, instead of allocating a new page.
		 */
		rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
		*hvp = memremap(msr.pfn << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT,
				PAGE_SIZE, MEMREMAP_WB);
	} else {
		/*
		 * The VP assist page is an "overlay" page (see Hyper-V TLFS's
		 * Section 5.2.1 "GPA Overlay Pages"). Here it must be zeroed
		 * out to make sure we always write the EOI MSR in
		 * hv_apic_eoi_write() *after* the EOI optimization is disabled
		 * in hv_cpu_die(), otherwise a CPU may not be stopped in the
		 * case of CPU offlining and the VM will hang.
		 */
		if (!*hvp) {
			*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);

			/*
			 * Hyper-V should never specify a VM that is a Confidential
			 * VM and also running in the root partition. Root partition
			 * is blocked to run in Confidential VM. So only decrypt assist
			 * page in non-root partition here.
			 */
			if (*hvp && !ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
				WARN_ON_ONCE(set_memory_decrypted((unsigned long)(*hvp), 1));
				memset(*hvp, 0, PAGE_SIZE);
			}
		}

		if (*hvp)
			msr.pfn = vmalloc_to_pfn(*hvp);

	}
	/* Only enable the MSR if we actually have a page to point it at. */
	if (!WARN_ON(!(*hvp))) {
		msr.enable = 1;
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
	}

	return hyperv_init_ghcb();
}
|
|
|
|
|
2018-01-24 13:23:33 +00:00
|
|
|
static void (*hv_reenlightenment_cb)(void);
|
|
|
|
|
|
|
|
static void hv_reenlightenment_notify(struct work_struct *dummy)
|
|
|
|
{
|
|
|
|
struct hv_tsc_emulation_status emu_status;
|
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
|
|
|
|
|
|
|
|
/* Don't issue the callback if TSC accesses are not emulated */
|
|
|
|
if (hv_reenlightenment_cb && emu_status.inprogress)
|
|
|
|
hv_reenlightenment_cb();
|
|
|
|
}
|
|
|
|
static DECLARE_DELAYED_WORK(hv_reenlightenment_work, hv_reenlightenment_notify);
|
|
|
|
|
|
|
|
void hyperv_stop_tsc_emulation(void)
|
|
|
|
{
|
|
|
|
u64 freq;
|
|
|
|
struct hv_tsc_emulation_status emu_status;
|
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
|
|
|
|
emu_status.inprogress = 0;
|
|
|
|
wrmsrl(HV_X64_MSR_TSC_EMULATION_STATUS, *(u64 *)&emu_status);
|
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_TSC_FREQUENCY, freq);
|
|
|
|
tsc_khz = div64_u64(freq, 1000);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
|
|
|
|
|
|
|
|
static inline bool hv_reenlightenment_available(void)
|
|
|
|
{
|
|
|
|
/*
|
2021-03-18 14:28:01 +00:00
|
|
|
* Check for required features and privileges to make TSC frequency
|
2018-01-24 13:23:33 +00:00
|
|
|
* change notifications work.
|
|
|
|
*/
|
2020-09-26 14:26:26 +00:00
|
|
|
return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
|
2018-01-24 13:23:33 +00:00
|
|
|
ms_hyperv.misc_features & HV_FEATURE_FREQUENCY_MSRS_AVAILABLE &&
|
2020-09-26 14:26:26 +00:00
|
|
|
ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
|
2018-01-24 13:23:33 +00:00
|
|
|
}
|
|
|
|
|
2020-05-21 20:05:43 +00:00
|
|
|
/*
 * Hard-IRQ entry for the Hyper-V reenlightenment vector: ack the
 * interrupt, bump the stat counter, and defer the MSR reads and
 * callback invocation to workqueue context via delayed work.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
{
	apic_eoi();
	inc_irq_stat(irq_hv_reenlightenment_count);
	schedule_delayed_work(&hv_reenlightenment_work, HZ/10);
}
|
|
|
|
|
|
|
|
/*
 * Register @cb to be invoked when Hyper-V starts/stops emulating TSC
 * accesses, and enable reenlightenment notifications targeted at the
 * current CPU's VP.
 */
void set_hv_tscchange_cb(void (*cb)(void))
{
	struct hv_reenlightenment_control re_ctrl = {
		.vector = HYPERV_REENLIGHTENMENT_VECTOR,
		.enabled = 1,
	};
	struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};

	if (!hv_reenlightenment_available()) {
		pr_warn("reenlightenment support is unavailable\n");
		return;
	}

	/* Without the VP index array there is nothing to target. */
	if (!hv_vp_index)
		return;

	hv_reenlightenment_cb = cb;

	/* Make sure callback is registered before we write to MSRs */
	wmb();

	/* get_cpu() pins us so target_vp matches the CPU doing the wrmsrl. */
	re_ctrl.target_vp = hv_vp_index[get_cpu()];

	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	wrmsrl(HV_X64_MSR_TSC_EMULATION_CONTROL, *((u64 *)&emu_ctrl));

	put_cpu();
}
EXPORT_SYMBOL_GPL(set_hv_tscchange_cb);
|
|
|
|
|
|
|
|
/*
 * Disable reenlightenment notifications in the hypervisor and then
 * unregister the TSC-change callback.
 */
void clear_hv_tscchange_cb(void)
{
	struct hv_reenlightenment_control re_ctrl;

	if (!hv_reenlightenment_available())
		return;

	/* Read-modify-write: clear only the enabled bit. */
	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);
	re_ctrl.enabled = 0;
	wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *(u64 *)&re_ctrl);

	/* Notifications are off; now it is safe to drop the callback. */
	hv_reenlightenment_cb = NULL;
}
EXPORT_SYMBOL_GPL(clear_hv_tscchange_cb);
|
|
|
|
|
2018-01-24 13:23:34 +00:00
|
|
|
/*
 * CPU offline (and suspend) callback: tear down this CPU's Hyper-V
 * state - unmap the GHCB, release the common per-CPU pages, disable
 * the VP assist page, and re-target any reenlightenment notifications
 * that were aimed at this CPU. Counterpart of hv_cpu_init().
 */
static int hv_cpu_die(unsigned int cpu)
{
	struct hv_reenlightenment_control re_ctrl;
	unsigned int new_cpu;
	void **ghcb_va;

	if (hv_ghcb_pg) {
		ghcb_va = (void **)this_cpu_ptr(hv_ghcb_pg);
		if (*ghcb_va)
			iounmap(*ghcb_va);
		*ghcb_va = NULL;
	}

	hv_common_cpu_die(cpu);

	if (hv_vp_assist_page && hv_vp_assist_page[cpu]) {
		union hv_vp_assist_msr_contents msr = { 0 };
		if (hv_root_partition) {
			/*
			 * For root partition the VP assist page is mapped to
			 * hypervisor provided page, and thus we unmap the
			 * page here and nullify it, so that in future we have
			 * correct page address mapped in hv_cpu_init.
			 */
			memunmap(hv_vp_assist_page[cpu]);
			hv_vp_assist_page[cpu] = NULL;
			rdmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
			msr.enable = 0;
		}
		/* Non-root: msr is all zero, so this disables the overlay page. */
		wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, msr.as_uint64);
	}

	if (hv_reenlightenment_cb == NULL)
		return 0;

	rdmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	if (re_ctrl.target_vp == hv_vp_index[cpu]) {
		/*
		 * Reassign reenlightenment notifications to some other online
		 * CPU or just disable the feature if there are no online CPUs
		 * left (happens on hibernation).
		 */
		new_cpu = cpumask_any_but(cpu_online_mask, cpu);

		if (new_cpu < nr_cpu_ids)
			re_ctrl.target_vp = hv_vp_index[new_cpu];
		else
			re_ctrl.enabled = 0;

		wrmsrl(HV_X64_MSR_REENLIGHTENMENT_CONTROL, *((u64 *)&re_ctrl));
	}

	return 0;
}
|
|
|
|
|
2018-09-18 22:29:50 +00:00
|
|
|
/*
 * Arch PCI init hook for Hyper-V guests. Returns 1 to let the normal
 * x86 PCI init proceed (Generation-1 VMs), or 0 to short-circuit it
 * (Generation-2 VMs, which have no legacy PCI config space).
 */
static int __init hv_pci_init(void)
{
	/* For Generation-1 VM, we'll proceed in pci_arch_init(). */
	if (!efi_enabled(EFI_BOOT))
		return 1;

	/*
	 * A Generation-2 VM doesn't support legacy PCI/PCIe, so both
	 * raw_pci_ops and raw_pci_ext_ops are NULL, and pci_subsys_init() ->
	 * pcibios_init() doesn't call pcibios_resource_survey() ->
	 * e820__reserve_resources_late(); as a result, any emulated persistent
	 * memory of E820_TYPE_PRAM (12) via the kernel parameter
	 * memmap=nn[KMG]!ss is not added into iomem_resource and hence can't be
	 * detected by register_e820_pmem(). Fix this by directly calling
	 * e820__reserve_resources_late() here: e820__reserve_resources_late()
	 * depends on e820__reserve_resources(), which has been called earlier
	 * from setup_arch(). Note: e820__reserve_resources_late() also adds
	 * any memory of E820_TYPE_PMEM (7) into iomem_resource, and
	 * acpi_nfit_register_region() -> acpi_nfit_insert_resource() ->
	 * region_intersects() returns REGION_INTERSECTS, so the memory of
	 * E820_TYPE_PMEM won't get added twice.
	 */
	e820__reserve_resources_late();

	/*
	 * We return 0 here so that pci_arch_init() won't print the warning:
	 * "PCI: Fatal: No config space access function found"
	 */
	return 0;
}
|
|
|
|
|
2020-01-06 22:42:39 +00:00
|
|
|
/*
 * Syscore suspend handler for hibernation: disable the hypercall page
 * and tear down CPU0's per-CPU Hyper-V state. Runs with only CPU0
 * online and IRQs disabled (see the note above hv_syscore_ops).
 */
static int hv_suspend(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;

	/* Suspend is not supported for the root partition. */
	if (hv_root_partition)
		return -EPERM;

	/*
	 * Reset the hypercall page as it is going to be invalidated
	 * across hibernation. Setting hv_hypercall_pg to NULL ensures
	 * that any subsequent hypercall operation fails safely instead of
	 * crashing due to an access of an invalid page. The hypercall page
	 * pointer is restored on resume.
	 */
	hv_hypercall_pg_saved = hv_hypercall_pg;
	hv_hypercall_pg = NULL;

	/* Disable the hypercall page in the hypervisor */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* CPU0 is never offlined, so suspend its Hyper-V state explicitly. */
	ret = hv_cpu_die(0);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Syscore resume handler: re-create CPU0's Hyper-V state, re-enable
 * the hypercall page saved by hv_suspend(), and restore any
 * reenlightenment notifications. Mirrors hv_suspend().
 */
static void hv_resume(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;
	int ret;

	/* Re-initialize CPU0's Hyper-V state (it was torn down on suspend). */
	ret = hv_cpu_init(0);
	WARN_ON(ret);

	/* Re-enable the hypercall page */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address =
		vmalloc_to_pfn(hv_hypercall_pg_saved);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	hv_hypercall_pg = hv_hypercall_pg_saved;
	hv_hypercall_pg_saved = NULL;

	/*
	 * Reenlightenment notifications are disabled by hv_cpu_die(0),
	 * reenable them here if hv_reenlightenment_cb was previously set.
	 */
	if (hv_reenlightenment_cb)
		set_hv_tscchange_cb(hv_reenlightenment_cb);
}
|
|
|
|
|
/* Note: when the ops are called, only CPU0 is online and IRQs are disabled. */
static struct syscore_ops hv_syscore_ops = {
	.suspend	= hv_suspend,
	.resume		= hv_resume,
};
|
|
|
|
|
/* Saved previous per-CPU clockevent setup routine, chained to below. */
static void (* __initdata old_setup_percpu_clockev)(void);

/*
 * Set up the Hyper-V direct-mode stimer clockevent and then chain to
 * the saved setup routine. NOTE(review): presumably installed as the
 * x86 per-CPU clockevent setup hook so the stimer is registered only
 * after the LAPIC is initialized - confirm against hyperv_init().
 */
static void __init hv_stimer_setup_percpu_clockev(void)
{
	/*
	 * Ignore any errors in setting up stimer clockevents
	 * as we can run with the LAPIC timer as a fallback.
	 */
	(void)hv_stimer_alloc(false);

	/*
	 * Still register the LAPIC timer, because the direct-mode STIMER is
	 * not supported by old versions of Hyper-V. This also allows users
	 * to switch to LAPIC timer via /sys, if they want to.
	 */
	if (old_setup_percpu_clockev)
		old_setup_percpu_clockev();
}
|
|
|
|
|
2021-02-03 15:04:25 +00:00
|
|
|
/*
 * Query the hypervisor for this partition's ID and cache it in
 * hv_current_partition_id. BUGs on failure. IRQs are disabled while
 * the per-CPU hypercall output page is in use.
 */
static void __init hv_get_partition_id(void)
{
	struct hv_get_partition_id *output_page;
	u64 status;
	unsigned long flags;

	local_irq_save(flags);
	output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
	status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page);
	if (!hv_result_success(status)) {
		/* No point in proceeding if this failed */
		pr_err("Failed to get partition ID: %lld\n", status);
		BUG();
	}
	hv_current_partition_id = output_page->partition_id;
	local_irq_restore(flags);
}
|
|
|
|
|
2023-09-20 04:04:35 +00:00
|
|
|
#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
/*
 * Query the Virtual Trust Level the current VP is running at, via the
 * HVCALL_GET_VP_REGISTERS hypercall reading
 * HV_X64_REGISTER_VSM_VP_STATUS. BUGs on hypercall failure.
 */
static u8 __init get_vtl(void)
{
	u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
	struct hv_get_vp_registers_input *input;
	struct hv_get_vp_registers_output *output;
	unsigned long flags;
	u64 ret;

	local_irq_save(flags);
	input = *this_cpu_ptr(hyperv_pcpu_input_arg);
	/* Input and output deliberately alias the same per-CPU page. */
	output = (struct hv_get_vp_registers_output *)input;

	memset(input, 0, struct_size(input, element, 1));
	input->header.partitionid = HV_PARTITION_ID_SELF;
	input->header.vpindex = HV_VP_INDEX_SELF;
	input->header.inputvtl = 0;
	input->element[0].name0 = HV_X64_REGISTER_VSM_VP_STATUS;

	ret = hv_do_hypercall(control, input, output);
	if (hv_result_success(ret)) {
		/* The active VTL is in the low bits of the status register. */
		ret = output->as64.low & HV_X64_VTL_MASK;
	} else {
		pr_err("Failed to get VTL(error: %lld) exiting...\n", ret);
		BUG();
	}

	local_irq_restore(flags);
	return ret;
}
#else
/* Without VTL mode support the guest always runs at VTL0. */
static inline u8 get_vtl(void) { return 0; }
#endif
|
2023-08-18 10:29:12 +00:00
|
|
|
|
2017-01-18 23:45:02 +00:00
|
|
|
/*
|
|
|
|
* This function is to be invoked early in the boot sequence after the
|
|
|
|
* hypervisor has been detected.
|
|
|
|
*
|
|
|
|
* 1. Setup the hypercall page.
|
2017-01-19 18:51:46 +00:00
|
|
|
* 2. Register Hyper-V specific clocksource.
|
2018-05-16 21:53:30 +00:00
|
|
|
* 3. Setup Hyper-V specific APIC entry points.
|
2017-01-18 23:45:02 +00:00
|
|
|
*/
|
2018-05-16 21:53:30 +00:00
|
|
|
void __init hyperv_init(void)
|
2017-01-18 23:45:02 +00:00
|
|
|
{
|
2021-11-04 18:22:39 +00:00
|
|
|
u64 guest_id;
|
2017-01-18 23:45:02 +00:00
|
|
|
union hv_x64_msr_hypercall_contents hypercall_msr;
|
2021-07-14 18:34:45 +00:00
|
|
|
int cpuhp;
|
2017-01-18 23:45:02 +00:00
|
|
|
|
2017-11-09 13:27:36 +00:00
|
|
|
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
|
2017-01-18 23:45:02 +00:00
|
|
|
return;
|
|
|
|
|
2021-07-14 18:34:45 +00:00
|
|
|
if (hv_common_init())
|
2017-08-02 16:09:18 +00:00
|
|
|
return;
|
|
|
|
|
2023-08-24 08:07:05 +00:00
|
|
|
/*
|
|
|
|
* The VP assist page is useless to a TDX guest: the only use we
|
|
|
|
* would have for it is lazy EOI, which can not be used with TDX.
|
|
|
|
*/
|
|
|
|
if (hv_isolation_type_tdx())
|
|
|
|
hv_vp_assist_page = NULL;
|
|
|
|
else
|
|
|
|
hv_vp_assist_page = kcalloc(num_possible_cpus(),
|
|
|
|
sizeof(*hv_vp_assist_page),
|
|
|
|
GFP_KERNEL);
|
2018-03-20 14:02:08 +00:00
|
|
|
if (!hv_vp_assist_page) {
|
|
|
|
ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
|
2023-08-24 08:07:05 +00:00
|
|
|
|
|
|
|
if (!hv_isolation_type_tdx())
|
|
|
|
goto common_free;
|
2018-03-20 14:02:08 +00:00
|
|
|
}
|
|
|
|
|
2023-08-24 08:07:11 +00:00
|
|
|
if (ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
|
2022-06-14 01:45:53 +00:00
|
|
|
/* Negotiate GHCB Version. */
|
|
|
|
if (!hv_ghcb_negotiate_protocol())
|
|
|
|
hv_ghcb_terminate(SEV_TERM_SET_GEN,
|
|
|
|
GHCB_SEV_ES_PROT_UNSUPPORTED);
|
|
|
|
|
2021-10-25 12:21:11 +00:00
|
|
|
hv_ghcb_pg = alloc_percpu(union hv_ghcb *);
|
2021-10-25 12:21:06 +00:00
|
|
|
if (!hv_ghcb_pg)
|
|
|
|
goto free_vp_assist_page;
|
|
|
|
}
|
|
|
|
|
2023-05-23 17:14:21 +00:00
|
|
|
cpuhp = cpuhp_setup_state(CPUHP_AP_HYPERV_ONLINE, "x86/hyperv_init:online",
|
2018-03-20 14:02:08 +00:00
|
|
|
hv_cpu_init, hv_cpu_die);
|
|
|
|
if (cpuhp < 0)
|
2021-10-25 12:21:06 +00:00
|
|
|
goto free_ghcb_page;
|
2017-08-02 16:09:18 +00:00
|
|
|
|
2017-01-18 23:45:02 +00:00
|
|
|
/*
|
|
|
|
* Setup the hypercall page and enable hypercalls.
|
|
|
|
* 1. Register the guest ID
|
|
|
|
* 2. Enable the hypercall and register the hypercall page
|
2023-08-24 08:07:09 +00:00
|
|
|
*
|
|
|
|
* A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg:
|
|
|
|
* when the hypercall input is a page, such a VM must pass a decrypted
|
|
|
|
* page to Hyper-V, e.g. hv_post_message() uses the per-CPU page
|
|
|
|
* hyperv_pcpu_input_arg, which is decrypted if no paravisor is present.
|
|
|
|
*
|
|
|
|
* A TDX VM with the paravisor uses hv_hypercall_pg for most hypercalls,
|
|
|
|
* which are handled by the paravisor and the VM must use an encrypted
|
|
|
|
* input page: in such a VM, the hyperv_pcpu_input_arg is encrypted and
|
|
|
|
* used in the hypercalls, e.g. see hv_mark_gpa_visibility() and
|
|
|
|
* hv_arch_irq_unmask(). Such a VM uses TDX GHCI for two hypercalls:
|
|
|
|
* 1. HVCALL_SIGNAL_EVENT: see vmbus_set_event() and _hv_do_fast_hypercall8().
|
|
|
|
* 2. HVCALL_POST_MESSAGE: the input page must be a decrypted page, i.e.
|
|
|
|
* hv_post_message() in such a VM can't use the encrypted hyperv_pcpu_input_arg;
|
|
|
|
* instead, hv_post_message() uses the post_msg_page, which is decrypted
|
|
|
|
* in such a VM and is only used in such a VM.
|
2017-01-18 23:45:02 +00:00
|
|
|
*/
|
2022-09-28 06:40:46 +00:00
|
|
|
guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
|
2017-01-18 23:45:02 +00:00
|
|
|
wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
|
|
|
|
|
2023-08-24 08:07:10 +00:00
|
|
|
/* With the paravisor, the VM must also write the ID via GHCB/GHCI */
|
|
|
|
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
|
2021-10-25 12:21:11 +00:00
|
|
|
|
2023-08-24 08:07:09 +00:00
|
|
|
/* A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg */
|
|
|
|
if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
|
2023-08-24 08:07:04 +00:00
|
|
|
goto skip_hypercall_pg_init;
|
2021-10-25 12:21:11 +00:00
|
|
|
|
2020-06-26 03:30:40 +00:00
|
|
|
hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
|
|
|
|
VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
|
2020-07-03 22:15:27 +00:00
|
|
|
VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
|
|
|
|
__builtin_return_address(0));
|
2021-10-25 12:21:06 +00:00
|
|
|
if (hv_hypercall_pg == NULL)
|
|
|
|
goto clean_guest_os_id;
|
2017-01-18 23:45:02 +00:00
|
|
|
|
|
|
|
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
hypercall_msr.enable = 1;
|
2021-02-03 15:04:26 +00:00
|
|
|
|
|
|
|
if (hv_root_partition) {
|
|
|
|
struct page *pg;
|
2022-10-20 08:38:20 +00:00
|
|
|
void *src;
|
2021-02-03 15:04:26 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* For the root partition, the hypervisor will set up its
|
|
|
|
* hypercall page. The hypervisor guarantees it will not show
|
|
|
|
* up in the root's address space. The root can't change the
|
|
|
|
* location of the hypercall page.
|
|
|
|
*
|
|
|
|
* Order is important here. We must enable the hypercall page
|
|
|
|
* so it is populated with code, then copy the code to an
|
|
|
|
* executable page.
|
|
|
|
*/
|
|
|
|
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
|
|
|
|
pg = vmalloc_to_page(hv_hypercall_pg);
|
|
|
|
src = memremap(hypercall_msr.guest_physical_address << PAGE_SHIFT, PAGE_SIZE,
|
|
|
|
MEMREMAP_WB);
|
2022-10-20 08:38:20 +00:00
|
|
|
BUG_ON(!src);
|
|
|
|
memcpy_to_page(pg, 0, src, HV_HYP_PAGE_SIZE);
|
2021-02-03 15:04:26 +00:00
|
|
|
memunmap(src);
|
2022-11-04 20:40:55 +00:00
|
|
|
|
|
|
|
hv_remap_tsc_clocksource();
|
2021-02-03 15:04:26 +00:00
|
|
|
} else {
|
|
|
|
hypercall_msr.guest_physical_address = vmalloc_to_pfn(hv_hypercall_pg);
|
|
|
|
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
}
|
2017-01-19 18:51:46 +00:00
|
|
|
|
2023-08-24 08:07:04 +00:00
|
|
|
skip_hypercall_pg_init:
|
2023-07-22 04:51:16 +00:00
|
|
|
/*
|
|
|
|
* Some versions of Hyper-V that provide IBT in guest VMs have a bug
|
|
|
|
* in that there's no ENDBR64 instruction at the entry to the
|
|
|
|
* hypercall page. Because hypercalls are invoked via an indirect call
|
|
|
|
* to the hypercall page, all hypercall attempts fail when IBT is
|
|
|
|
* enabled, and Linux panics. For such buggy versions, disable IBT.
|
|
|
|
*
|
|
|
|
* Fixed versions of Hyper-V always provide ENDBR64 on the hypercall
|
|
|
|
* page, so if future Linux kernel versions enable IBT for 32-bit
|
|
|
|
* builds, additional hypercall page hackery will be required here
|
|
|
|
* to provide an ENDBR32.
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_X86_KERNEL_IBT
|
|
|
|
if (cpu_feature_enabled(X86_FEATURE_IBT) &&
|
|
|
|
*(u32 *)hv_hypercall_pg != gen_endbr()) {
|
|
|
|
setup_clear_cpu_cap(X86_FEATURE_IBT);
|
2023-09-19 11:36:01 +00:00
|
|
|
pr_warn("Disabling IBT because of Hyper-V bug\n");
|
2023-07-22 04:51:16 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
x86/hyperv: Initialize clockevents earlier in CPU onlining
Hyper-V has historically initialized stimer-based clockevents late in the
process of onlining a CPU because clockevents depend on stimer
interrupts. In the original Hyper-V design, stimer interrupts generate a
VMbus message, so the VMbus machinery must be running first, and VMbus
can't be initialized until relatively late. On x86/64, LAPIC timer based
clockevents are used during early initialization before VMbus and
stimer-based clockevents are ready, and again during CPU offlining after
the stimer clockevents have been shut down.
Unfortunately, this design creates problems when offlining CPUs for
hibernation or other purposes. stimer-based clockevents are shut down
relatively early in the offlining process, so clockevents_unbind_device()
must be used to fallback to the LAPIC-based clockevents for the remainder
of the offlining process. Furthermore, the late initialization and early
shutdown of stimer-based clockevents doesn't work well on ARM64 since there
is no other timer like the LAPIC to fallback to. So CPU onlining and
offlining doesn't work properly.
Fix this by recognizing that stimer Direct Mode is the normal path for
newer versions of Hyper-V on x86/64, and the only path on other
architectures. With stimer Direct Mode, stimer interrupts don't require any
VMbus machinery. stimer clockevents can be initialized and shut down
consistent with how it is done for other clockevent devices. While the old
VMbus-based stimer interrupts must still be supported for backward
compatibility on x86, that mode of operation can be treated as legacy.
So add a new Hyper-V stimer entry in the CPU hotplug state list, and use
that new state when in Direct Mode. Update the Hyper-V clocksource driver
to allocate and initialize stimer clockevents earlier during boot. Update
Hyper-V initialization and the VMbus driver to use this new design. As a
result, the LAPIC timer is no longer used during boot or CPU
onlining/offlining and clockevents_unbind_device() is not called. But
retain the old design as a legacy implementation for older versions of
Hyper-V that don't support Direct Mode.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Link: https://lkml.kernel.org/r/1573607467-9456-1-git-send-email-mikelley@microsoft.com
2019-11-13 01:11:49 +00:00
|
|
|
/*
|
x86/hyperv: Initialize clockevents after LAPIC is initialized
With commit 4df4cb9e99f8, the Hyper-V direct-mode STIMER is actually
initialized before LAPIC is initialized: see
apic_intr_mode_init()
x86_platform.apic_post_init()
hyperv_init()
hv_stimer_alloc()
apic_bsp_setup()
setup_local_APIC()
setup_local_APIC() temporarily disables LAPIC, initializes it and
re-eanble it. The direct-mode STIMER depends on LAPIC, and when it's
registered, it can be programmed immediately and the timer can fire
very soon:
hv_stimer_init
clockevents_config_and_register
clockevents_register_device
tick_check_new_device
tick_setup_device
tick_setup_periodic(), tick_setup_oneshot()
clockevents_program_event
When the timer fires in the hypervisor, if the LAPIC is in the
disabled state, new versions of Hyper-V ignore the event and don't inject
the timer interrupt into the VM, and hence the VM hangs when it boots.
Note: when the VM starts/reboots, the LAPIC is pre-enabled by the
firmware, so the window of LAPIC being temporarily disabled is pretty
small, and the issue can only happen once out of 100~200 reboots for
a 40-vCPU VM on one dev host, and on another host the issue doesn't
reproduce after 2000 reboots.
The issue is more noticeable for kdump/kexec, because the LAPIC is
disabled by the first kernel, and stays disabled until the kdump/kexec
kernel enables it. This is especially an issue to a Generation-2 VM
(for which Hyper-V doesn't emulate the PIT timer) when CONFIG_HZ=1000
(rather than CONFIG_HZ=250) is used.
Fix the issue by moving hv_stimer_alloc() to a later place where the
LAPIC timer is initialized.
Fixes: 4df4cb9e99f8 ("x86/hyperv: Initialize clockevents earlier in CPU onlining")
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20210116223136.13892-1-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2021-01-16 22:31:36 +00:00
|
|
|
* hyperv_init() is called before LAPIC is initialized: see
|
|
|
|
* apic_intr_mode_init() -> x86_platform.apic_post_init() and
|
|
|
|
* apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
|
|
|
|
* depends on LAPIC, so hv_stimer_alloc() should be called from
|
|
|
|
* x86_init.timers.setup_percpu_clockev.
|
x86/hyperv: Initialize clockevents earlier in CPU onlining
Hyper-V has historically initialized stimer-based clockevents late in the
process of onlining a CPU because clockevents depend on stimer
interrupts. In the original Hyper-V design, stimer interrupts generate a
VMbus message, so the VMbus machinery must be running first, and VMbus
can't be initialized until relatively late. On x86/64, LAPIC timer based
clockevents are used during early initialization before VMbus and
stimer-based clockevents are ready, and again during CPU offlining after
the stimer clockevents have been shut down.
Unfortunately, this design creates problems when offlining CPUs for
hibernation or other purposes. stimer-based clockevents are shut down
relatively early in the offlining process, so clockevents_unbind_device()
must be used to fallback to the LAPIC-based clockevents for the remainder
of the offlining process. Furthermore, the late initialization and early
shutdown of stimer-based clockevents doesn't work well on ARM64 since there
is no other timer like the LAPIC to fallback to. So CPU onlining and
offlining doesn't work properly.
Fix this by recognizing that stimer Direct Mode is the normal path for
newer versions of Hyper-V on x86/64, and the only path on other
architectures. With stimer Direct Mode, stimer interrupts don't require any
VMbus machinery. stimer clockevents can be initialized and shut down
consistent with how it is done for other clockevent devices. While the old
VMbus-based stimer interrupts must still be supported for backward
compatibility on x86, that mode of operation can be treated as legacy.
So add a new Hyper-V stimer entry in the CPU hotplug state list, and use
that new state when in Direct Mode. Update the Hyper-V clocksource driver
to allocate and initialize stimer clockevents earlier during boot. Update
Hyper-V initialization and the VMbus driver to use this new design. As a
result, the LAPIC timer is no longer used during boot or CPU
onlining/offlining and clockevents_unbind_device() is not called. But
retain the old design as a legacy implementation for older versions of
Hyper-V that don't support Direct Mode.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Link: https://lkml.kernel.org/r/1573607467-9456-1-git-send-email-mikelley@microsoft.com
2019-11-13 01:11:49 +00:00
|
|
|
*/
|
x86/hyperv: Initialize clockevents after LAPIC is initialized
With commit 4df4cb9e99f8, the Hyper-V direct-mode STIMER is actually
initialized before LAPIC is initialized: see
apic_intr_mode_init()
x86_platform.apic_post_init()
hyperv_init()
hv_stimer_alloc()
apic_bsp_setup()
setup_local_APIC()
setup_local_APIC() temporarily disables LAPIC, initializes it and
re-eanble it. The direct-mode STIMER depends on LAPIC, and when it's
registered, it can be programmed immediately and the timer can fire
very soon:
hv_stimer_init
clockevents_config_and_register
clockevents_register_device
tick_check_new_device
tick_setup_device
tick_setup_periodic(), tick_setup_oneshot()
clockevents_program_event
When the timer fires in the hypervisor, if the LAPIC is in the
disabled state, new versions of Hyper-V ignore the event and don't inject
the timer interrupt into the VM, and hence the VM hangs when it boots.
Note: when the VM starts/reboots, the LAPIC is pre-enabled by the
firmware, so the window of LAPIC being temporarily disabled is pretty
small, and the issue can only happen once out of 100~200 reboots for
a 40-vCPU VM on one dev host, and on another host the issue doesn't
reproduce after 2000 reboots.
The issue is more noticeable for kdump/kexec, because the LAPIC is
disabled by the first kernel, and stays disabled until the kdump/kexec
kernel enables it. This is especially an issue to a Generation-2 VM
(for which Hyper-V doesn't emulate the PIT timer) when CONFIG_HZ=1000
(rather than CONFIG_HZ=250) is used.
Fix the issue by moving hv_stimer_alloc() to a later place where the
LAPIC timer is initialized.
Fixes: 4df4cb9e99f8 ("x86/hyperv: Initialize clockevents earlier in CPU onlining")
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20210116223136.13892-1-decui@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
2021-01-16 22:31:36 +00:00
|
|
|
old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
|
|
|
|
x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
|
x86/hyperv: Initialize clockevents earlier in CPU onlining
Hyper-V has historically initialized stimer-based clockevents late in the
process of onlining a CPU because clockevents depend on stimer
interrupts. In the original Hyper-V design, stimer interrupts generate a
VMbus message, so the VMbus machinery must be running first, and VMbus
can't be initialized until relatively late. On x86/64, LAPIC timer based
clockevents are used during early initialization before VMbus and
stimer-based clockevents are ready, and again during CPU offlining after
the stimer clockevents have been shut down.
Unfortunately, this design creates problems when offlining CPUs for
hibernation or other purposes. stimer-based clockevents are shut down
relatively early in the offlining process, so clockevents_unbind_device()
must be used to fallback to the LAPIC-based clockevents for the remainder
of the offlining process. Furthermore, the late initialization and early
shutdown of stimer-based clockevents doesn't work well on ARM64 since there
is no other timer like the LAPIC to fallback to. So CPU onlining and
offlining doesn't work properly.
Fix this by recognizing that stimer Direct Mode is the normal path for
newer versions of Hyper-V on x86/64, and the only path on other
architectures. With stimer Direct Mode, stimer interrupts don't require any
VMbus machinery. stimer clockevents can be initialized and shut down
consistent with how it is done for other clockevent devices. While the old
VMbus-based stimer interrupts must still be supported for backward
compatibility on x86, that mode of operation can be treated as legacy.
So add a new Hyper-V stimer entry in the CPU hotplug state list, and use
that new state when in Direct Mode. Update the Hyper-V clocksource driver
to allocate and initialize stimer clockevents earlier during boot. Update
Hyper-V initialization and the VMbus driver to use this new design. As a
result, the LAPIC timer is no longer used during boot or CPU
onlining/offlining and clockevents_unbind_device() is not called. But
retain the old design as a legacy implementation for older versions of
Hyper-V that don't support Direct Mode.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Dexuan Cui <decui@microsoft.com>
Link: https://lkml.kernel.org/r/1573607467-9456-1-git-send-email-mikelley@microsoft.com
2019-11-13 01:11:49 +00:00
|
|
|
|
2018-05-16 21:53:30 +00:00
|
|
|
hv_apic_init();
|
|
|
|
|
2018-09-18 22:29:50 +00:00
|
|
|
x86_init.pci.arch_init = hv_pci_init;
|
|
|
|
|
2020-01-06 22:42:39 +00:00
|
|
|
register_syscore_ops(&hv_syscore_ops);
|
|
|
|
|
2021-02-03 15:04:25 +00:00
|
|
|
if (cpuid_ebx(HYPERV_CPUID_FEATURES) & HV_ACCESS_PARTITION_ID)
|
|
|
|
hv_get_partition_id();
|
|
|
|
|
|
|
|
BUG_ON(hv_root_partition && hv_current_partition_id == ~0ull);
|
|
|
|
|
2021-02-03 15:04:34 +00:00
|
|
|
#ifdef CONFIG_PCI_MSI
|
|
|
|
/*
|
|
|
|
* If we're running as root, we want to create our own PCI MSI domain.
|
|
|
|
* We can't set this in hv_pci_init because that would be too late.
|
|
|
|
*/
|
|
|
|
if (hv_root_partition)
|
|
|
|
x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
|
|
|
|
#endif
|
|
|
|
|
2021-03-23 18:47:16 +00:00
|
|
|
/* Query the VMs extended capability once, so that it can be cached. */
|
|
|
|
hv_query_ext_cap(0);
|
2021-12-13 07:14:04 +00:00
|
|
|
|
2023-08-18 10:29:12 +00:00
|
|
|
/* Find the VTL */
|
2023-09-20 04:04:35 +00:00
|
|
|
ms_hyperv.vtl = get_vtl();
|
2023-08-18 10:29:12 +00:00
|
|
|
|
2023-09-22 04:58:40 +00:00
|
|
|
if (ms_hyperv.vtl > 0) /* non default VTL */
|
|
|
|
hv_vtl_early_init();
|
|
|
|
|
2017-08-02 16:09:18 +00:00
|
|
|
return;
|
|
|
|
|
2021-10-25 12:21:06 +00:00
|
|
|
clean_guest_os_id:
|
|
|
|
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
|
2023-08-24 08:07:10 +00:00
|
|
|
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
|
2024-08-28 11:21:56 +00:00
|
|
|
cpuhp_remove_state(CPUHP_AP_HYPERV_ONLINE);
|
2021-10-25 12:21:06 +00:00
|
|
|
free_ghcb_page:
|
|
|
|
free_percpu(hv_ghcb_pg);
|
2018-03-20 14:02:08 +00:00
|
|
|
free_vp_assist_page:
|
|
|
|
kfree(hv_vp_assist_page);
|
|
|
|
hv_vp_assist_page = NULL;
|
2021-07-14 18:34:45 +00:00
|
|
|
common_free:
|
|
|
|
hv_common_free();
|
2017-01-18 23:45:02 +00:00
|
|
|
}
|
2017-01-18 23:45:03 +00:00
|
|
|
|
2017-01-28 19:37:14 +00:00
|
|
|
/*
|
|
|
|
* This routine is called before kexec/kdump, it does the required cleanup.
|
|
|
|
*/
|
|
|
|
void hyperv_cleanup(void)
|
|
|
|
{
|
|
|
|
union hv_x64_msr_hypercall_contents hypercall_msr;
|
2022-10-27 09:57:29 +00:00
|
|
|
union hv_reference_tsc_msr tsc_msr;
|
2017-01-28 19:37:14 +00:00
|
|
|
|
|
|
|
/* Reset our OS id */
|
|
|
|
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
|
2023-08-24 08:07:10 +00:00
|
|
|
hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
|
2017-01-28 19:37:14 +00:00
|
|
|
|
2019-03-06 11:18:27 +00:00
|
|
|
/*
|
|
|
|
* Reset hypercall page reference before reset the page,
|
|
|
|
* let hypercall operations fail safely rather than
|
|
|
|
* panic the kernel for using invalid hypercall page
|
|
|
|
*/
|
|
|
|
hv_hypercall_pg = NULL;
|
|
|
|
|
2017-01-28 19:37:14 +00:00
|
|
|
/* Reset the hypercall page */
|
hyperv-tlfs: Change prefix of generic HV_REGISTER_* MSRs to HV_MSR_*
The HV_REGISTER_ are used as arguments to hv_set/get_register(), which
delegate to arch-specific mechanisms for getting/setting synthetic
Hyper-V MSRs.
On arm64, HV_REGISTER_ defines are synthetic VP registers accessed via
the get/set vp registers hypercalls. The naming matches the TLFS
document, although these register names are not specific to arm64.
However, on x86 the prefix HV_REGISTER_ indicates Hyper-V MSRs accessed
via rdmsrl()/wrmsrl(). This is not consistent with the TLFS doc, where
HV_REGISTER_ is *only* used for used for VP register names used by
the get/set register hypercalls.
To fix this inconsistency and prevent future confusion, change the
arch-generic aliases used by callers of hv_set/get_register() to have
the prefix HV_MSR_ instead of HV_REGISTER_.
Use the prefix HV_X64_MSR_ for the x86-only Hyper-V MSRs. On x86, the
generic HV_MSR_'s point to the corresponding HV_X64_MSR_.
Move the arm64 HV_REGISTER_* defines to the asm-generic hyperv-tlfs.h,
since these are not specific to arm64. On arm64, the generic HV_MSR_'s
point to the corresponding HV_REGISTER_.
While at it, rename hv_get/set_registers() and related functions to
hv_get/set_msr(), hv_get/set_nested_msr(), etc. These are only used for
Hyper-V MSRs and this naming makes that clear.
Signed-off-by: Nuno Das Neves <nunodasneves@linux.microsoft.com>
Reviewed-by: Wei Liu <wei.liu@kernel.org>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Message-ID: <1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com>
2024-02-20 14:55:33 +00:00
|
|
|
hypercall_msr.as_uint64 = hv_get_msr(HV_X64_MSR_HYPERCALL);
|
2022-10-27 09:57:29 +00:00
|
|
|
hypercall_msr.enable = 0;
|
hyperv-tlfs: Change prefix of generic HV_REGISTER_* MSRs to HV_MSR_*
The HV_REGISTER_ are used as arguments to hv_set/get_register(), which
delegate to arch-specific mechanisms for getting/setting synthetic
Hyper-V MSRs.
On arm64, HV_REGISTER_ defines are synthetic VP registers accessed via
the get/set vp registers hypercalls. The naming matches the TLFS
document, although these register names are not specific to arm64.
However, on x86 the prefix HV_REGISTER_ indicates Hyper-V MSRs accessed
via rdmsrl()/wrmsrl(). This is not consistent with the TLFS doc, where
HV_REGISTER_ is *only* used for used for VP register names used by
the get/set register hypercalls.
To fix this inconsistency and prevent future confusion, change the
arch-generic aliases used by callers of hv_set/get_register() to have
the prefix HV_MSR_ instead of HV_REGISTER_.
Use the prefix HV_X64_MSR_ for the x86-only Hyper-V MSRs. On x86, the
generic HV_MSR_'s point to the corresponding HV_X64_MSR_.
Move the arm64 HV_REGISTER_* defines to the asm-generic hyperv-tlfs.h,
since these are not specific to arm64. On arm64, the generic HV_MSR_'s
point to the corresponding HV_REGISTER_.
While at it, rename hv_get/set_registers() and related functions to
hv_get/set_msr(), hv_get/set_nested_msr(), etc. These are only used for
Hyper-V MSRs and this naming makes that clear.
Signed-off-by: Nuno Das Neves <nunodasneves@linux.microsoft.com>
Reviewed-by: Wei Liu <wei.liu@kernel.org>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Message-ID: <1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com>
2024-02-20 14:55:33 +00:00
|
|
|
hv_set_msr(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
2017-01-28 19:37:15 +00:00
|
|
|
|
|
|
|
/* Reset the TSC page */
|
hyperv-tlfs: Change prefix of generic HV_REGISTER_* MSRs to HV_MSR_*
The HV_REGISTER_ are used as arguments to hv_set/get_register(), which
delegate to arch-specific mechanisms for getting/setting synthetic
Hyper-V MSRs.
On arm64, HV_REGISTER_ defines are synthetic VP registers accessed via
the get/set vp registers hypercalls. The naming matches the TLFS
document, although these register names are not specific to arm64.
However, on x86 the prefix HV_REGISTER_ indicates Hyper-V MSRs accessed
via rdmsrl()/wrmsrl(). This is not consistent with the TLFS doc, where
HV_REGISTER_ is *only* used for used for VP register names used by
the get/set register hypercalls.
To fix this inconsistency and prevent future confusion, change the
arch-generic aliases used by callers of hv_set/get_register() to have
the prefix HV_MSR_ instead of HV_REGISTER_.
Use the prefix HV_X64_MSR_ for the x86-only Hyper-V MSRs. On x86, the
generic HV_MSR_'s point to the corresponding HV_X64_MSR_.
Move the arm64 HV_REGISTER_* defines to the asm-generic hyperv-tlfs.h,
since these are not specific to arm64. On arm64, the generic HV_MSR_'s
point to the corresponding HV_REGISTER_.
While at it, rename hv_get/set_registers() and related functions to
hv_get/set_msr(), hv_get/set_nested_msr(), etc. These are only used for
Hyper-V MSRs and this naming makes that clear.
Signed-off-by: Nuno Das Neves <nunodasneves@linux.microsoft.com>
Reviewed-by: Wei Liu <wei.liu@kernel.org>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Message-ID: <1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com>
2024-02-20 14:55:33 +00:00
|
|
|
tsc_msr.as_uint64 = hv_get_msr(HV_X64_MSR_REFERENCE_TSC);
|
2022-10-27 09:57:29 +00:00
|
|
|
tsc_msr.enable = 0;
|
hyperv-tlfs: Change prefix of generic HV_REGISTER_* MSRs to HV_MSR_*
The HV_REGISTER_ are used as arguments to hv_set/get_register(), which
delegate to arch-specific mechanisms for getting/setting synthetic
Hyper-V MSRs.
On arm64, HV_REGISTER_ defines are synthetic VP registers accessed via
the get/set vp registers hypercalls. The naming matches the TLFS
document, although these register names are not specific to arm64.
However, on x86 the prefix HV_REGISTER_ indicates Hyper-V MSRs accessed
via rdmsrl()/wrmsrl(). This is not consistent with the TLFS doc, where
HV_REGISTER_ is *only* used for used for VP register names used by
the get/set register hypercalls.
To fix this inconsistency and prevent future confusion, change the
arch-generic aliases used by callers of hv_set/get_register() to have
the prefix HV_MSR_ instead of HV_REGISTER_.
Use the prefix HV_X64_MSR_ for the x86-only Hyper-V MSRs. On x86, the
generic HV_MSR_'s point to the corresponding HV_X64_MSR_.
Move the arm64 HV_REGISTER_* defines to the asm-generic hyperv-tlfs.h,
since these are not specific to arm64. On arm64, the generic HV_MSR_'s
point to the corresponding HV_REGISTER_.
While at it, rename hv_get/set_registers() and related functions to
hv_get/set_msr(), hv_get/set_nested_msr(), etc. These are only used for
Hyper-V MSRs and this naming makes that clear.
Signed-off-by: Nuno Das Neves <nunodasneves@linux.microsoft.com>
Reviewed-by: Wei Liu <wei.liu@kernel.org>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Message-ID: <1708440933-27125-1-git-send-email-nunodasneves@linux.microsoft.com>
2024-02-20 14:55:33 +00:00
|
|
|
hv_set_msr(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
|
2017-01-28 19:37:14 +00:00
|
|
|
}
|
|
|
|
|
2020-04-06 15:53:31 +00:00
|
|
|
/*
 * Report a guest panic/oops to Hyper-V through the crash MSRs so the host
 * can capture the failure details.
 *
 * @regs:   register state at the point of failure
 * @err:    error code reported as crash parameter P0
 * @in_die: true when invoked from the 'die' notifier chain
 *
 * We prefer to report on the 'die' chain because proper registers are
 * available there, but fall back to the 'panic' chain (e.g. on BUG()).
 * Only the first report wins; subsequent calls are ignored.
 */
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
{
	static bool panic_reported;
	u64 os_id;

	/* Skip oops reports unless panic_on_oops, and report only once. */
	if ((in_die && !panic_on_oops) || panic_reported)
		return;

	panic_reported = true;

	rdmsrl(HV_X64_MSR_GUEST_OS_ID, os_id);

	/* Stash the crash details in the five crash-parameter MSRs. */
	wrmsrl(HV_X64_MSR_CRASH_P0, err);
	wrmsrl(HV_X64_MSR_CRASH_P1, os_id);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->sp);

	/* Tell Hyper-V that crash data is now available. */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
|
2017-01-19 18:51:49 +00:00
|
|
|
|
2017-12-22 18:19:02 +00:00
|
|
|
bool hv_is_hyperv_initialized(void)
|
2017-01-19 18:51:49 +00:00
|
|
|
{
|
|
|
|
union hv_x64_msr_hypercall_contents hypercall_msr;
|
|
|
|
|
2017-12-22 18:19:02 +00:00
|
|
|
/*
|
|
|
|
* Ensure that we're really on Hyper-V, and not a KVM or Xen
|
|
|
|
* emulation of Hyper-V
|
|
|
|
*/
|
|
|
|
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
|
|
|
|
return false;
|
|
|
|
|
x86/hyperv: Introduce a global variable hyperv_paravisor_present
The new variable hyperv_paravisor_present is set only when the VM
is a SNP/TDX VM with the paravisor running: see ms_hyperv_init_platform().
We introduce hyperv_paravisor_present because we can not use
ms_hyperv.paravisor_present in arch/x86/include/asm/mshyperv.h:
struct ms_hyperv_info is defined in include/asm-generic/mshyperv.h, which
is included at the end of arch/x86/include/asm/mshyperv.h, but at the
beginning of arch/x86/include/asm/mshyperv.h, we would already need to use
struct ms_hyperv_info in hv_do_hypercall().
We use hyperv_paravisor_present only in include/asm-generic/mshyperv.h,
and use ms_hyperv.paravisor_present elsewhere. In the future, we'll
introduce a hypercall function structure for different VM types, and
at boot time, the right function pointers would be written into the
structure so that runtime testing of TDX vs. SNP vs. normal will be
avoided and hyperv_paravisor_present will no longer be needed.
Call hv_vtom_init() when it's a VBS VM or when ms_hyperv.paravisor_present
is true, i.e. the VM is a SNP VM or TDX VM with the paravisor.
Enhance hv_vtom_init() for a TDX VM with the paravisor.
In hv_common_cpu_init(), don't decrypt the hyperv_pcpu_input_arg
for a TDX VM with the paravisor, just like we don't decrypt the page
for a SNP VM with the paravisor.
Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Tianyu Lan <tiala@microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Signed-off-by: Wei Liu <wei.liu@kernel.org>
Link: https://lore.kernel.org/r/20230824080712.30327-7-decui@microsoft.com
2023-08-24 08:07:08 +00:00
|
|
|
/* A TDX VM with no paravisor uses TDX GHCI call rather than hv_hypercall_pg */
|
|
|
|
if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
|
2023-08-24 08:07:04 +00:00
|
|
|
return true;
|
2017-12-22 18:19:02 +00:00
|
|
|
/*
|
|
|
|
* Verify that earlier initialization succeeded by checking
|
|
|
|
* that the hypercall page is setup
|
|
|
|
*/
|
2017-01-19 18:51:49 +00:00
|
|
|
hypercall_msr.as_uint64 = 0;
|
|
|
|
rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
|
|
|
|
|
2017-12-22 18:19:02 +00:00
|
|
|
return hypercall_msr.enable;
|
2017-01-19 18:51:49 +00:00
|
|
|
}
|
2017-12-22 18:19:02 +00:00
|
|
|
EXPORT_SYMBOL_GPL(hv_is_hyperv_initialized);
|