Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git (synced 2024-12-29 17:22:07 +00:00)
-----BEGIN PGP SIGNATURE-----
iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmX0iP8UHHBib256aW5p
QHJlZGhhdC5jb20ACgkQv/vSX3jHroND7wf+JZoNvwZ+bmwWe/4jn/YwNoYi/C5z
eypn8M1gsWEccpCpqPBwznVm9T29rF4uOlcMvqLEkHfTpaL1EKUUjP1lXPz/ileP
6a2RdOGxAhyTiFC9fjy+wkkjtLbn1kZf6YsS0hjphP9+w0chNbdn0w81dFVnXryd
j7XYI8R/bFAthNsJOuZXSEjCfIHxvTTG74OrTf1B1FEBB+arPmrgUeJftMVhffQK
Sowgg8L/Ii/x6fgV5NZQVSIyVf1rp8z7c6UaHT4Fwb0+RAMW8p9pYv9Qp1YkKp8y
5j0V9UzOHP7FRaYimZ5BtwQoqiZXYylQ+VuU/Y2f4X85cvlLzSqxaEMAPA==
=mqOV
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm updates from Paolo Bonzini:

 "S390:

  - Changes to FPU handling came in via the main s390 pull request
  - Only deliver to the guest the SCLP events that userspace has requested
  - More virtual vs physical address fixes (only a cleanup since virtual and physical address spaces are currently the same)
  - Fix selftests undefined behavior

  x86:

  - Fix a restriction that the guest can't program a PMU event whose encoding matches an architectural event that isn't included in the guest CPUID. The enumeration of an architectural event only says that if a CPU supports an architectural event, then the event can be programmed *using the architectural encoding*. The enumeration does NOT say anything about the encoding when the CPU doesn't report support for the event *in general*. It might support it, and it might support it using the same encoding that made it into the architectural PMU spec
  - Fix a variety of bugs in KVM's emulation of RDPMC (more details on individual commits) and add a selftest to verify KVM correctly emulates RDPMC, counter availability, and a variety of other PMC-related behaviors that depend on guest CPUID and therefore are easier to validate with selftests than with custom guests (aka kvm-unit-tests)
  - Zero out PMU state on AMD if the virtual PMU is disabled; it does not cause any bug, but it wastes time in various cases where KVM would check if a PMC event needs to be synthesized
  - Optimize triggering of emulated events, with a nice ~10% performance improvement in VM-Exit microbenchmarks when a vPMU is exposed to the guest
  - Tighten the check for "PMI in guest" to reduce false positives if an NMI arrives in the host while KVM is handling an IRQ VM-Exit
  - Fix a bug where KVM would report stale/bogus exit qualification information when exiting to userspace with an internal error exit code
  - Add a VMX flag in /proc/cpuinfo to report 5-level EPT support
  - Rework TDP MMU root unload, free, and alloc to run with mmu_lock held for read, e.g. to avoid serializing vCPUs when userspace deletes a memslot
  - Tear down TDP MMU page tables at 4KiB granularity (used to be 1GiB). KVM doesn't support yielding in the middle of processing a zap, and 1GiB granularity resulted in multi-millisecond lags that are quite impolite for CONFIG_PREEMPT kernels
  - Allocate write-tracking metadata on-demand to avoid the memory overhead when a kernel is built with i915 virtualization support but the workloads use neither shadow paging nor i915 virtualization
  - Explicitly initialize a variety of on-stack variables in the emulator that triggered KMSAN false positives
  - Fix the debugregs ABI for 32-bit KVM
  - Rework the "force immediate exit" code so that vendor code ultimately decides how and when to force the exit, which allowed some optimization for both Intel and AMD
  - Fix a long-standing bug where kvm_has_noapic_vcpu could be left elevated if vCPU creation ultimately failed, causing extra unnecessary work
  - Clean up the logic for checking if the currently loaded vCPU is in-kernel
  - Harden against underflowing the active mmu_notifier invalidation count, so that "bad" invalidations (usually due to bugs elsewhere in the kernel) are detected earlier and are less likely to hang the kernel

  x86 Xen emulation:

  - Overlay pages can now be cached based on host virtual address, instead of guest physical addresses. This removes the need to reconfigure and invalidate the cache if the guest changes the gpa but the underlying host virtual address remains the same
  - When possible, use a single host TSC value when computing the deadline for Xen timers in order to improve the accuracy of the timer emulation
  - Inject pending upcall events when the vCPU software-enables its APIC to fix a bug where an upcall can be lost (and to follow Xen's behavior)
  - Fall back to the slow path instead of warning if "fast" IRQ delivery of Xen events fails, e.g. if the guest has aliased xAPIC IDs

  RISC-V:

  - Support exception and interrupt handling in selftests
  - New selftest for the RISC-V architectural timer (Sstc extension)
  - New extension support (Ztso, Zacas)
  - Support userspace emulation of random number seed CSRs

  ARM:

  - Infrastructure for building KVM's trap configuration based on the architectural features (or lack thereof) advertised in the VM's ID registers
  - Support for mapping vfio-pci BARs as Normal-NC (vaguely similar to x86's WC) at stage-2, improving the performance of interacting with assigned devices that can tolerate it
  - Conversion of KVM's representation of LPIs to an xarray, utilized to address some of the serialization on the LPI injection path
  - Support for _architectural_ VHE-only systems, advertised through the absence of FEAT_E2H0 in the CPU's ID register
  - Miscellaneous cleanups, fixes, and spelling corrections to KVM and selftests

  LoongArch:

  - Set reserved bits as zero in CPUCFG
  - Start the SW timer only when the vcpu is blocking
  - Do not restart the SW timer when it has expired
  - Remove unnecessary CSR register saving during guest entry
  - Misc cleanups and fixes as usual

  Generic:

  - Clean up Kconfig by removing CONFIG_HAVE_KVM, which was basically always true on all architectures except MIPS (where Kconfig determines availability depending on CPU capabilities). It is replaced by an architecture-dependent symbol for MIPS, and by IS_ENABLED(CONFIG_KVM) everywhere else
  - Factor common "select" statements into common code instead of requiring each architecture to specify them
  - Remove thoroughly obsolete APIs from the uapi headers
  - Move architecture-dependent stuff to uapi/asm/kvm.h
  - Always flush the async page fault workqueue when a work item is being removed, especially during vCPU destruction, to ensure that there are no workers running in KVM code when all references to KVM-the-module are gone, i.e. to prevent a very unlikely use-after-free if kvm.ko is unloaded
  - Grab a reference to the VM's mm_struct in the async #PF worker itself instead of gifting the worker a reference, so that there's no need to remember to *conditionally* clean up after the worker

  Selftests:

  - Reduce boilerplate, especially when using the selftest TAP infrastructure
  - Add basic smoke tests for SEV and SEV-ES, along with a pile of library support for handling private/encrypted/protected memory
  - Fix benign bugs where tests neglect to close() guest_memfd files"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (246 commits)
  selftests: kvm: remove meaningless assignments in Makefiles
  KVM: riscv: selftests: Add Zacas extension to get-reg-list test
  RISC-V: KVM: Allow Zacas extension for Guest/VM
  KVM: riscv: selftests: Add Ztso extension to get-reg-list test
  RISC-V: KVM: Allow Ztso extension for Guest/VM
  RISC-V: KVM: Forward SEED CSR access to user space
  KVM: riscv: selftests: Add sstc timer test
  KVM: riscv: selftests: Change vcpu_has_ext to a common function
  KVM: riscv: selftests: Add guest helper to get vcpu id
  KVM: riscv: selftests: Add exception handling support
  LoongArch: KVM: Remove unnecessary CSR register saving during enter guest
  LoongArch: KVM: Do not restart SW timer when it is expired
  LoongArch: KVM: Start SW timer only when vcpu is blocking
  LoongArch: KVM: Set reserved bits as zero in CPUCFG
  KVM: selftests: Explicitly close guest_memfd files in some gmem tests
  KVM: x86/xen: fix recursive deadlock in timer injection
  KVM: pfncache: simplify locking and make more self-contained
  KVM: x86/xen: remove WARN_ON_ONCE() with false positives in evtchn delivery
  KVM: x86/xen: inject vCPU upcall vector when local APIC is enabled
  KVM: x86/xen: improve accuracy of Xen timers
  ...
commit 4f712ee0cb
@ -372,7 +372,7 @@ The bits in the dirty bitmap are cleared before the ioctl returns, unless
|
||||
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
|
||||
see the description of the capability.
|
||||
|
||||
Note that the Xen shared info page, if configured, shall always be assumed
|
||||
Note that the Xen shared_info page, if configured, shall always be assumed
|
||||
to be dirty. KVM will not explicitly mark it such.
|
||||
|
||||
|
||||
@ -5487,8 +5487,9 @@ KVM_PV_ASYNC_CLEANUP_PERFORM
|
||||
__u8 long_mode;
|
||||
__u8 vector;
|
||||
__u8 runstate_update_flag;
|
||||
struct {
|
||||
union {
|
||||
__u64 gfn;
|
||||
__u64 hva;
|
||||
} shared_info;
|
||||
struct {
|
||||
__u32 send_port;
|
||||
@ -5516,19 +5517,20 @@ type values:
|
||||
|
||||
KVM_XEN_ATTR_TYPE_LONG_MODE
|
||||
Sets the ABI mode of the VM to 32-bit or 64-bit (long mode). This
|
||||
determines the layout of the shared info pages exposed to the VM.
|
||||
determines the layout of the shared_info page exposed to the VM.
|
||||
|
||||
KVM_XEN_ATTR_TYPE_SHARED_INFO
|
||||
Sets the guest physical frame number at which the Xen "shared info"
|
||||
Sets the guest physical frame number at which the Xen shared_info
|
||||
page resides. Note that although Xen places vcpu_info for the first
|
||||
32 vCPUs in the shared_info page, KVM does not automatically do so
|
||||
and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used
|
||||
explicitly even when the vcpu_info for a given vCPU resides at the
|
||||
"default" location in the shared_info page. This is because KVM may
|
||||
not be aware of the Xen CPU id which is used as the index into the
|
||||
vcpu_info[] array, so may know the correct default location.
|
||||
and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO or
|
||||
KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA be used explicitly even when
|
||||
the vcpu_info for a given vCPU resides at the "default" location
|
||||
in the shared_info page. This is because KVM may not be aware of
|
||||
the Xen CPU id which is used as the index into the vcpu_info[]
|
||||
array, so may know the correct default location.
|
||||
|
||||
Note that the shared info page may be constantly written to by KVM;
|
||||
Note that the shared_info page may be constantly written to by KVM;
|
||||
it contains the event channel bitmap used to deliver interrupts to
|
||||
a Xen guest, amongst other things. It is exempt from dirty tracking
|
||||
mechanisms — KVM will not explicitly mark the page as dirty each
|
||||
@ -5537,9 +5539,21 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
|
||||
any vCPU has been running or any event channel interrupts can be
|
||||
routed to the guest.
|
||||
|
||||
Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared info
|
||||
Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared_info
|
||||
page.
|
||||
|
||||
KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA
|
||||
If the KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA flag is also set in the
|
||||
Xen capabilities, then this attribute may be used to set the
|
||||
userspace address at which the shared_info page resides, which
|
||||
will always be fixed in the VMM regardless of where it is mapped
|
||||
in guest physical address space. This attribute should be used in
|
||||
preference to KVM_XEN_ATTR_TYPE_SHARED_INFO as it avoids
|
||||
unnecessary invalidation of an internal cache when the page is
|
||||
re-mapped in guest physcial address space.
|
||||
|
||||
Setting the hva to zero will disable the shared_info page.
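  As a rough illustration only (not part of this patch or its documentation), a
  VMM that has confirmed the KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA flag in the
  KVM_CAP_XEN_HVM capability might set this attribute as sketched below;
  ``vm_fd`` and ``shinfo`` are hypothetical names::

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /*
     * Hedged sketch: tell KVM where the shared_info overlay lives in the
     * VMM's address space.  vm_fd is assumed to be the KVM VM file
     * descriptor and shinfo the page-aligned mapping backing the overlay.
     */
    static int set_shared_info_hva(int vm_fd, void *shinfo)
    {
        struct kvm_xen_hvm_attr attr = {
            .type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
            .u.shared_info.hva = (unsigned long)shinfo,
        };

        /* An hva of zero would disable the shared_info page instead. */
        return ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &attr);
    }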
|
||||
|
||||
KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
|
||||
Sets the exception vector used to deliver Xen event channel upcalls.
|
||||
This is the HVM-wide vector injected directly by the hypervisor
|
||||
@ -5636,6 +5650,21 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
|
||||
on dirty logging. Setting the gpa to KVM_XEN_INVALID_GPA will disable
|
||||
the vcpu_info.
|
||||
|
||||
KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA
|
||||
If the KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA flag is also set in the
|
||||
Xen capabilities, then this attribute may be used to set the
|
||||
userspace address of the vcpu_info for a given vCPU. It should
|
||||
only be used when the vcpu_info resides at the "default" location
|
||||
in the shared_info page. In this case it is safe to assume the
|
||||
userspace address will not change, because the shared_info page is
|
||||
an overlay on guest memory and remains at a fixed host address
|
||||
regardless of where it is mapped in guest physical address space
|
||||
and hence unnecessary invalidation of an internal cache may be
|
||||
avoided if the guest memory layout is modified.
|
||||
If the vcpu_info does not reside at the "default" location then
|
||||
it is not guaranteed to remain at the same host address and
|
||||
hence the aforementioned cache invalidation is required.
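  The per-vCPU attribute mirrors the VM-wide one. Purely as a hypothetical
  sketch (``vcpu_fd`` and ``vcpu_info`` are illustrative names, and the
  ``hva`` member is assumed to sit alongside ``gpa`` in the union of struct
  kvm_xen_vcpu_attr), it could be set as follows::

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Point this vCPU's vcpu_info at a fixed userspace address. */
    static int set_vcpu_info_hva(int vcpu_fd, void *vcpu_info)
    {
        struct kvm_xen_vcpu_attr attr = {
            .type = KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA,
            .u.hva = (unsigned long)vcpu_info,
        };

        return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
    }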
|
||||
|
||||
KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
|
||||
Sets the guest physical address of an additional pvclock structure
|
||||
for a given vCPU. This is typically used for guest vsyscall support.
|
||||
|
@ -216,7 +216,6 @@ config ARM64
|
||||
select HAVE_HW_BREAKPOINT if PERF_EVENTS
|
||||
select HAVE_IOREMAP_PROT
|
||||
select HAVE_IRQ_TIME_ACCOUNTING
|
||||
select HAVE_KVM
|
||||
select HAVE_MOD_ARCH_SPECIFIC
|
||||
select HAVE_NMI
|
||||
select HAVE_PERF_EVENTS
|
||||
|
@ -57,6 +57,7 @@ struct cpuinfo_arm64 {
|
||||
u64 reg_id_aa64mmfr1;
|
||||
u64 reg_id_aa64mmfr2;
|
||||
u64 reg_id_aa64mmfr3;
|
||||
u64 reg_id_aa64mmfr4;
|
||||
u64 reg_id_aa64pfr0;
|
||||
u64 reg_id_aa64pfr1;
|
||||
u64 reg_id_aa64pfr2;
|
||||
|
@ -364,6 +364,7 @@ struct arm64_cpu_capabilities {
|
||||
u8 field_pos;
|
||||
u8 field_width;
|
||||
u8 min_field_value;
|
||||
u8 max_field_value;
|
||||
u8 hwcap_type;
|
||||
bool sign;
|
||||
unsigned long hwcap;
|
||||
|
@ -102,9 +102,7 @@
|
||||
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
|
||||
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
|
||||
|
||||
#define HCRX_GUEST_FLAGS \
|
||||
(HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
|
||||
(cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
|
||||
#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En)
|
||||
#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
|
||||
|
||||
/* TCR_EL2 Registers bits */
|
||||
|
@ -209,7 +209,8 @@ static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
|
||||
|
||||
static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
|
||||
return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
|
||||
(ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
|
||||
}
|
||||
|
||||
static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
|
||||
|
@ -238,9 +238,32 @@ static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
|
||||
return index;
|
||||
}
|
||||
|
||||
struct kvm_sysreg_masks;
|
||||
|
||||
enum fgt_group_id {
|
||||
__NO_FGT_GROUP__,
|
||||
HFGxTR_GROUP,
|
||||
HDFGRTR_GROUP,
|
||||
HDFGWTR_GROUP = HDFGRTR_GROUP,
|
||||
HFGITR_GROUP,
|
||||
HAFGRTR_GROUP,
|
||||
|
||||
/* Must be last */
|
||||
__NR_FGT_GROUP_IDS__
|
||||
};
|
||||
|
||||
struct kvm_arch {
|
||||
struct kvm_s2_mmu mmu;
|
||||
|
||||
/*
|
||||
* Fine-Grained UNDEF, mimicking the FGT layout defined by the
|
||||
* architecture. We track them globally, as we present the
|
||||
* same feature-set to all vcpus.
|
||||
*
|
||||
* Index 0 is currently spare.
|
||||
*/
|
||||
u64 fgu[__NR_FGT_GROUP_IDS__];
|
||||
|
||||
/* Interrupt controller */
|
||||
struct vgic_dist vgic;
|
||||
|
||||
@ -274,6 +297,8 @@ struct kvm_arch {
|
||||
#define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
|
||||
/* Initial ID reg values loaded */
|
||||
#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
|
||||
/* Fine-Grained UNDEF initialised */
|
||||
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
|
||||
unsigned long flags;
|
||||
|
||||
/* VM-wide vCPU feature set */
|
||||
@ -294,6 +319,9 @@ struct kvm_arch {
|
||||
/* PMCR_EL0.N value for the guest */
|
||||
u8 pmcr_n;
|
||||
|
||||
/* Iterator for idreg debugfs */
|
||||
u8 idreg_debugfs_iter;
|
||||
|
||||
/* Hypercall features firmware registers' descriptor */
|
||||
struct kvm_smccc_features smccc_feat;
|
||||
struct maple_tree smccc_filter;
|
||||
@ -312,6 +340,9 @@ struct kvm_arch {
|
||||
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
|
||||
u64 id_regs[KVM_ARM_ID_REG_NUM];
|
||||
|
||||
/* Masks for VNCR-baked sysregs */
|
||||
struct kvm_sysreg_masks *sysreg_masks;
|
||||
|
||||
/*
|
||||
* For an untrusted host VM, 'pkvm.handle' is used to lookup
|
||||
* the associated pKVM instance in the hypervisor.
|
||||
@ -474,6 +505,13 @@ enum vcpu_sysreg {
|
||||
NR_SYS_REGS /* Nothing after this line! */
|
||||
};
|
||||
|
||||
struct kvm_sysreg_masks {
|
||||
struct {
|
||||
u64 res0;
|
||||
u64 res1;
|
||||
} mask[NR_SYS_REGS - __VNCR_START__];
|
||||
};
|
||||
|
||||
struct kvm_cpu_context {
|
||||
struct user_pt_regs regs; /* sp = sp_el0 */
|
||||
|
||||
@ -550,6 +588,7 @@ struct kvm_vcpu_arch {
|
||||
|
||||
/* Values of trap registers for the guest. */
|
||||
u64 hcr_el2;
|
||||
u64 hcrx_el2;
|
||||
u64 mdcr_el2;
|
||||
u64 cptr_el2;
|
||||
|
||||
@ -869,7 +908,15 @@ static inline u64 *__ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
|
||||
|
||||
#define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
|
||||
|
||||
#define __vcpu_sys_reg(v,r) (ctxt_sys_reg(&(v)->arch.ctxt, (r)))
|
||||
u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
|
||||
#define __vcpu_sys_reg(v,r) \
|
||||
(*({ \
|
||||
const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
|
||||
u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
|
||||
if (vcpu_has_nv((v)) && (r) >= __VNCR_START__) \
|
||||
*__r = kvm_vcpu_sanitise_vncr_reg((v), (r)); \
|
||||
__r; \
|
||||
}))
|
||||
|
||||
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
|
||||
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
|
||||
@ -1056,14 +1103,20 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
|
||||
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
|
||||
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
|
||||
|
||||
void kvm_sys_regs_create_debugfs(struct kvm *kvm);
|
||||
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
|
||||
|
||||
int __init kvm_sys_reg_table_init(void);
|
||||
struct sys_reg_desc;
|
||||
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
|
||||
unsigned int idx);
|
||||
int __init populate_nv_trap_config(void);
|
||||
|
||||
bool lock_all_vcpus(struct kvm *kvm);
|
||||
void unlock_all_vcpus(struct kvm *kvm);
|
||||
|
||||
void kvm_init_sysreg(struct kvm_vcpu *);
|
||||
|
||||
/* MMIO helpers */
|
||||
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
|
||||
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
|
||||
@ -1234,4 +1287,48 @@ static inline void kvm_hyp_reserve(void) { }
|
||||
void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
|
||||
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
|
||||
|
||||
#define __expand_field_sign_unsigned(id, fld, val) \
|
||||
((u64)SYS_FIELD_VALUE(id, fld, val))
|
||||
|
||||
#define __expand_field_sign_signed(id, fld, val) \
|
||||
({ \
|
||||
u64 __val = SYS_FIELD_VALUE(id, fld, val); \
|
||||
sign_extend64(__val, id##_##fld##_WIDTH - 1); \
|
||||
})
|
||||
|
||||
#define expand_field_sign(id, fld, val) \
|
||||
(id##_##fld##_SIGNED ? \
|
||||
__expand_field_sign_signed(id, fld, val) : \
|
||||
__expand_field_sign_unsigned(id, fld, val))
|
||||
|
||||
#define get_idreg_field_unsigned(kvm, id, fld) \
|
||||
({ \
|
||||
u64 __val = IDREG((kvm), SYS_##id); \
|
||||
FIELD_GET(id##_##fld##_MASK, __val); \
|
||||
})
|
||||
|
||||
#define get_idreg_field_signed(kvm, id, fld) \
|
||||
({ \
|
||||
u64 __val = get_idreg_field_unsigned(kvm, id, fld); \
|
||||
sign_extend64(__val, id##_##fld##_WIDTH - 1); \
|
||||
})
|
||||
|
||||
#define get_idreg_field_enum(kvm, id, fld) \
|
||||
get_idreg_field_unsigned(kvm, id, fld)
|
||||
|
||||
#define get_idreg_field(kvm, id, fld) \
|
||||
(id##_##fld##_SIGNED ? \
|
||||
get_idreg_field_signed(kvm, id, fld) : \
|
||||
get_idreg_field_unsigned(kvm, id, fld))
|
||||
|
||||
#define kvm_has_feat(kvm, id, fld, limit) \
|
||||
(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, limit))
|
||||
|
||||
#define kvm_has_feat_enum(kvm, id, fld, val) \
|
||||
(get_idreg_field_unsigned((kvm), id, fld) == __expand_field_sign_unsigned(id, fld, val))
|
||||
|
||||
#define kvm_has_feat_range(kvm, id, fld, min, max) \
|
||||
(get_idreg_field((kvm), id, fld) >= expand_field_sign(id, fld, min) && \
|
||||
get_idreg_field((kvm), id, fld) <= expand_field_sign(id, fld, max))
|
||||
|
||||
#endif /* __ARM64_KVM_HOST_H__ */
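The kvm_has_feat() family added above is consumed elsewhere in KVM/arm64. As an illustrative, hedged fragment only (ID_AA64MMFR0_EL1.FGT is just one example of a generated sysreg field, not something this hunk introduces):

    /* Does the VM's sanitised ID-register view advertise FEAT_FGT? */
    static bool guest_has_fgt(struct kvm *kvm)
    {
        return kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP);
    }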
|
||||
|
@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
|
||||
/*
|
||||
* Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
|
||||
* static inline can allow the compiler to out-of-line this. KVM always wants
|
||||
* the macro version as its always inlined.
|
||||
* the macro version as it's always inlined.
|
||||
*/
|
||||
#define __kvm_swab32(x) ___constant_swab32(x)
|
||||
|
||||
|
@ -53,27 +53,6 @@
|
||||
|
||||
#include <asm/alternative.h>
|
||||
|
||||
/*
|
||||
* Convert a kernel VA into a HYP VA.
|
||||
* reg: VA to be converted.
|
||||
*
|
||||
* The actual code generation takes place in kvm_update_va_mask, and
|
||||
* the instructions below are only there to reserve the space and
|
||||
* perform the register allocation (kvm_update_va_mask uses the
|
||||
* specific registers encoded in the instructions).
|
||||
*/
|
||||
.macro kern_hyp_va reg
|
||||
#ifndef __KVM_VHE_HYPERVISOR__
|
||||
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
|
||||
and \reg, \reg, #1 /* mask with va_mask */
|
||||
ror \reg, \reg, #1 /* rotate to the first tag bit */
|
||||
add \reg, \reg, #0 /* insert the low 12 bits of the tag */
|
||||
add \reg, \reg, #0, lsl 12 /* insert the top 12 bits of the tag */
|
||||
ror \reg, \reg, #63 /* rotate back */
|
||||
alternative_cb_end
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Convert a hypervisor VA to a PA
|
||||
* reg: hypervisor address to be converted in place
|
||||
@ -127,14 +106,29 @@ void kvm_apply_hyp_relocations(void);
|
||||
|
||||
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
|
||||
|
||||
/*
|
||||
* Convert a kernel VA into a HYP VA.
|
||||
*
|
||||
* Can be called from hyp or non-hyp context.
|
||||
*
|
||||
* The actual code generation takes place in kvm_update_va_mask(), and
|
||||
* the instructions below are only there to reserve the space and
|
||||
* perform the register allocation (kvm_update_va_mask() uses the
|
||||
* specific registers encoded in the instructions).
|
||||
*/
|
||||
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
|
||||
{
|
||||
/*
|
||||
* This #ifndef is an optimisation for when this is called from VHE hyp
|
||||
* context. When called from a VHE non-hyp context, kvm_update_va_mask() will
|
||||
* replace the instructions with `nop`s.
|
||||
*/
|
||||
#ifndef __KVM_VHE_HYPERVISOR__
|
||||
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
|
||||
"ror %0, %0, #1\n"
|
||||
"add %0, %0, #0\n"
|
||||
"add %0, %0, #0, lsl 12\n"
|
||||
"ror %0, %0, #63\n",
|
||||
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n" /* mask with va_mask */
|
||||
"ror %0, %0, #1\n" /* rotate to the first tag bit */
|
||||
"add %0, %0, #0\n" /* insert the low 12 bits of the tag */
|
||||
"add %0, %0, #0, lsl 12\n" /* insert the top 12 bits of the tag */
|
||||
"ror %0, %0, #63\n", /* rotate back */
|
||||
ARM64_ALWAYS_SYSTEM,
|
||||
kvm_update_va_mask)
|
||||
: "+r" (v));
|
||||
|
@ -60,7 +60,6 @@ static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
|
||||
return ttbr0 & ~GENMASK_ULL(63, 48);
|
||||
}
|
||||
|
||||
extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
|
||||
|
||||
int kvm_init_nv_sysregs(struct kvm *kvm);
|
||||
|
||||
|
@ -197,6 +197,7 @@ enum kvm_pgtable_stage2_flags {
|
||||
* @KVM_PGTABLE_PROT_W: Write permission.
|
||||
* @KVM_PGTABLE_PROT_R: Read permission.
|
||||
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
|
||||
* @KVM_PGTABLE_PROT_NORMAL_NC: Normal noncacheable attributes.
|
||||
* @KVM_PGTABLE_PROT_SW0: Software bit 0.
|
||||
* @KVM_PGTABLE_PROT_SW1: Software bit 1.
|
||||
* @KVM_PGTABLE_PROT_SW2: Software bit 2.
|
||||
@ -208,6 +209,7 @@ enum kvm_pgtable_prot {
|
||||
KVM_PGTABLE_PROT_R = BIT(2),
|
||||
|
||||
KVM_PGTABLE_PROT_DEVICE = BIT(3),
|
||||
KVM_PGTABLE_PROT_NORMAL_NC = BIT(4),
|
||||
|
||||
KVM_PGTABLE_PROT_SW0 = BIT(55),
|
||||
KVM_PGTABLE_PROT_SW1 = BIT(56),
|
||||
|
@ -177,6 +177,7 @@
|
||||
* Memory types for Stage-2 translation
|
||||
*/
|
||||
#define MT_S2_NORMAL 0xf
|
||||
#define MT_S2_NORMAL_NC 0x5
|
||||
#define MT_S2_DEVICE_nGnRE 0x1
|
||||
|
||||
/*
|
||||
@ -184,6 +185,7 @@
|
||||
* Stage-2 enforces Normal-WB and Device-nGnRE
|
||||
*/
|
||||
#define MT_S2_FWB_NORMAL 6
|
||||
#define MT_S2_FWB_NORMAL_NC 5
|
||||
#define MT_S2_FWB_DEVICE_nGnRE 1
|
||||
|
||||
#ifdef CONFIG_ARM64_4K_PAGES
|
||||
|
@ -1181,6 +1181,8 @@
|
||||
par; \
|
||||
})
|
||||
|
||||
#define SYS_FIELD_VALUE(reg, field, val) reg##_##field##_##val
|
||||
|
||||
#define SYS_FIELD_GET(reg, field, val) \
|
||||
FIELD_GET(reg##_##field##_MASK, val)
|
||||
|
||||
@ -1188,7 +1190,8 @@
|
||||
FIELD_PREP(reg##_##field##_MASK, val)
|
||||
|
||||
#define SYS_FIELD_PREP_ENUM(reg, field, val) \
|
||||
FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val)
|
||||
FIELD_PREP(reg##_##field##_MASK, \
|
||||
SYS_FIELD_VALUE(reg, field, val))
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -37,9 +37,7 @@
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/sve_context.h>
|
||||
|
||||
#define __KVM_HAVE_GUEST_DEBUG
|
||||
#define __KVM_HAVE_IRQ_LINE
|
||||
#define __KVM_HAVE_READONLY_MEM
|
||||
#define __KVM_HAVE_VCPU_EVENTS
|
||||
|
||||
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
|
||||
@ -76,11 +74,11 @@ struct kvm_regs {
|
||||
|
||||
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
|
||||
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
|
||||
#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
|
||||
KVM_ARM_DEVICE_TYPE_SHIFT)
|
||||
#define KVM_ARM_DEVICE_TYPE_MASK __GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
|
||||
KVM_ARM_DEVICE_TYPE_SHIFT)
|
||||
#define KVM_ARM_DEVICE_ID_SHIFT 16
|
||||
#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
|
||||
KVM_ARM_DEVICE_ID_SHIFT)
|
||||
#define KVM_ARM_DEVICE_ID_MASK __GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
|
||||
KVM_ARM_DEVICE_ID_SHIFT)
|
||||
|
||||
/* Supported device IDs */
|
||||
#define KVM_ARM_DEVICE_VGIC_V2 0
|
||||
@ -162,6 +160,11 @@ struct kvm_sync_regs {
|
||||
__u64 device_irq_level;
|
||||
};
|
||||
|
||||
/* Bits for run->s.regs.device_irq_level */
|
||||
#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
|
||||
#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
|
||||
#define KVM_ARM_DEV_PMU (1 << 2)
|
||||
|
||||
/*
|
||||
* PMU filter structure. Describe a range of events with a particular
|
||||
* action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
|
||||
|
@ -140,12 +140,42 @@ void dump_cpu_features(void)
|
||||
pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps);
|
||||
}
|
||||
|
||||
#define __ARM64_MAX_POSITIVE(reg, field) \
|
||||
((reg##_##field##_SIGNED ? \
|
||||
BIT(reg##_##field##_WIDTH - 1) : \
|
||||
BIT(reg##_##field##_WIDTH)) - 1)
|
||||
|
||||
#define __ARM64_MIN_NEGATIVE(reg, field) BIT(reg##_##field##_WIDTH - 1)
|
||||
|
||||
#define __ARM64_CPUID_FIELDS(reg, field, min_value, max_value) \
|
||||
.sys_reg = SYS_##reg, \
|
||||
.field_pos = reg##_##field##_SHIFT, \
|
||||
.field_width = reg##_##field##_WIDTH, \
|
||||
.sign = reg##_##field##_SIGNED, \
|
||||
.min_field_value = min_value, \
|
||||
.max_field_value = max_value,
|
||||
|
||||
/*
|
||||
* ARM64_CPUID_FIELDS() encodes a field with a range from min_value to
|
||||
* an implicit maximum that depends on the sign-ess of the field.
|
||||
*
|
||||
* An unsigned field will be capped at all ones, while a signed field
|
||||
* will be limited to the positive half only.
|
||||
*/
|
||||
#define ARM64_CPUID_FIELDS(reg, field, min_value) \
|
||||
.sys_reg = SYS_##reg, \
|
||||
.field_pos = reg##_##field##_SHIFT, \
|
||||
.field_width = reg##_##field##_WIDTH, \
|
||||
.sign = reg##_##field##_SIGNED, \
|
||||
.min_field_value = reg##_##field##_##min_value,
|
||||
__ARM64_CPUID_FIELDS(reg, field, \
|
||||
SYS_FIELD_VALUE(reg, field, min_value), \
|
||||
__ARM64_MAX_POSITIVE(reg, field))
|
||||
|
||||
/*
|
||||
* ARM64_CPUID_FIELDS_NEG() encodes a field with a range from an
|
||||
* implicit minimal value to max_value. This should be used when
|
||||
* matching a non-implemented property.
|
||||
*/
|
||||
#define ARM64_CPUID_FIELDS_NEG(reg, field, max_value) \
|
||||
__ARM64_CPUID_FIELDS(reg, field, \
|
||||
__ARM64_MIN_NEGATIVE(reg, field), \
|
||||
SYS_FIELD_VALUE(reg, field, max_value))
|
||||
|
||||
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
|
||||
{ \
|
||||
@ -440,6 +470,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
|
||||
ARM64_FTR_END,
|
||||
};
|
||||
|
||||
static const struct arm64_ftr_bits ftr_id_aa64mmfr4[] = {
|
||||
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_E2H0_SHIFT, 4, 0),
|
||||
ARM64_FTR_END,
|
||||
};
|
||||
|
||||
static const struct arm64_ftr_bits ftr_ctr[] = {
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1),
|
||||
@ -764,6 +799,7 @@ static const struct __ftr_reg_entry {
|
||||
ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2,
|
||||
&id_aa64mmfr2_override),
|
||||
ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
|
||||
ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
|
||||
|
||||
/* Op1 = 1, CRn = 0, CRm = 0 */
|
||||
ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
|
||||
@ -959,7 +995,8 @@ static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
|
||||
pr_warn("%s[%d:%d]: %s to %llx\n",
|
||||
reg->name,
|
||||
ftrp->shift + ftrp->width - 1,
|
||||
ftrp->shift, str, tmp);
|
||||
ftrp->shift, str,
|
||||
tmp & (BIT(ftrp->width) - 1));
|
||||
} else if ((ftr_mask & reg->override->val) == ftr_mask) {
|
||||
reg->override->val &= ~ftr_mask;
|
||||
pr_warn("%s[%d:%d]: impossible override, ignored\n",
|
||||
@ -1088,6 +1125,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
|
||||
init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
|
||||
init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
|
||||
init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3);
|
||||
init_cpu_ftr_reg(SYS_ID_AA64MMFR4_EL1, info->reg_id_aa64mmfr4);
|
||||
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
|
||||
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
|
||||
init_cpu_ftr_reg(SYS_ID_AA64PFR2_EL1, info->reg_id_aa64pfr2);
|
||||
@ -1470,6 +1508,7 @@ u64 __read_sysreg_by_encoding(u32 sys_id)
|
||||
read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
|
||||
read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
|
||||
read_sysreg_case(SYS_ID_AA64MMFR3_EL1);
|
||||
read_sysreg_case(SYS_ID_AA64MMFR4_EL1);
|
||||
read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
|
||||
read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
|
||||
read_sysreg_case(SYS_ID_AA64ISAR2_EL1);
|
||||
@ -1504,11 +1543,28 @@ has_always(const struct arm64_cpu_capabilities *entry, int scope)
|
||||
static bool
|
||||
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
|
||||
{
|
||||
int val = cpuid_feature_extract_field_width(reg, entry->field_pos,
|
||||
entry->field_width,
|
||||
entry->sign);
|
||||
int val, min, max;
|
||||
u64 tmp;
|
||||
|
||||
return val >= entry->min_field_value;
|
||||
val = cpuid_feature_extract_field_width(reg, entry->field_pos,
|
||||
entry->field_width,
|
||||
entry->sign);
|
||||
|
||||
tmp = entry->min_field_value;
|
||||
tmp <<= entry->field_pos;
|
||||
|
||||
min = cpuid_feature_extract_field_width(tmp, entry->field_pos,
|
||||
entry->field_width,
|
||||
entry->sign);
|
||||
|
||||
tmp = entry->max_field_value;
|
||||
tmp <<= entry->field_pos;
|
||||
|
||||
max = cpuid_feature_extract_field_width(tmp, entry->field_pos,
|
||||
entry->field_width,
|
||||
entry->sign);
|
||||
|
||||
return val >= min && val <= max;
|
||||
}
|
||||
|
||||
static u64
|
||||
@ -1752,6 +1808,28 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
|
||||
return !meltdown_safe;
|
||||
}
|
||||
|
||||
static bool has_nv1(const struct arm64_cpu_capabilities *entry, int scope)
|
||||
{
|
||||
/*
|
||||
* Although the Apple M2 family appears to support NV1, the
|
||||
* PTW barfs on the nVHE EL2 S1 page table format. Pretend
|
||||
* that it doesn't support NV1 at all.
|
||||
*/
|
||||
static const struct midr_range nv1_ni_list[] = {
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
|
||||
MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
|
||||
{}
|
||||
};
|
||||
|
||||
return (__system_matches_cap(ARM64_HAS_NESTED_VIRT) &&
|
||||
!(has_cpuid_feature(entry, scope) ||
|
||||
is_midr_in_range_list(read_cpuid_id(), nv1_ni_list)));
|
||||
}
|
||||
|
||||
#if defined(ID_AA64MMFR0_EL1_TGRAN_LPA2) && defined(ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_LPA2)
|
||||
static bool has_lpa2_at_stage1(u64 mmfr0)
|
||||
{
|
||||
@ -2776,6 +2854,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
#endif
|
||||
},
|
||||
#endif
|
||||
{
|
||||
.desc = "NV1",
|
||||
.capability = ARM64_HAS_HCR_NV1,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = has_nv1,
|
||||
ARM64_CPUID_FIELDS_NEG(ID_AA64MMFR4_EL1, E2H0, NI_NV1)
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
|
@ -463,6 +463,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
|
||||
info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
|
||||
info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
|
||||
info->reg_id_aa64mmfr3 = read_cpuid(ID_AA64MMFR3_EL1);
|
||||
info->reg_id_aa64mmfr4 = read_cpuid(ID_AA64MMFR4_EL1);
|
||||
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
|
||||
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
|
||||
info->reg_id_aa64pfr2 = read_cpuid(ID_AA64PFR2_EL1);
|
||||
|
@ -304,25 +304,32 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
|
||||
mov_q x1, INIT_SCTLR_EL1_MMU_OFF
|
||||
|
||||
/*
|
||||
* Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
|
||||
* making it impossible to start in nVHE mode. Is that
|
||||
* compliant with the architecture? Absolutely not!
|
||||
* Compliant CPUs advertise their VHE-onlyness with
|
||||
* ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
|
||||
* RES1 in that case.
|
||||
*
|
||||
* Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
|
||||
* don't advertise it (they predate this relaxation).
|
||||
*/
|
||||
mrs_s x0, SYS_ID_AA64MMFR4_EL1
|
||||
ubfx x0, x0, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
|
||||
tbnz x0, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
|
||||
|
||||
mrs x0, hcr_el2
|
||||
and x0, x0, #HCR_E2H
|
||||
cbz x0, 1f
|
||||
|
||||
cbz x0, 2f
|
||||
1:
|
||||
/* Set a sane SCTLR_EL1, the VHE way */
|
||||
pre_disable_mmu_workaround
|
||||
msr_s SYS_SCTLR_EL12, x1
|
||||
mov x2, #BOOT_CPU_FLAG_E2H
|
||||
b 2f
|
||||
b 3f
|
||||
|
||||
1:
|
||||
2:
|
||||
pre_disable_mmu_workaround
|
||||
msr sctlr_el1, x1
|
||||
mov x2, xzr
|
||||
2:
|
||||
3:
|
||||
__init_el2_nvhe_prepare_eret
|
||||
|
||||
mov w0, #BOOT_CPU_MODE_EL2
|
||||
|
@ -19,7 +19,6 @@ if VIRTUALIZATION
|
||||
|
||||
menuconfig KVM
|
||||
bool "Kernel-based Virtual Machine (KVM) support"
|
||||
depends on HAVE_KVM
|
||||
select KVM_COMMON
|
||||
select KVM_GENERIC_HARDWARE_ENABLING
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
@ -33,12 +32,11 @@ menuconfig KVM
|
||||
select HAVE_KVM_MSI
|
||||
select HAVE_KVM_IRQCHIP
|
||||
select HAVE_KVM_IRQ_ROUTING
|
||||
select IRQ_BYPASS_MANAGER
|
||||
select HAVE_KVM_IRQ_BYPASS
|
||||
select HAVE_KVM_READONLY_MEM
|
||||
select HAVE_KVM_VCPU_RUN_PID_CHANGE
|
||||
select SCHED_INFO
|
||||
select GUEST_PERF_EVENTS if PERF_EVENTS
|
||||
select XARRAY_MULTI
|
||||
help
|
||||
Support hosting virtualized guest machines.
|
||||
|
||||
@ -67,4 +65,15 @@ config PROTECTED_NVHE_STACKTRACE
|
||||
|
||||
If unsure, or not using protected nVHE (pKVM), say N.
|
||||
|
||||
config KVM_ARM64_RES_BITS_PARANOIA
|
||||
bool "Build-time check of RES0/RES1 bits"
|
||||
depends on KVM
|
||||
default n
|
||||
help
|
||||
Say Y here to validate that KVM's knowledge of most system
|
||||
registers' RES0/RES1 bits matches when the rest of the kernel
|
||||
defines. Expect the build to fail badly if you enable this.
|
||||
|
||||
Just say N.
|
||||
|
||||
endif # VIRTUALIZATION
|
||||
|
@ -745,7 +745,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
|
||||
WARN_ON_ONCE(ret);
|
||||
|
||||
/*
|
||||
* The virtual offset behaviour is "interresting", as it
|
||||
* The virtual offset behaviour is "interesting", as it
|
||||
* always applies when HCR_EL2.E2H==0, but only when
|
||||
* accessed from EL1 when HCR_EL2.E2H==1. So make sure we
|
||||
* track E2H when putting the HV timer in "direct" mode.
|
||||
|
@ -190,6 +190,10 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
void kvm_arch_create_vm_debugfs(struct kvm *kvm)
|
||||
{
|
||||
kvm_sys_regs_create_debugfs(kvm);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_arch_destroy_vm - destroy the VM data structure
|
||||
@ -206,6 +210,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
|
||||
pkvm_destroy_hyp_vm(kvm);
|
||||
|
||||
kfree(kvm->arch.mpidr_data);
|
||||
kfree(kvm->arch.sysreg_masks);
|
||||
kvm_destroy_vcpus(kvm);
|
||||
|
||||
kvm_unshare_hyp(kvm, kvm + 1);
|
||||
@ -674,6 +679,12 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This needs to happen after NV has imposed its own restrictions on
|
||||
* the feature set
|
||||
*/
|
||||
kvm_init_sysreg(vcpu);
|
||||
|
||||
ret = kvm_timer_enable(vcpu);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -2591,7 +2602,8 @@ static __init int kvm_arm_init(void)
|
||||
} else if (in_hyp_mode) {
|
||||
kvm_info("VHE mode initialized successfully\n");
|
||||
} else {
|
||||
kvm_info("Hyp mode initialized successfully\n");
|
||||
char mode = cpus_have_final_cap(ARM64_KVM_HVHE) ? 'h' : 'n';
|
||||
kvm_info("Hyp mode (%cVHE) initialized successfully\n", mode);
|
||||
}
|
||||
|
||||
/*
|
||||
|
arch/arm64/kvm/check-res-bits.h (new file, 125 lines)
@ -0,0 +1,125 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2024 - Google LLC
|
||||
* Author: Marc Zyngier <maz@kernel.org>
|
||||
*/
|
||||
|
||||
#include <asm/sysreg-defs.h>
|
||||
|
||||
/*
|
||||
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
|
||||
*
|
||||
* If any of these BUILD_BUG_ON() fails, that's because some bits that
|
||||
* were reserved have gained some other meaning, and KVM needs to know
|
||||
* about those.
|
||||
*
|
||||
* In such case, do *NOT* blindly change the assertion so that it
|
||||
* passes, but also teach the rest of the code about the actual
|
||||
* change.
|
||||
*
|
||||
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
|
||||
*/
|
||||
static inline void check_res_bits(void)
|
||||
{
|
||||
#ifdef CONFIG_KVM_ARM64_RES_BITS_PARANOIA
|
||||
|
||||
BUILD_BUG_ON(OSDTRRX_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(MDCCINT_EL1_RES0 != (GENMASK_ULL(63, 31) | GENMASK_ULL(28, 0)));
|
||||
BUILD_BUG_ON(MDSCR_EL1_RES0 != (GENMASK_ULL(63, 36) | GENMASK_ULL(28, 28) | GENMASK_ULL(25, 24) | GENMASK_ULL(20, 20) | GENMASK_ULL(18, 16) | GENMASK_ULL(11, 7) | GENMASK_ULL(5, 1)));
|
||||
BUILD_BUG_ON(OSDTRTX_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(OSECCR_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(OSLAR_EL1_RES0 != (GENMASK_ULL(63, 1)));
|
||||
BUILD_BUG_ON(ID_PFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_PFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_DFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_AFR0_EL1_RES0 != (GENMASK_ULL(63, 16)));
|
||||
BUILD_BUG_ON(ID_MMFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_MMFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_MMFR2_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_MMFR3_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_ISAR0_EL1_RES0 != (GENMASK_ULL(63, 28)));
|
||||
BUILD_BUG_ON(ID_ISAR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_ISAR2_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_ISAR3_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_ISAR4_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_ISAR5_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(23, 20)));
|
||||
BUILD_BUG_ON(ID_ISAR6_EL1_RES0 != (GENMASK_ULL(63, 28)));
|
||||
BUILD_BUG_ON(ID_MMFR4_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(MVFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(MVFR1_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(MVFR2_EL1_RES0 != (GENMASK_ULL(63, 8)));
|
||||
BUILD_BUG_ON(ID_PFR2_EL1_RES0 != (GENMASK_ULL(63, 12)));
|
||||
BUILD_BUG_ON(ID_DFR1_EL1_RES0 != (GENMASK_ULL(63, 8)));
|
||||
BUILD_BUG_ON(ID_MMFR5_EL1_RES0 != (GENMASK_ULL(63, 8)));
|
||||
BUILD_BUG_ON(ID_AA64PFR1_EL1_RES0 != (GENMASK_ULL(23, 20)));
|
||||
BUILD_BUG_ON(ID_AA64PFR2_EL1_RES0 != (GENMASK_ULL(63, 36) | GENMASK_ULL(31, 12)));
|
||||
BUILD_BUG_ON(ID_AA64ZFR0_EL1_RES0 != (GENMASK_ULL(63, 60) | GENMASK_ULL(51, 48) | GENMASK_ULL(39, 36) | GENMASK_ULL(31, 28) | GENMASK_ULL(15, 8)));
|
||||
BUILD_BUG_ON(ID_AA64SMFR0_EL1_RES0 != (GENMASK_ULL(62, 61) | GENMASK_ULL(51, 49) | GENMASK_ULL(31, 31) | GENMASK_ULL(27, 0)));
|
||||
BUILD_BUG_ON(ID_AA64FPFR0_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 2)));
|
||||
BUILD_BUG_ON(ID_AA64DFR0_EL1_RES0 != (GENMASK_ULL(27, 24) | GENMASK_ULL(19, 16)));
|
||||
BUILD_BUG_ON(ID_AA64DFR1_EL1_RES0 != (GENMASK_ULL(63, 0)));
|
||||
BUILD_BUG_ON(ID_AA64AFR0_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(ID_AA64AFR1_EL1_RES0 != (GENMASK_ULL(63, 0)));
|
||||
BUILD_BUG_ON(ID_AA64ISAR0_EL1_RES0 != (GENMASK_ULL(3, 0)));
|
||||
BUILD_BUG_ON(ID_AA64ISAR2_EL1_RES0 != (GENMASK_ULL(47, 44)));
|
||||
BUILD_BUG_ON(ID_AA64ISAR3_EL1_RES0 != (GENMASK_ULL(63, 16)));
|
||||
BUILD_BUG_ON(ID_AA64MMFR0_EL1_RES0 != (GENMASK_ULL(55, 48)));
|
||||
BUILD_BUG_ON(ID_AA64MMFR2_EL1_RES0 != (GENMASK_ULL(47, 44)));
|
||||
BUILD_BUG_ON(ID_AA64MMFR3_EL1_RES0 != (GENMASK_ULL(51, 48)));
|
||||
BUILD_BUG_ON(ID_AA64MMFR4_EL1_RES0 != (GENMASK_ULL(63, 40) | GENMASK_ULL(35, 28) | GENMASK_ULL(3, 0)));
|
||||
BUILD_BUG_ON(SCTLR_EL1_RES0 != (GENMASK_ULL(17, 17)));
|
||||
BUILD_BUG_ON(CPACR_ELx_RES0 != (GENMASK_ULL(63, 30) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | GENMASK_ULL(19, 18) | GENMASK_ULL(15, 0)));
|
||||
BUILD_BUG_ON(SMPRI_EL1_RES0 != (GENMASK_ULL(63, 4)));
|
||||
BUILD_BUG_ON(ZCR_ELx_RES0 != (GENMASK_ULL(63, 9)));
|
||||
BUILD_BUG_ON(SMCR_ELx_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(29, 9)));
|
||||
BUILD_BUG_ON(GCSCR_ELx_RES0 != (GENMASK_ULL(63, 10) | GENMASK_ULL(7, 7) | GENMASK_ULL(4, 1)));
|
||||
BUILD_BUG_ON(GCSPR_ELx_RES0 != (GENMASK_ULL(2, 0)));
|
||||
BUILD_BUG_ON(GCSCRE0_EL1_RES0 != (GENMASK_ULL(63, 11) | GENMASK_ULL(7, 6) | GENMASK_ULL(4, 1)));
|
||||
BUILD_BUG_ON(ALLINT_RES0 != (GENMASK_ULL(63, 14) | GENMASK_ULL(12, 0)));
|
||||
BUILD_BUG_ON(PMSCR_EL1_RES0 != (GENMASK_ULL(63, 8) | GENMASK_ULL(2, 2)));
|
||||
BUILD_BUG_ON(PMSICR_EL1_RES0 != (GENMASK_ULL(55, 32)));
|
||||
BUILD_BUG_ON(PMSIRR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(7, 1)));
|
||||
BUILD_BUG_ON(PMSFCR_EL1_RES0 != (GENMASK_ULL(63, 19) | GENMASK_ULL(15, 4)));
|
||||
BUILD_BUG_ON(PMSLATFR_EL1_RES0 != (GENMASK_ULL(63, 16)));
|
||||
BUILD_BUG_ON(PMSIDR_EL1_RES0 != (GENMASK_ULL(63, 25) | GENMASK_ULL(7, 7)));
|
||||
BUILD_BUG_ON(PMBLIMITR_EL1_RES0 != (GENMASK_ULL(11, 6) | GENMASK_ULL(4, 3)));
|
||||
BUILD_BUG_ON(PMBSR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(25, 20)));
|
||||
BUILD_BUG_ON(PMBIDR_EL1_RES0 != (GENMASK_ULL(63, 12) | GENMASK_ULL(7, 6)));
|
||||
BUILD_BUG_ON(CONTEXTIDR_ELx_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(CCSIDR_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(CLIDR_EL1_RES0 != (GENMASK_ULL(63, 47)));
|
||||
BUILD_BUG_ON(CCSIDR2_EL1_RES0 != (GENMASK_ULL(63, 24)));
|
||||
BUILD_BUG_ON(GMID_EL1_RES0 != (GENMASK_ULL(63, 4)));
|
||||
BUILD_BUG_ON(SMIDR_EL1_RES0 != (GENMASK_ULL(63, 32) | GENMASK_ULL(14, 12)));
|
||||
BUILD_BUG_ON(CSSELR_EL1_RES0 != (GENMASK_ULL(63, 5)));
|
||||
BUILD_BUG_ON(CTR_EL0_RES0 != (GENMASK_ULL(63, 38) | GENMASK_ULL(30, 30) | GENMASK_ULL(13, 4)));
|
||||
BUILD_BUG_ON(CTR_EL0_RES1 != (GENMASK_ULL(31, 31)));
|
||||
BUILD_BUG_ON(DCZID_EL0_RES0 != (GENMASK_ULL(63, 5)));
|
||||
BUILD_BUG_ON(SVCR_RES0 != (GENMASK_ULL(63, 2)));
|
||||
BUILD_BUG_ON(FPMR_RES0 != (GENMASK_ULL(63, 38) | GENMASK_ULL(23, 23) | GENMASK_ULL(13, 9)));
|
||||
BUILD_BUG_ON(HFGxTR_EL2_RES0 != (GENMASK_ULL(51, 51)));
|
||||
BUILD_BUG_ON(HFGITR_EL2_RES0 != (GENMASK_ULL(63, 63) | GENMASK_ULL(61, 61)));
|
||||
BUILD_BUG_ON(HDFGRTR_EL2_RES0 != (GENMASK_ULL(49, 49) | GENMASK_ULL(42, 42) | GENMASK_ULL(39, 38) | GENMASK_ULL(21, 20) | GENMASK_ULL(8, 8)));
|
||||
BUILD_BUG_ON(HDFGWTR_EL2_RES0 != (GENMASK_ULL(63, 63) | GENMASK_ULL(59, 58) | GENMASK_ULL(51, 51) | GENMASK_ULL(47, 47) | GENMASK_ULL(43, 43) | GENMASK_ULL(40, 38) | GENMASK_ULL(34, 34) | GENMASK_ULL(30, 30) | GENMASK_ULL(22, 22) | GENMASK_ULL(9, 9) | GENMASK_ULL(6, 6)));
|
||||
BUILD_BUG_ON(HAFGRTR_EL2_RES0 != (GENMASK_ULL(63, 50) | GENMASK_ULL(16, 5)));
|
||||
BUILD_BUG_ON(HCRX_EL2_RES0 != (GENMASK_ULL(63, 25) | GENMASK_ULL(13, 12)));
|
||||
BUILD_BUG_ON(DACR32_EL2_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(PMSCR_EL2_RES0 != (GENMASK_ULL(63, 8) | GENMASK_ULL(2, 2)));
|
||||
BUILD_BUG_ON(TCR2_EL1x_RES0 != (GENMASK_ULL(63, 16) | GENMASK_ULL(13, 12) | GENMASK_ULL(9, 6)));
|
||||
BUILD_BUG_ON(TCR2_EL2_RES0 != (GENMASK_ULL(63, 16)));
|
||||
BUILD_BUG_ON(LORSA_EL1_RES0 != (GENMASK_ULL(63, 52) | GENMASK_ULL(15, 1)));
|
||||
BUILD_BUG_ON(LOREA_EL1_RES0 != (GENMASK_ULL(63, 52) | GENMASK_ULL(15, 0)));
|
||||
BUILD_BUG_ON(LORN_EL1_RES0 != (GENMASK_ULL(63, 8)));
|
||||
BUILD_BUG_ON(LORC_EL1_RES0 != (GENMASK_ULL(63, 10) | GENMASK_ULL(1, 1)));
|
||||
BUILD_BUG_ON(LORID_EL1_RES0 != (GENMASK_ULL(63, 24) | GENMASK_ULL(15, 8)));
|
||||
BUILD_BUG_ON(ISR_EL1_RES0 != (GENMASK_ULL(63, 11) | GENMASK_ULL(5, 0)));
|
||||
BUILD_BUG_ON(ICC_NMIAR1_EL1_RES0 != (GENMASK_ULL(63, 24)));
|
||||
BUILD_BUG_ON(TRBLIMITR_EL1_RES0 != (GENMASK_ULL(11, 7)));
|
||||
BUILD_BUG_ON(TRBBASER_EL1_RES0 != (GENMASK_ULL(11, 0)));
|
||||
BUILD_BUG_ON(TRBSR_EL1_RES0 != (GENMASK_ULL(63, 56) | GENMASK_ULL(25, 24) | GENMASK_ULL(19, 19) | GENMASK_ULL(16, 16)));
|
||||
BUILD_BUG_ON(TRBMAR_EL1_RES0 != (GENMASK_ULL(63, 12)));
|
||||
BUILD_BUG_ON(TRBTRG_EL1_RES0 != (GENMASK_ULL(63, 32)));
|
||||
BUILD_BUG_ON(TRBIDR_EL1_RES0 != (GENMASK_ULL(63, 12) | GENMASK_ULL(7, 6)));
|
||||
|
||||
#endif
|
||||
}
|
@ -23,7 +23,7 @@
|
||||
|
||||
static DEFINE_PER_CPU(u64, mdcr_el2);
|
||||
|
||||
/**
|
||||
/*
|
||||
* save/restore_guest_debug_regs
|
||||
*
|
||||
* For some debug operations we need to tweak some guest registers. As
|
||||
@ -143,6 +143,7 @@ void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
|
||||
|
||||
/**
|
||||
* kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
|
||||
* @vcpu: the vcpu pointer
|
||||
*/
|
||||
|
||||
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
|
||||
|
@ -427,12 +427,14 @@ static const complex_condition_check ccc[] = {
|
||||
* [19:14] bit number in the FGT register (6 bits)
|
||||
* [20] trap polarity (1 bit)
|
||||
* [25:21] FG filter (5 bits)
|
||||
* [62:26] Unused (37 bits)
|
||||
* [35:26] Main SysReg table index (10 bits)
|
||||
* [62:36] Unused (27 bits)
|
||||
* [63] RES0 - Must be zero, as lost on insertion in the xarray
|
||||
*/
|
||||
#define TC_CGT_BITS 10
|
||||
#define TC_FGT_BITS 4
|
||||
#define TC_FGF_BITS 5
|
||||
#define TC_SRI_BITS 10
|
||||
|
||||
union trap_config {
|
||||
u64 val;
|
||||
@ -442,7 +444,8 @@ union trap_config {
|
||||
unsigned long bit:6; /* Bit number */
|
||||
unsigned long pol:1; /* Polarity */
|
||||
unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */
|
||||
unsigned long unused:37; /* Unused, should be zero */
|
||||
unsigned long sri:TC_SRI_BITS; /* SysReg Index */
|
||||
unsigned long unused:27; /* Unused, should be zero */
|
||||
unsigned long mbz:1; /* Must Be Zero */
|
||||
};
|
||||
};
|
||||
@ -1006,18 +1009,6 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
|
||||
|
||||
static DEFINE_XARRAY(sr_forward_xa);
|
||||
|
||||
enum fgt_group_id {
|
||||
__NO_FGT_GROUP__,
|
||||
HFGxTR_GROUP,
|
||||
HDFGRTR_GROUP,
|
||||
HDFGWTR_GROUP,
|
||||
HFGITR_GROUP,
|
||||
HAFGRTR_GROUP,
|
||||
|
||||
/* Must be last */
|
||||
__NR_FGT_GROUP_IDS__
|
||||
};
|
||||
|
||||
enum fg_filter_id {
|
||||
__NO_FGF__,
|
||||
HCRX_FGTnXS,
|
||||
@ -1757,6 +1748,28 @@ static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc,
|
||||
err);
|
||||
}
|
||||
|
||||
static u32 encoding_next(u32 encoding)
{
u8 op0, op1, crn, crm, op2;

op0 = sys_reg_Op0(encoding);
op1 = sys_reg_Op1(encoding);
crn = sys_reg_CRn(encoding);
crm = sys_reg_CRm(encoding);
op2 = sys_reg_Op2(encoding);

if (op2 < Op2_mask)
return sys_reg(op0, op1, crn, crm, op2 + 1);
if (crm < CRm_mask)
return sys_reg(op0, op1, crn, crm + 1, 0);
if (crn < CRn_mask)
return sys_reg(op0, op1, crn + 1, 0, 0);
if (op1 < Op1_mask)
return sys_reg(op0, op1 + 1, 0, 0, 0);

return sys_reg(op0 + 1, 0, 0, 0, 0);
}
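
encoding_next() steps through the (Op0, Op1, CRn, CRm, Op2) space in encoding order, which is what lets the CGT insertion loop further down visit an [encoding, end] range one register at a time. The following is a standalone sketch of the same stepping logic; the shift and mask values restate the usual arm64 sys_reg() layout locally and should be read as assumptions, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins: Op0[20:19] Op1[18:16] CRn[15:12] CRm[11:8] Op2[7:5]. */
#define Op2_mask 0x7u
#define CRm_mask 0xfu
#define CRn_mask 0xfu
#define Op1_mask 0x7u

static uint32_t sys_reg(uint32_t op0, uint32_t op1, uint32_t crn,
			uint32_t crm, uint32_t op2)
{
	return (op0 << 19) | (op1 << 16) | (crn << 12) | (crm << 8) | (op2 << 5);
}

/* Same shape as encoding_next() above: bump Op2, then carry upwards. */
static uint32_t encoding_next(uint32_t enc)
{
	uint32_t op0 = (enc >> 19) & 0x3, op1 = (enc >> 16) & 0x7;
	uint32_t crn = (enc >> 12) & 0xf, crm = (enc >> 8) & 0xf;
	uint32_t op2 = (enc >> 5) & 0x7;

	if (op2 < Op2_mask)
		return sys_reg(op0, op1, crn, crm, op2 + 1);
	if (crm < CRm_mask)
		return sys_reg(op0, op1, crn, crm + 1, 0);
	if (crn < CRn_mask)
		return sys_reg(op0, op1, crn + 1, 0, 0);
	if (op1 < Op1_mask)
		return sys_reg(op0, op1 + 1, 0, 0, 0);
	return sys_reg(op0 + 1, 0, 0, 0, 0);
}

int main(void)
{
	/* Walk every encoding in a small range, the way the CGT loop does. */
	uint32_t first = sys_reg(3, 0, 10, 2, 0), last = sys_reg(3, 0, 10, 3, 1);

	for (uint32_t enc = first; enc <= last; enc = encoding_next(enc))
		printf("0x%x\n", enc);
	return 0;
}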
|
||||
|
||||
int __init populate_nv_trap_config(void)
|
||||
{
|
||||
int ret = 0;
|
||||
@ -1775,23 +1788,18 @@ int __init populate_nv_trap_config(void)
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
if (cgt->encoding != cgt->end) {
|
||||
prev = xa_store_range(&sr_forward_xa,
|
||||
cgt->encoding, cgt->end,
|
||||
xa_mk_value(cgt->tc.val),
|
||||
GFP_KERNEL);
|
||||
} else {
|
||||
prev = xa_store(&sr_forward_xa, cgt->encoding,
|
||||
for (u32 enc = cgt->encoding; enc <= cgt->end; enc = encoding_next(enc)) {
|
||||
prev = xa_store(&sr_forward_xa, enc,
|
||||
xa_mk_value(cgt->tc.val), GFP_KERNEL);
|
||||
if (prev && !xa_is_err(prev)) {
|
||||
ret = -EINVAL;
|
||||
print_nv_trap_error(cgt, "Duplicate CGT", ret);
|
||||
}
|
||||
}
|
||||
|
||||
if (xa_is_err(prev)) {
|
||||
ret = xa_err(prev);
|
||||
print_nv_trap_error(cgt, "Failed CGT insertion", ret);
|
||||
if (xa_is_err(prev)) {
|
||||
ret = xa_err(prev);
|
||||
print_nv_trap_error(cgt, "Failed CGT insertion", ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1804,6 +1812,7 @@ int __init populate_nv_trap_config(void)
|
||||
for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) {
|
||||
const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i];
|
||||
union trap_config tc;
|
||||
void *prev;
|
||||
|
||||
if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) {
|
||||
ret = -EINVAL;
|
||||
@ -1818,8 +1827,13 @@ int __init populate_nv_trap_config(void)
|
||||
}
|
||||
|
||||
tc.val |= fgt->tc.val;
|
||||
xa_store(&sr_forward_xa, fgt->encoding,
|
||||
xa_mk_value(tc.val), GFP_KERNEL);
|
||||
prev = xa_store(&sr_forward_xa, fgt->encoding,
|
||||
xa_mk_value(tc.val), GFP_KERNEL);
|
||||
|
||||
if (xa_is_err(prev)) {
|
||||
ret = xa_err(prev);
|
||||
print_nv_trap_error(fgt, "Failed FGT insertion", ret);
|
||||
}
|
||||
}
|
||||
|
||||
kvm_info("nv: %ld fine grained trap handlers\n",
|
||||
@ -1845,6 +1859,38 @@ int __init populate_nv_trap_config(void)
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __init populate_sysreg_config(const struct sys_reg_desc *sr,
unsigned int idx)
{
union trap_config tc;
u32 encoding;
void *ret;

/*
* 0 is a valid value for the index, but not for the storage.
* We'll store (idx+1), so check against an offset'd limit.
*/
if (idx >= (BIT(TC_SRI_BITS) - 1)) {
kvm_err("sysreg %s (%d) out of range\n", sr->name, idx);
return -EINVAL;
}

encoding = sys_reg(sr->Op0, sr->Op1, sr->CRn, sr->CRm, sr->Op2);
tc = get_trap_config(encoding);

if (tc.sri) {
kvm_err("sysreg %s (%d) duplicate entry (%d)\n",
sr->name, idx - 1, tc.sri);
return -EINVAL;
}

tc.sri = idx + 1;
ret = xa_store(&sr_forward_xa, encoding,
xa_mk_value(tc.val), GFP_KERNEL);

return xa_err(ret);
}
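
populate_sysreg_config() stores idx + 1 in tc.sri so that a zero field can keep meaning "no descriptor" while descriptor index 0 stays representable. A tiny standalone sketch of that off-by-one convention, with a plain array standing in for the xarray; all names here are illustrative.

#include <assert.h>

#define NR_SLOTS 8

/* 0 means "no descriptor"; a stored value of n refers to descriptor n - 1. */
static unsigned int slot_to_desc[NR_SLOTS];

static void store_desc(unsigned int slot, unsigned int desc_idx)
{
	slot_to_desc[slot] = desc_idx + 1;
}

static int lookup_desc(unsigned int slot, unsigned int *desc_idx)
{
	if (!slot_to_desc[slot])
		return -1;			/* nothing registered here */
	*desc_idx = slot_to_desc[slot] - 1;
	return 0;
}

int main(void)
{
	unsigned int idx;

	store_desc(3, 0);			/* descriptor index 0 is representable */
	assert(lookup_desc(3, &idx) == 0 && idx == 0);
	assert(lookup_desc(4, &idx) == -1);
	return 0;
}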
|
||||
|
||||
static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu,
|
||||
const struct trap_bits *tb)
|
||||
{
|
||||
@ -1892,20 +1938,64 @@ static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
|
||||
return __compute_trap_behaviour(vcpu, tc.cgt, b);
|
||||
}
|
||||
|
||||
static bool check_fgt_bit(u64 val, const union trap_config tc)
|
||||
static u64 kvm_get_sysreg_res0(struct kvm *kvm, enum vcpu_sysreg sr)
|
||||
{
|
||||
return ((val >> tc.bit) & 1) == tc.pol;
|
||||
struct kvm_sysreg_masks *masks;
|
||||
|
||||
/* Only handle the VNCR-backed regs for now */
|
||||
if (sr < __VNCR_START__)
|
||||
return 0;
|
||||
|
||||
masks = kvm->arch.sysreg_masks;
|
||||
|
||||
return masks->mask[sr - __VNCR_START__].res0;
|
||||
}
|
||||
|
||||
#define sanitised_sys_reg(vcpu, reg) \
|
||||
({ \
|
||||
u64 __val; \
|
||||
__val = __vcpu_sys_reg(vcpu, reg); \
|
||||
__val &= ~__ ## reg ## _RES0; \
|
||||
(__val); \
|
||||
})
|
||||
static bool check_fgt_bit(struct kvm *kvm, bool is_read,
|
||||
u64 val, const union trap_config tc)
|
||||
{
|
||||
enum vcpu_sysreg sr;
|
||||
|
||||
bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
|
||||
if (tc.pol)
|
||||
return (val & BIT(tc.bit));
|
||||
|
||||
/*
|
||||
* FGTs with negative polarities are an absolute nightmare, as
|
||||
* we need to evaluate the bit in the light of the feature
|
||||
* that defines it. WTF were they thinking?
|
||||
*
|
||||
* So let's check if the bit has been earmarked as RES0, as
|
||||
* this indicates an unimplemented feature.
|
||||
*/
|
||||
if (val & BIT(tc.bit))
|
||||
return false;
|
||||
|
||||
switch ((enum fgt_group_id)tc.fgt) {
|
||||
case HFGxTR_GROUP:
|
||||
sr = is_read ? HFGRTR_EL2 : HFGWTR_EL2;
|
||||
break;
|
||||
|
||||
case HDFGRTR_GROUP:
|
||||
sr = is_read ? HDFGRTR_EL2 : HDFGWTR_EL2;
|
||||
break;
|
||||
|
||||
case HAFGRTR_GROUP:
|
||||
sr = HAFGRTR_EL2;
|
||||
break;
|
||||
|
||||
case HFGITR_GROUP:
|
||||
sr = HFGITR_EL2;
|
||||
break;
|
||||
|
||||
default:
|
||||
WARN_ONCE(1, "Unhandled FGT group");
|
||||
return false;
|
||||
}
|
||||
|
||||
return !(kvm_get_sysreg_res0(kvm, sr) & BIT(tc.bit));
|
||||
}
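
check_fgt_bit() treats positive-polarity bits as a straight test, while a clear negative-polarity bit only forwards the trap when the controlling feature is actually implemented, i.e. the bit is not RES0 for this guest. The sketch below is a standalone restatement of that decision, with the kvm_get_sysreg_res0() lookup reduced to a parameter; it is illustrative, not the kernel function.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define BIT(n)	(1ULL << (n))

/* 'res0' stands in for the per-VM RES0 mask of the relevant FGT register. */
static bool fgt_traps(uint64_t fgt_val, unsigned int bit, bool positive,
		      uint64_t res0)
{
	if (positive)
		return fgt_val & BIT(bit);

	/* Negative polarity: a set bit means "don't trap"... */
	if (fgt_val & BIT(bit))
		return false;

	/* ...and a clear bit only traps if the feature exists at all. */
	return !(res0 & BIT(bit));
}

int main(void)
{
	/* Positive bit set -> trap. */
	assert(fgt_traps(BIT(5), 5, true, 0));
	/* Negative bit clear, feature implemented -> trap. */
	assert(fgt_traps(0, 7, false, 0));
	/* Negative bit clear, but the bit is RES0 (feature absent) -> no trap. */
	assert(!fgt_traps(0, 7, false, BIT(7)));
	return 0;
}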
|
||||
|
||||
bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index)
|
||||
{
|
||||
union trap_config tc;
|
||||
enum trap_behaviour b;
|
||||
@ -1913,9 +2003,6 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
|
||||
u32 sysreg;
|
||||
u64 esr, val;
|
||||
|
||||
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
|
||||
return false;
|
||||
|
||||
esr = kvm_vcpu_get_esr(vcpu);
|
||||
sysreg = esr_sys64_to_sysreg(esr);
|
||||
is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
|
||||
@ -1926,13 +2013,27 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
|
||||
* A value of 0 for the whole entry means that we know nothing
|
||||
* for this sysreg, and that it cannot be re-injected into the
|
||||
* nested hypervisor. In this situation, let's cut it short.
|
||||
*
|
||||
* Note that ultimately, we could also make use of the xarray
|
||||
* to store the index of the sysreg in the local descriptor
|
||||
* array, avoiding another search... Hint, hint...
|
||||
*/
|
||||
if (!tc.val)
|
||||
return false;
|
||||
goto local;
|
||||
|
||||
/*
|
||||
* If a sysreg can be trapped using a FGT, first check whether we
|
||||
* trap for the purpose of forbidding the feature. In that case,
|
||||
* inject an UNDEF.
|
||||
*/
|
||||
if (tc.fgt != __NO_FGT_GROUP__ &&
|
||||
(vcpu->kvm->arch.fgu[tc.fgt] & BIT(tc.bit))) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we're not nesting, immediately return to the caller, with the
|
||||
* sysreg index, should we have it.
|
||||
*/
|
||||
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
|
||||
goto local;
|
||||
|
||||
switch ((enum fgt_group_id)tc.fgt) {
|
||||
case __NO_FGT_GROUP__:
|
||||
@ -1940,25 +2041,24 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
|
||||
|
||||
case HFGxTR_GROUP:
|
||||
if (is_read)
|
||||
val = sanitised_sys_reg(vcpu, HFGRTR_EL2);
|
||||
val = __vcpu_sys_reg(vcpu, HFGRTR_EL2);
|
||||
else
|
||||
val = sanitised_sys_reg(vcpu, HFGWTR_EL2);
|
||||
val = __vcpu_sys_reg(vcpu, HFGWTR_EL2);
|
||||
break;
|
||||
|
||||
case HDFGRTR_GROUP:
|
||||
case HDFGWTR_GROUP:
|
||||
if (is_read)
|
||||
val = sanitised_sys_reg(vcpu, HDFGRTR_EL2);
|
||||
val = __vcpu_sys_reg(vcpu, HDFGRTR_EL2);
|
||||
else
|
||||
val = sanitised_sys_reg(vcpu, HDFGWTR_EL2);
|
||||
val = __vcpu_sys_reg(vcpu, HDFGWTR_EL2);
|
||||
break;
|
||||
|
||||
case HAFGRTR_GROUP:
|
||||
val = sanitised_sys_reg(vcpu, HAFGRTR_EL2);
|
||||
val = __vcpu_sys_reg(vcpu, HAFGRTR_EL2);
|
||||
break;
|
||||
|
||||
case HFGITR_GROUP:
|
||||
val = sanitised_sys_reg(vcpu, HFGITR_EL2);
|
||||
val = __vcpu_sys_reg(vcpu, HFGITR_EL2);
|
||||
switch (tc.fgf) {
|
||||
u64 tmp;
|
||||
|
||||
@ -1966,7 +2066,7 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
|
||||
break;
|
||||
|
||||
case HCRX_FGTnXS:
|
||||
tmp = sanitised_sys_reg(vcpu, HCRX_EL2);
|
||||
tmp = __vcpu_sys_reg(vcpu, HCRX_EL2);
|
||||
if (tmp & HCRX_EL2_FGTnXS)
|
||||
tc.fgt = __NO_FGT_GROUP__;
|
||||
}
|
||||
@ -1975,10 +2075,11 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
|
||||
case __NR_FGT_GROUP_IDS__:
|
||||
/* Something is really wrong, bail out */
|
||||
WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
|
||||
return false;
|
||||
goto local;
|
||||
}
|
||||
|
||||
if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc))
|
||||
if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(vcpu->kvm, is_read,
|
||||
val, tc))
|
||||
goto inject;
|
||||
|
||||
b = compute_trap_behaviour(vcpu, tc);
|
||||
@ -1987,6 +2088,26 @@ bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
|
||||
((b & BEHAVE_FORWARD_WRITE) && !is_read))
|
||||
goto inject;
|
||||
|
||||
local:
|
||||
if (!tc.sri) {
|
||||
struct sys_reg_params params;
|
||||
|
||||
params = esr_sys64_to_params(esr);
|
||||
|
||||
/*
|
||||
* Check for the IMPDEF range, as per DDI0487 J.a,
|
||||
* D18.3.2 Reserved encodings for IMPLEMENTATION
|
||||
* DEFINED registers.
|
||||
*/
|
||||
if (!(params.Op0 == 3 && (params.CRn & 0b1011) == 0b1011))
|
||||
print_sys_reg_msg(¶ms,
|
||||
"Unsupported guest access at: %lx\n",
|
||||
*vcpu_pc(vcpu));
|
||||
kvm_inject_undefined(vcpu);
|
||||
return true;
|
||||
}
|
||||
|
||||
*sr_index = tc.sri - 1;
|
||||
return false;
|
||||
|
||||
inject:
|
||||
|
@ -117,7 +117,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
/*
|
||||
* Called just before entering the guest once we are no longer preemptable
|
||||
* Called just before entering the guest once we are no longer preemptible
|
||||
* and interrupts are disabled. If we have managed to run anything using
|
||||
* FP while we were preemptible (such as off the back of an interrupt),
|
||||
* then neither the host nor the guest own the FP hardware (and it was the
|
||||
|
@ -711,6 +711,7 @@ static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
|
||||
|
||||
/**
|
||||
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
|
||||
* @vcpu: the vCPU pointer
|
||||
*
|
||||
* This is for all registers.
|
||||
*/
|
||||
@ -729,6 +730,8 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
|
||||
|
||||
/**
|
||||
* kvm_arm_copy_reg_indices - get indices of all registers.
|
||||
* @vcpu: the vCPU pointer
|
||||
* @uindices: register list to copy
|
||||
*
|
||||
* We do core registers right here, then we append system regs.
|
||||
*/
|
||||
@ -902,8 +905,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
|
||||
|
||||
/**
|
||||
* kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
|
||||
* @kvm: pointer to the KVM struct
|
||||
* @kvm_guest_debug: the ioctl data buffer
|
||||
* @vcpu: the vCPU pointer
|
||||
* @dbg: the ioctl data buffer
|
||||
*
|
||||
* This sets up and enables the VM for guest debugging. Userspace
|
||||
* passes in a control flag to enable different debug types and
|
||||
|
@ -84,7 +84,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
/**
|
||||
* adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
|
||||
* kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
|
||||
* @vcpu: The VCPU pointer
|
||||
*
|
||||
* When exceptions occur while instructions are executed in Thumb IF-THEN
|
||||
@ -120,7 +120,7 @@ static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_skip_instr - skip a trapped instruction and proceed to the next
|
||||
* kvm_skip_instr32 - skip a trapped instruction and proceed to the next
|
||||
* @vcpu: The vcpu pointer
|
||||
*/
|
||||
void kvm_skip_instr32(struct kvm_vcpu *vcpu)
|
||||
|
@ -79,14 +79,48 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
|
||||
clr |= ~hfg & __ ## reg ## _nMASK; \
|
||||
} while(0)
|
||||
|
||||
#define update_fgt_traps_cs(vcpu, reg, clr, set) \
|
||||
#define reg_to_fgt_group_id(reg) \
|
||||
({ \
|
||||
enum fgt_group_id id; \
|
||||
switch(reg) { \
|
||||
case HFGRTR_EL2: \
|
||||
case HFGWTR_EL2: \
|
||||
id = HFGxTR_GROUP; \
|
||||
break; \
|
||||
case HFGITR_EL2: \
|
||||
id = HFGITR_GROUP; \
|
||||
break; \
|
||||
case HDFGRTR_EL2: \
|
||||
case HDFGWTR_EL2: \
|
||||
id = HDFGRTR_GROUP; \
|
||||
break; \
|
||||
case HAFGRTR_EL2: \
|
||||
id = HAFGRTR_GROUP; \
|
||||
break; \
|
||||
default: \
|
||||
BUILD_BUG_ON(1); \
|
||||
} \
|
||||
\
|
||||
id; \
|
||||
})
|
||||
|
||||
#define compute_undef_clr_set(vcpu, kvm, reg, clr, set) \
|
||||
do { \
|
||||
u64 hfg = kvm->arch.fgu[reg_to_fgt_group_id(reg)]; \
|
||||
set |= hfg & __ ## reg ## _MASK; \
|
||||
clr |= hfg & __ ## reg ## _nMASK; \
|
||||
} while(0)
|
||||
|
||||
#define update_fgt_traps_cs(hctxt, vcpu, kvm, reg, clr, set) \
|
||||
do { \
|
||||
struct kvm_cpu_context *hctxt = \
|
||||
&this_cpu_ptr(&kvm_host_data)->host_ctxt; \
|
||||
u64 c = 0, s = 0; \
|
||||
\
|
||||
ctxt_sys_reg(hctxt, reg) = read_sysreg_s(SYS_ ## reg); \
|
||||
compute_clr_set(vcpu, reg, c, s); \
|
||||
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) \
|
||||
compute_clr_set(vcpu, reg, c, s); \
|
||||
\
|
||||
compute_undef_clr_set(vcpu, kvm, reg, c, s); \
|
||||
\
|
||||
s |= set; \
|
||||
c |= clr; \
|
||||
if (c || s) { \
|
||||
@ -97,8 +131,8 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define update_fgt_traps(vcpu, reg) \
|
||||
update_fgt_traps_cs(vcpu, reg, 0, 0)
|
||||
#define update_fgt_traps(hctxt, vcpu, kvm, reg) \
|
||||
update_fgt_traps_cs(hctxt, vcpu, kvm, reg, 0, 0)
|
||||
|
||||
/*
|
||||
* Validate the fine grain trap masks.
|
||||
@ -122,8 +156,7 @@ static inline bool cpu_has_amu(void)
|
||||
static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
|
||||
u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
|
||||
u64 r_val, w_val;
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
|
||||
CHECK_FGT_MASKS(HFGRTR_EL2);
|
||||
CHECK_FGT_MASKS(HFGWTR_EL2);
|
||||
@ -136,72 +169,45 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT))
|
||||
return;
|
||||
|
||||
ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
|
||||
ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_SME)) {
|
||||
tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
|
||||
|
||||
r_clr |= tmp;
|
||||
w_clr |= tmp;
|
||||
}
|
||||
|
||||
/*
|
||||
* Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
|
||||
*/
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
|
||||
w_set |= HFGxTR_EL2_TCR_EL1_MASK;
|
||||
|
||||
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
|
||||
compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
|
||||
compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
|
||||
}
|
||||
|
||||
/* The default to trap everything not handled or supported in KVM. */
|
||||
tmp = HFGxTR_EL2_nAMAIR2_EL1 | HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nS2POR_EL1 |
|
||||
HFGxTR_EL2_nPOR_EL1 | HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nACCDATA_EL1;
|
||||
|
||||
r_val = __HFGRTR_EL2_nMASK & ~tmp;
|
||||
r_val |= r_set;
|
||||
r_val &= ~r_clr;
|
||||
|
||||
w_val = __HFGWTR_EL2_nMASK & ~tmp;
|
||||
w_val |= w_set;
|
||||
w_val &= ~w_clr;
|
||||
|
||||
write_sysreg_s(r_val, SYS_HFGRTR_EL2);
|
||||
write_sysreg_s(w_val, SYS_HFGWTR_EL2);
|
||||
|
||||
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
|
||||
return;
|
||||
|
||||
update_fgt_traps(vcpu, HFGITR_EL2);
|
||||
update_fgt_traps(vcpu, HDFGRTR_EL2);
|
||||
update_fgt_traps(vcpu, HDFGWTR_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGRTR_EL2);
|
||||
update_fgt_traps_cs(hctxt, vcpu, kvm, HFGWTR_EL2, 0,
|
||||
cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) ?
|
||||
HFGxTR_EL2_TCR_EL1_MASK : 0);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HFGITR_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGRTR_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HDFGWTR_EL2);
|
||||
|
||||
if (cpu_has_amu())
|
||||
update_fgt_traps(vcpu, HAFGRTR_EL2);
|
||||
update_fgt_traps(hctxt, vcpu, kvm, HAFGRTR_EL2);
|
||||
}
|
||||
|
||||
#define __deactivate_fgt(htcxt, vcpu, kvm, reg) \
|
||||
do { \
|
||||
if ((vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) || \
|
||||
kvm->arch.fgu[reg_to_fgt_group_id(reg)]) \
|
||||
write_sysreg_s(ctxt_sys_reg(hctxt, reg), \
|
||||
SYS_ ## reg); \
|
||||
} while(0)
|
||||
|
||||
static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_FGT))
|
||||
return;
|
||||
|
||||
write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
|
||||
write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
|
||||
|
||||
if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
|
||||
return;
|
||||
|
||||
write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
|
||||
write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
|
||||
write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
|
||||
__deactivate_fgt(hctxt, vcpu, kvm, HFGRTR_EL2);
|
||||
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
|
||||
write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
|
||||
else
|
||||
__deactivate_fgt(hctxt, vcpu, kvm, HFGWTR_EL2);
|
||||
__deactivate_fgt(hctxt, vcpu, kvm, HFGITR_EL2);
|
||||
__deactivate_fgt(hctxt, vcpu, kvm, HDFGRTR_EL2);
|
||||
__deactivate_fgt(hctxt, vcpu, kvm, HDFGWTR_EL2);
|
||||
|
||||
if (cpu_has_amu())
|
||||
write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
|
||||
__deactivate_fgt(hctxt, vcpu, kvm, HAFGRTR_EL2);
|
||||
}
|
||||
|
||||
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
|
||||
@ -230,7 +236,7 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
|
||||
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
|
||||
u64 hcrx = HCRX_GUEST_FLAGS;
|
||||
u64 hcrx = vcpu->arch.hcrx_el2;
|
||||
if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
|
||||
u64 clr = 0, set = 0;
|
||||
|
||||
|
@ -27,16 +27,34 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
|
||||
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
|
||||
}
|
||||
|
||||
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
|
||||
static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
|
||||
|
||||
if (!vcpu)
|
||||
vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
|
||||
|
||||
return vcpu;
|
||||
}
|
||||
|
||||
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = ctxt_to_vcpu(ctxt);
|
||||
|
||||
return kvm_has_mte(kern_hyp_va(vcpu->kvm));
|
||||
}
|
||||
|
||||
static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
struct kvm_vcpu *vcpu;
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_S1PIE))
|
||||
return false;
|
||||
|
||||
vcpu = ctxt_to_vcpu(ctxt);
|
||||
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1PIE, IMP);
|
||||
}
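
ctxt_to_vcpu() relies on the guest context being embedded in struct kvm_vcpu, so container_of() can walk back from the member to the enclosing vcpu when __hyp_running_vcpu is not set. A standalone illustration of that embedded-member trick, using simplified stand-in types rather than the real KVM structures.

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for struct kvm_cpu_context / struct kvm_vcpu. */
struct cpu_context {
	unsigned long regs[4];
};

struct vcpu {
	int id;
	struct cpu_context ctxt;	/* embedded, like vcpu->arch.ctxt */
};

static struct vcpu *ctxt_to_vcpu(struct cpu_context *ctxt)
{
	return container_of(ctxt, struct vcpu, ctxt);
}

int main(void)
{
	struct vcpu v = { .id = 3 };

	assert(ctxt_to_vcpu(&v.ctxt) == &v);
	return 0;
}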
|
||||
|
||||
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
|
||||
@ -55,7 +73,7 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
|
||||
ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
|
||||
ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
|
||||
ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
|
||||
if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
|
||||
if (ctxt_has_s1pie(ctxt)) {
|
||||
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
|
||||
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
|
||||
}
|
||||
@ -131,7 +149,7 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
|
||||
if (cpus_have_final_cap(ARM64_HAS_S1PIE)) {
|
||||
if (ctxt_has_s1pie(ctxt)) {
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
|
||||
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
|
||||
}
|
||||
|
@ -31,8 +31,8 @@ static void __debug_save_spe(u64 *pmscr_el1)
|
||||
return;
|
||||
|
||||
/* Yes; save the control register and disable data generation */
|
||||
*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
|
||||
write_sysreg_s(0, SYS_PMSCR_EL1);
|
||||
*pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
|
||||
write_sysreg_el1(0, SYS_PMSCR);
|
||||
isb();
|
||||
|
||||
/* Now drain all buffered data to memory */
|
||||
@ -48,7 +48,7 @@ static void __debug_restore_spe(u64 pmscr_el1)
|
||||
isb();
|
||||
|
||||
/* Re-enable data generation */
|
||||
write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
|
||||
write_sysreg_el1(pmscr_el1, SYS_PMSCR);
|
||||
}
|
||||
|
||||
static void __debug_save_trace(u64 *trfcr_el1)
|
||||
@ -63,8 +63,8 @@ static void __debug_save_trace(u64 *trfcr_el1)
|
||||
* Since access to TRFCR_EL1 is trapped, the guest can't
|
||||
* modify the filtering set by the host.
|
||||
*/
|
||||
*trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
|
||||
write_sysreg_s(0, SYS_TRFCR_EL1);
|
||||
*trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
|
||||
write_sysreg_el1(0, SYS_TRFCR);
|
||||
isb();
|
||||
/* Drain the trace buffer to memory */
|
||||
tsb_csync();
|
||||
@ -76,7 +76,7 @@ static void __debug_restore_trace(u64 trfcr_el1)
|
||||
return;
|
||||
|
||||
/* Restore trace filter controls */
|
||||
write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
|
||||
write_sysreg_el1(trfcr_el1, SYS_TRFCR);
|
||||
}
|
||||
|
||||
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
|
||||
|
@ -110,7 +110,7 @@ SYM_FUNC_END(__host_enter)
|
||||
* u64 elr, u64 par);
|
||||
*/
|
||||
SYM_FUNC_START(__hyp_do_panic)
|
||||
/* Prepare and exit to the host's panic funciton. */
|
||||
/* Prepare and exit to the host's panic function. */
|
||||
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
|
||||
PSR_MODE_EL1h)
|
||||
msr spsr_el2, lr
|
||||
|
@ -155,7 +155,7 @@ int hyp_back_vmemmap(phys_addr_t back)
|
||||
start = hyp_memory[i].base;
|
||||
start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
|
||||
/*
|
||||
* The begining of the hyp_vmemmap region for the current
|
||||
* The beginning of the hyp_vmemmap region for the current
|
||||
* memblock may already be backed by the page backing the end
|
||||
* the previous region, so avoid mapping it twice.
|
||||
*/
|
||||
@ -408,7 +408,7 @@ static void *admit_host_page(void *arg)
|
||||
return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
|
||||
}
|
||||
|
||||
/* Refill our local memcache by poping pages from the one provided by the host. */
|
||||
/* Refill our local memcache by popping pages from the one provided by the host. */
|
||||
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
|
||||
struct kvm_hyp_memcache *host_mc)
|
||||
{
|
||||
|
@ -717,15 +717,29 @@ void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
|
||||
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
|
||||
kvm_pte_t *ptep)
|
||||
{
|
||||
bool device = prot & KVM_PGTABLE_PROT_DEVICE;
|
||||
kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
|
||||
KVM_S2_MEMATTR(pgt, NORMAL);
|
||||
kvm_pte_t attr;
|
||||
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
|
||||
|
||||
switch (prot & (KVM_PGTABLE_PROT_DEVICE |
|
||||
KVM_PGTABLE_PROT_NORMAL_NC)) {
|
||||
case KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_NORMAL_NC:
|
||||
return -EINVAL;
|
||||
case KVM_PGTABLE_PROT_DEVICE:
|
||||
if (prot & KVM_PGTABLE_PROT_X)
|
||||
return -EINVAL;
|
||||
attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
|
||||
break;
|
||||
case KVM_PGTABLE_PROT_NORMAL_NC:
|
||||
if (prot & KVM_PGTABLE_PROT_X)
|
||||
return -EINVAL;
|
||||
attr = KVM_S2_MEMATTR(pgt, NORMAL_NC);
|
||||
break;
|
||||
default:
|
||||
attr = KVM_S2_MEMATTR(pgt, NORMAL);
|
||||
}
|
||||
|
||||
if (!(prot & KVM_PGTABLE_PROT_X))
|
||||
attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
|
||||
else if (device)
|
||||
return -EINVAL;
|
||||
|
||||
if (prot & KVM_PGTABLE_PROT_R)
|
||||
attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
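
The reworked stage2_set_prot_attr() above now picks between Device-nGnRE, Normal-NC and Normal memory, and refuses executable device or non-cacheable mappings. Below is a standalone sketch of that decision table; the flag and attribute names are local stand-ins, not the kernel's.

#include <assert.h>

/* Illustrative stand-ins for the KVM_PGTABLE_PROT_* flags. */
enum {
	PROT_X		= 1 << 0,
	PROT_DEVICE	= 1 << 1,
	PROT_NORMAL_NC	= 1 << 2,
};

enum memattr {
	MEMATTR_INVALID,
	MEMATTR_DEVICE_nGnRE,
	MEMATTR_NORMAL_NC,
	MEMATTR_NORMAL,
};

/* Device and non-cacheable are mutually exclusive and never executable. */
static enum memattr pick_memattr(unsigned int prot)
{
	switch (prot & (PROT_DEVICE | PROT_NORMAL_NC)) {
	case PROT_DEVICE | PROT_NORMAL_NC:
		return MEMATTR_INVALID;
	case PROT_DEVICE:
		return (prot & PROT_X) ? MEMATTR_INVALID : MEMATTR_DEVICE_nGnRE;
	case PROT_NORMAL_NC:
		return (prot & PROT_X) ? MEMATTR_INVALID : MEMATTR_NORMAL_NC;
	default:
		return MEMATTR_NORMAL;
	}
}

int main(void)
{
	assert(pick_memattr(PROT_DEVICE) == MEMATTR_DEVICE_nGnRE);
	assert(pick_memattr(PROT_NORMAL_NC) == MEMATTR_NORMAL_NC);
	assert(pick_memattr(PROT_DEVICE | PROT_X) == MEMATTR_INVALID);
	assert(pick_memattr(PROT_X) == MEMATTR_NORMAL);
	return 0;
}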
|
||||
|
@ -95,7 +95,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
/**
|
||||
* __vcpu_put_switch_syregs - Restore host system registers to the physical CPU
|
||||
* __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
|
||||
*
|
||||
* @vcpu: The VCPU pointer
|
||||
*
|
||||
|
@ -134,7 +134,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
|
||||
if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
|
||||
fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
|
||||
} else {
|
||||
/* no need to shuffle FS[4] into DFSR[10] as its 0 */
|
||||
/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
|
||||
fsr = DFSR_FSC_EXTABT_nLPAE;
|
||||
}
|
||||
|
||||
|
@ -305,7 +305,7 @@ static void invalidate_icache_guest_page(void *va, size_t size)
|
||||
* does.
|
||||
*/
|
||||
/**
|
||||
* unmap_stage2_range -- Clear stage2 page table entries to unmap a range
|
||||
* __unmap_stage2_range -- Clear stage2 page table entries to unmap a range
|
||||
* @mmu: The KVM stage-2 MMU pointer
|
||||
* @start: The intermediate physical base address of the range to unmap
|
||||
* @size: The size of the area to unmap
|
||||
@ -1381,7 +1381,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
int ret = 0;
|
||||
bool write_fault, writable, force_pte = false;
|
||||
bool exec_fault, mte_allowed;
|
||||
bool device = false;
|
||||
bool device = false, vfio_allow_any_uc = false;
|
||||
unsigned long mmu_seq;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
|
||||
@ -1472,6 +1472,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
gfn = fault_ipa >> PAGE_SHIFT;
|
||||
mte_allowed = kvm_vma_mte_allowed(vma);
|
||||
|
||||
vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
|
||||
|
||||
/* Don't use the VMA after the unlock -- it may have vanished */
|
||||
vma = NULL;
|
||||
|
||||
@ -1557,10 +1559,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
if (exec_fault)
|
||||
prot |= KVM_PGTABLE_PROT_X;
|
||||
|
||||
if (device)
|
||||
prot |= KVM_PGTABLE_PROT_DEVICE;
|
||||
else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC))
|
||||
if (device) {
|
||||
if (vfio_allow_any_uc)
|
||||
prot |= KVM_PGTABLE_PROT_NORMAL_NC;
|
||||
else
|
||||
prot |= KVM_PGTABLE_PROT_DEVICE;
|
||||
} else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) {
|
||||
prot |= KVM_PGTABLE_PROT_X;
|
||||
}
|
||||
|
||||
/*
|
||||
* Under the premise of getting a FSC_PERM fault, we just need to relax
|
||||
|
@ -133,6 +133,13 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
|
||||
val |= FIELD_PREP(NV_FTR(MMFR2, TTL), 0b0001);
|
||||
break;
|
||||
|
||||
case SYS_ID_AA64MMFR4_EL1:
|
||||
val = 0;
|
||||
if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
|
||||
val |= FIELD_PREP(NV_FTR(MMFR4, E2H0),
|
||||
ID_AA64MMFR4_EL1_E2H0_NI_NV1);
|
||||
break;
|
||||
|
||||
case SYS_ID_AA64DFR0_EL1:
|
||||
/* Only limited support for PMU, Debug, BPs and WPs */
|
||||
val &= (NV_FTR(DFR0, PMUVer) |
|
||||
@ -156,15 +163,280 @@ static u64 limit_nv_id_reg(u32 id, u64 val)
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *vcpu, enum vcpu_sysreg sr)
{
u64 v = ctxt_sys_reg(&vcpu->arch.ctxt, sr);
struct kvm_sysreg_masks *masks;

masks = vcpu->kvm->arch.sysreg_masks;

if (masks) {
sr -= __VNCR_START__;

v &= ~masks->mask[sr].res0;
v |= masks->mask[sr].res1;
}

return v;
}

static void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
{
int i = sr - __VNCR_START__;

kvm->arch.sysreg_masks->mask[i].res0 = res0;
kvm->arch.sysreg_masks->mask[i].res1 = res1;
}
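
set_sysreg_masks() records which bits of each VNCR-backed register are RES0/RES1 for this particular VM, and kvm_vcpu_sanitise_vncr_reg() above applies them by clearing the former and forcing the latter. A small standalone example of that masking, using the VTCR_EL2 masks set further down in kvm_init_nv_sysregs() as sample values.

#include <assert.h>
#include <stdint.h>

/* Apply per-VM RES0/RES1 masks the way kvm_vcpu_sanitise_vncr_reg() does. */
static uint64_t sanitise(uint64_t val, uint64_t res0, uint64_t res1)
{
	val &= ~res0;	/* reserved-as-zero bits can never be set */
	val |= res1;	/* reserved-as-one bits are always set */
	return val;
}

int main(void)
{
	/* VTCR_EL2 below: bits [63:32] and [30:20] are RES0, bit 31 is RES1. */
	uint64_t res0 = (0xffffffffULL << 32) | (0x7ffULL << 20);
	uint64_t res1 = 1ULL << 31;

	uint64_t raw = 0xdeadbeef12345678ULL;
	uint64_t clean = sanitise(raw, res0, res1);

	assert((clean & res0) == 0);
	assert((clean & res1) == res1);
	return 0;
}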
|
||||
|
||||
int kvm_init_nv_sysregs(struct kvm *kvm)
|
||||
{
|
||||
u64 res0, res1;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&kvm->arch.config_lock);
|
||||
|
||||
if (kvm->arch.sysreg_masks)
|
||||
goto out;
|
||||
|
||||
kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
|
||||
GFP_KERNEL);
|
||||
if (!kvm->arch.sysreg_masks) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
for (int i = 0; i < KVM_ARM_ID_REG_NUM; i++)
|
||||
kvm->arch.id_regs[i] = limit_nv_id_reg(IDX_IDREG(i),
|
||||
kvm->arch.id_regs[i]);
|
||||
|
||||
/* VTTBR_EL2 */
|
||||
res0 = res1 = 0;
|
||||
if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
|
||||
res0 |= GENMASK(63, 56);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
|
||||
res0 |= VTTBR_CNP_BIT;
|
||||
set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
|
||||
|
||||
/* VTCR_EL2 */
|
||||
res0 = GENMASK(63, 32) | GENMASK(30, 20);
|
||||
res1 = BIT(31);
|
||||
set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
|
||||
|
||||
/* VMPIDR_EL2 */
|
||||
res0 = GENMASK(63, 40) | GENMASK(30, 24);
|
||||
res1 = BIT(31);
|
||||
set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
|
||||
|
||||
/* HCR_EL2 */
|
||||
res0 = BIT(48);
|
||||
res1 = HCR_RW;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, TWED, IMP))
|
||||
res0 |= GENMASK(63, 59);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, MTE, MTE2))
|
||||
res0 |= (HCR_TID5 | HCR_DCT | HCR_ATA);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, TTLBxS))
|
||||
res0 |= (HCR_TTLBIS | HCR_TTLBOS);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
|
||||
!kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
|
||||
res0 |= HCR_ENSCXT;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, EVT, IMP))
|
||||
res0 |= (HCR_TOCU | HCR_TICAB | HCR_TID4);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
|
||||
res0 |= HCR_AMVOFFEN;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, V1P1))
|
||||
res0 |= HCR_FIEN;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, FWB, IMP))
|
||||
res0 |= HCR_FWB;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, NV2))
|
||||
res0 |= HCR_NV2;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, NV, IMP))
|
||||
res0 |= (HCR_AT | HCR_NV1 | HCR_NV);
|
||||
if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
|
||||
__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
|
||||
res0 |= (HCR_API | HCR_APK);
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TME, IMP))
|
||||
res0 |= BIT(39);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
|
||||
res0 |= (HCR_TEA | HCR_TERR);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
|
||||
res0 |= HCR_TLOR;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, E2H0, IMP))
|
||||
res1 |= HCR_E2H;
|
||||
set_sysreg_masks(kvm, HCR_EL2, res0, res1);
|
||||
|
||||
/* HCRX_EL2 */
|
||||
res0 = HCRX_EL2_RES0;
|
||||
res1 = HCRX_EL2_RES1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
|
||||
res0 |= HCRX_EL2_PACMEn;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
|
||||
res0 |= HCRX_EL2_EnFPM;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
|
||||
res0 |= HCRX_EL2_GCSEn;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP))
|
||||
res0 |= HCRX_EL2_EnIDCP128;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC))
|
||||
res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP))
|
||||
res0 |= HCRX_EL2_TMEA;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
|
||||
res0 |= HCRX_EL2_D128En;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
|
||||
res0 |= HCRX_EL2_PTTWI;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
|
||||
res0 |= HCRX_EL2_SCTLR2En;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
|
||||
res0 |= HCRX_EL2_TCR2En;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
|
||||
res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP))
|
||||
res0 |= HCRX_EL2_CMOW;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
|
||||
res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ||
|
||||
!(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS))
|
||||
res0 |= HCRX_EL2_SMPME;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
|
||||
res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS);
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
|
||||
res0 |= HCRX_EL2_EnASR;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
|
||||
res0 |= HCRX_EL2_EnALS;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
|
||||
res0 |= HCRX_EL2_EnAS0;
|
||||
set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
|
||||
|
||||
/* HFG[RW]TR_EL2 */
|
||||
res0 = res1 = 0;
|
||||
if (!(__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_ADDRESS) &&
|
||||
__vcpu_has_feature(&kvm->arch, KVM_ARM_VCPU_PTRAUTH_GENERIC)))
|
||||
res0 |= (HFGxTR_EL2_APDAKey | HFGxTR_EL2_APDBKey |
|
||||
HFGxTR_EL2_APGAKey | HFGxTR_EL2_APIAKey |
|
||||
HFGxTR_EL2_APIBKey);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, LO, IMP))
|
||||
res0 |= (HFGxTR_EL2_LORC_EL1 | HFGxTR_EL2_LOREA_EL1 |
|
||||
HFGxTR_EL2_LORID_EL1 | HFGxTR_EL2_LORN_EL1 |
|
||||
HFGxTR_EL2_LORSA_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, CSV2, CSV2_2) &&
|
||||
!kvm_has_feat(kvm, ID_AA64PFR1_EL1, CSV2_frac, CSV2_1p2))
|
||||
res0 |= (HFGxTR_EL2_SCXTNUM_EL1 | HFGxTR_EL2_SCXTNUM_EL0);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, GIC, IMP))
|
||||
res0 |= HFGxTR_EL2_ICC_IGRPENn_EL1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RAS, IMP))
|
||||
res0 |= (HFGxTR_EL2_ERRIDR_EL1 | HFGxTR_EL2_ERRSELR_EL1 |
|
||||
HFGxTR_EL2_ERXFR_EL1 | HFGxTR_EL2_ERXCTLR_EL1 |
|
||||
HFGxTR_EL2_ERXSTATUS_EL1 | HFGxTR_EL2_ERXMISCn_EL1 |
|
||||
HFGxTR_EL2_ERXPFGF_EL1 | HFGxTR_EL2_ERXPFGCTL_EL1 |
|
||||
HFGxTR_EL2_ERXPFGCDN_EL1 | HFGxTR_EL2_ERXADDR_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
|
||||
res0 |= HFGxTR_EL2_nACCDATA_EL1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
|
||||
res0 |= (HFGxTR_EL2_nGCS_EL0 | HFGxTR_EL2_nGCS_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP))
|
||||
res0 |= (HFGxTR_EL2_nSMPRI_EL1 | HFGxTR_EL2_nTPIDR2_EL0);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
|
||||
res0 |= HFGxTR_EL2_nRCWMASK_EL1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
|
||||
res0 |= (HFGxTR_EL2_nPIRE0_EL1 | HFGxTR_EL2_nPIR_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
|
||||
res0 |= (HFGxTR_EL2_nPOR_EL0 | HFGxTR_EL2_nPOR_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S2POE, IMP))
|
||||
res0 |= HFGxTR_EL2_nS2POR_EL1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
|
||||
res0 |= (HFGxTR_EL2_nMAIR2_EL1 | HFGxTR_EL2_nAMAIR2_EL1);
|
||||
set_sysreg_masks(kvm, HFGRTR_EL2, res0 | __HFGRTR_EL2_RES0, res1);
|
||||
set_sysreg_masks(kvm, HFGWTR_EL2, res0 | __HFGWTR_EL2_RES0, res1);
|
||||
|
||||
/* HDFG[RW]TR_EL2 */
|
||||
res0 = res1 = 0;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DoubleLock, IMP))
|
||||
res0 |= HDFGRTR_EL2_OSDLR_EL1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
|
||||
res0 |= (HDFGRTR_EL2_PMEVCNTRn_EL0 | HDFGRTR_EL2_PMEVTYPERn_EL0 |
|
||||
HDFGRTR_EL2_PMCCFILTR_EL0 | HDFGRTR_EL2_PMCCNTR_EL0 |
|
||||
HDFGRTR_EL2_PMCNTEN | HDFGRTR_EL2_PMINTEN |
|
||||
HDFGRTR_EL2_PMOVS | HDFGRTR_EL2_PMSELR_EL0 |
|
||||
HDFGRTR_EL2_PMMIR_EL1 | HDFGRTR_EL2_PMUSERENR_EL0 |
|
||||
HDFGRTR_EL2_PMCEIDn_EL0);
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
|
||||
res0 |= (HDFGRTR_EL2_PMBLIMITR_EL1 | HDFGRTR_EL2_PMBPTR_EL1 |
|
||||
HDFGRTR_EL2_PMBSR_EL1 | HDFGRTR_EL2_PMSCR_EL1 |
|
||||
HDFGRTR_EL2_PMSEVFR_EL1 | HDFGRTR_EL2_PMSFCR_EL1 |
|
||||
HDFGRTR_EL2_PMSICR_EL1 | HDFGRTR_EL2_PMSIDR_EL1 |
|
||||
HDFGRTR_EL2_PMSIRR_EL1 | HDFGRTR_EL2_PMSLATFR_EL1 |
|
||||
HDFGRTR_EL2_PMBIDR_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
|
||||
res0 |= (HDFGRTR_EL2_TRC | HDFGRTR_EL2_TRCAUTHSTATUS |
|
||||
HDFGRTR_EL2_TRCAUXCTLR | HDFGRTR_EL2_TRCCLAIM |
|
||||
HDFGRTR_EL2_TRCCNTVRn | HDFGRTR_EL2_TRCID |
|
||||
HDFGRTR_EL2_TRCIMSPECn | HDFGRTR_EL2_TRCOSLSR |
|
||||
HDFGRTR_EL2_TRCPRGCTLR | HDFGRTR_EL2_TRCSEQSTR |
|
||||
HDFGRTR_EL2_TRCSSCSRn | HDFGRTR_EL2_TRCSTATR |
|
||||
HDFGRTR_EL2_TRCVICTLR);
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
|
||||
res0 |= (HDFGRTR_EL2_TRBBASER_EL1 | HDFGRTR_EL2_TRBIDR_EL1 |
|
||||
HDFGRTR_EL2_TRBLIMITR_EL1 | HDFGRTR_EL2_TRBMAR_EL1 |
|
||||
HDFGRTR_EL2_TRBPTR_EL1 | HDFGRTR_EL2_TRBSR_EL1 |
|
||||
HDFGRTR_EL2_TRBTRG_EL1);
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
|
||||
res0 |= (HDFGRTR_EL2_nBRBIDR | HDFGRTR_EL2_nBRBCTL |
|
||||
HDFGRTR_EL2_nBRBDATA);
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
|
||||
res0 |= HDFGRTR_EL2_nPMSNEVFR_EL1;
|
||||
set_sysreg_masks(kvm, HDFGRTR_EL2, res0 | HDFGRTR_EL2_RES0, res1);
|
||||
|
||||
/* Reuse the bits from the read-side and add the write-specific stuff */
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
|
||||
res0 |= (HDFGWTR_EL2_PMCR_EL0 | HDFGWTR_EL2_PMSWINC_EL0);
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceVer, IMP))
|
||||
res0 |= HDFGWTR_EL2_TRCOSLAR;
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
|
||||
res0 |= HDFGWTR_EL2_TRFCR_EL1;
|
||||
set_sysreg_masks(kvm, HFGWTR_EL2, res0 | HDFGWTR_EL2_RES0, res1);
|
||||
|
||||
/* HFGITR_EL2 */
|
||||
res0 = HFGITR_EL2_RES0;
|
||||
res1 = HFGITR_EL2_RES1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, DPB, DPB2))
|
||||
res0 |= HFGITR_EL2_DCCVADP;
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN2))
|
||||
res0 |= (HFGITR_EL2_ATS1E1RP | HFGITR_EL2_ATS1E1WP);
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
|
||||
res0 |= (HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
|
||||
HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS |
|
||||
HFGITR_EL2_TLBIVAALE1OS | HFGITR_EL2_TLBIVALE1OS |
|
||||
HFGITR_EL2_TLBIVAAE1OS | HFGITR_EL2_TLBIASIDE1OS |
|
||||
HFGITR_EL2_TLBIVAE1OS | HFGITR_EL2_TLBIVMALLE1OS);
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
|
||||
res0 |= (HFGITR_EL2_TLBIRVAALE1 | HFGITR_EL2_TLBIRVALE1 |
|
||||
HFGITR_EL2_TLBIRVAAE1 | HFGITR_EL2_TLBIRVAE1 |
|
||||
HFGITR_EL2_TLBIRVAALE1IS | HFGITR_EL2_TLBIRVALE1IS |
|
||||
HFGITR_EL2_TLBIRVAAE1IS | HFGITR_EL2_TLBIRVAE1IS |
|
||||
HFGITR_EL2_TLBIRVAALE1OS | HFGITR_EL2_TLBIRVALE1OS |
|
||||
HFGITR_EL2_TLBIRVAAE1OS | HFGITR_EL2_TLBIRVAE1OS);
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, IMP))
|
||||
res0 |= (HFGITR_EL2_CFPRCTX | HFGITR_EL2_DVPRCTX |
|
||||
HFGITR_EL2_CPPRCTX);
|
||||
if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, BRBE, IMP))
|
||||
res0 |= (HFGITR_EL2_nBRBINJ | HFGITR_EL2_nBRBIALL);
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
|
||||
res0 |= (HFGITR_EL2_nGCSPUSHM_EL1 | HFGITR_EL2_nGCSSTR_EL1 |
|
||||
HFGITR_EL2_nGCSEPP);
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, SPECRES, COSP_RCTX))
|
||||
res0 |= HFGITR_EL2_COSPRCTX;
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, ATS1A, IMP))
|
||||
res0 |= HFGITR_EL2_ATS1E1A;
|
||||
set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);
|
||||
|
||||
/* HAFGRTR_EL2 - not a lot to see here */
|
||||
res0 = HAFGRTR_EL2_RES0;
|
||||
res1 = HAFGRTR_EL2_RES1;
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, V1P1))
|
||||
res0 |= ~(res0 | res1);
|
||||
set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
|
||||
out:
|
||||
mutex_unlock(&kvm->arch.config_lock);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
@ -64,12 +64,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
|
||||
{
|
||||
u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 |
|
||||
kvm_pmu_event_mask(kvm);
|
||||
u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1);
|
||||
|
||||
if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0))
|
||||
if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL2, IMP))
|
||||
mask |= ARMV8_PMU_INCLUDE_EL2;
|
||||
|
||||
if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0))
|
||||
if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
|
||||
mask |= ARMV8_PMU_EXCLUDE_NS_EL0 |
|
||||
ARMV8_PMU_EXCLUDE_NS_EL1 |
|
||||
ARMV8_PMU_EXCLUDE_EL3;
|
||||
@ -83,8 +82,10 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm)
|
||||
*/
|
||||
static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc)
|
||||
{
|
||||
struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
|
||||
|
||||
return (pmc->idx == ARMV8_PMU_CYCLE_IDX ||
|
||||
kvm_pmu_is_3p5(kvm_pmc_to_vcpu(pmc)));
|
||||
kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5));
|
||||
}
|
||||
|
||||
static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc)
|
||||
@ -419,7 +420,7 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
|
||||
kvm_pmu_update_state(vcpu);
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
|
||||
* to the event.
|
||||
* This is why we need a callback to do it once outside of the NMI context.
|
||||
@ -490,7 +491,7 @@ static u64 compute_period(struct kvm_pmc *pmc, u64 counter)
|
||||
return val;
|
||||
}
|
||||
|
||||
/**
|
||||
/*
|
||||
* When the perf event overflows, set the overflow status and inform the vcpu.
|
||||
*/
|
||||
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
|
||||
@ -556,7 +557,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
|
||||
return;
|
||||
|
||||
/* Fixup PMCR_EL0 to reconcile the PMU version and the LP bit */
|
||||
if (!kvm_pmu_is_3p5(vcpu))
|
||||
if (!kvm_has_feat(vcpu->kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
|
||||
val &= ~ARMV8_PMU_PMCR_LP;
|
||||
|
||||
/* The reset bits don't indicate any state, and shouldn't be saved. */
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/bsearch.h>
|
||||
#include <linux/cacheinfo.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/printk.h>
|
||||
@ -31,6 +32,7 @@
|
||||
|
||||
#include <trace/events/kvm.h>
|
||||
|
||||
#include "check-res-bits.h"
|
||||
#include "sys_regs.h"
|
||||
|
||||
#include "trace.h"
|
||||
@ -505,10 +507,9 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *p,
|
||||
const struct sys_reg_desc *r)
|
||||
{
|
||||
u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1);
|
||||
u32 sr = reg_to_encoding(r);
|
||||
|
||||
if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
|
||||
if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP)) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
return false;
|
||||
}
|
||||
@ -1685,7 +1686,8 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
|
||||
u64 __f_val = FIELD_GET(reg##_##field##_MASK, val); \
|
||||
(val) &= ~reg##_##field##_MASK; \
|
||||
(val) |= FIELD_PREP(reg##_##field##_MASK, \
|
||||
min(__f_val, (u64)reg##_##field##_##limit)); \
|
||||
min(__f_val, \
|
||||
(u64)SYS_FIELD_VALUE(reg, field, limit))); \
|
||||
(val); \
|
||||
})
|
||||
|
||||
@ -2174,6 +2176,16 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
|
||||
return true;
|
||||
}
|
||||
|
||||
static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
|
||||
{
|
||||
u64 val = r->val;
|
||||
|
||||
if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
|
||||
val |= HCR_E2H;
|
||||
|
||||
return __vcpu_sys_reg(vcpu, r->reg) = val;
|
||||
}
|
||||
|
||||
/*
|
||||
* Architected system registers.
|
||||
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
|
||||
@ -2186,16 +2198,6 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
|
||||
* guest...
|
||||
*/
|
||||
static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
{ SYS_DESC(SYS_DC_ISW), access_dcsw },
|
||||
{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CSW), access_dcsw },
|
||||
{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CISW), access_dcsw },
|
||||
{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
|
||||
|
||||
DBG_BCR_BVR_WCR_WVR_EL1(0),
|
||||
DBG_BCR_BVR_WCR_WVR_EL1(1),
|
||||
{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
|
||||
@ -2349,7 +2351,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
ID_AA64MMFR2_EL1_NV |
|
||||
ID_AA64MMFR2_EL1_CCIDX)),
|
||||
ID_SANITISED(ID_AA64MMFR3_EL1),
|
||||
ID_UNALLOCATED(7,4),
|
||||
ID_SANITISED(ID_AA64MMFR4_EL1),
|
||||
ID_UNALLOCATED(7,5),
|
||||
ID_UNALLOCATED(7,6),
|
||||
ID_UNALLOCATED(7,7),
|
||||
@ -2665,7 +2667,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
|
||||
EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
|
||||
EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
|
||||
EL2_REG_VNCR(HCR_EL2, reset_val, 0),
|
||||
EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
|
||||
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
|
||||
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
|
||||
EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
|
||||
@ -2727,6 +2729,18 @@ static const struct sys_reg_desc sys_reg_descs[] = {
|
||||
EL2_REG(SP_EL2, NULL, reset_unknown, 0),
|
||||
};
|
||||
|
||||
static struct sys_reg_desc sys_insn_descs[] = {
|
||||
{ SYS_DESC(SYS_DC_ISW), access_dcsw },
|
||||
{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CSW), access_dcsw },
|
||||
{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CISW), access_dcsw },
|
||||
{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
|
||||
{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
|
||||
};
|
||||
|
||||
static const struct sys_reg_desc *first_idreg;
|
||||
|
||||
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
|
||||
@ -2737,8 +2751,7 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
|
||||
return ignore_write(vcpu, p);
|
||||
} else {
|
||||
u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
|
||||
u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1);
|
||||
u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr);
|
||||
u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
|
||||
|
||||
p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
|
||||
(SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
|
||||
@ -3159,7 +3172,8 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
|
||||
/**
|
||||
* kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
|
||||
* @vcpu: The VCPU pointer
|
||||
* @run: The kvm_run struct
|
||||
* @global: &struct sys_reg_desc
|
||||
* @nr_global: size of the @global array
|
||||
*/
|
||||
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
|
||||
const struct sys_reg_desc *global,
|
||||
@ -3326,7 +3340,9 @@ static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
|
||||
/**
|
||||
* kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
|
||||
* @vcpu: The VCPU pointer
|
||||
* @run: The kvm_run struct
|
||||
* @params: &struct sys_reg_params
|
||||
* @global: &struct sys_reg_desc
|
||||
* @nr_global: size of the @global array
|
||||
*/
|
||||
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *params,
|
||||
@ -3384,12 +3400,6 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
|
||||
return kvm_handle_cp_32(vcpu, ¶ms, cp14_regs, ARRAY_SIZE(cp14_regs));
|
||||
}
|
||||
|
||||
static bool is_imp_def_sys_reg(struct sys_reg_params *params)
|
||||
{
|
||||
// See ARM DDI 0487E.a, section D12.3.2
|
||||
return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
|
||||
}
|
||||
|
||||
/**
|
||||
* emulate_sys_reg - Emulate a guest access to an AArch64 system register
|
||||
* @vcpu: The VCPU pointer
|
||||
@ -3398,28 +3408,108 @@ static bool is_imp_def_sys_reg(struct sys_reg_params *params)
|
||||
* Return: true if the system register access was successful, false otherwise.
|
||||
*/
|
||||
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
|
||||
struct sys_reg_params *params)
|
||||
struct sys_reg_params *params)
|
||||
{
|
||||
const struct sys_reg_desc *r;
|
||||
|
||||
r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
|
||||
|
||||
if (likely(r)) {
|
||||
perform_access(vcpu, params, r);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (is_imp_def_sys_reg(params)) {
|
||||
kvm_inject_undefined(vcpu);
|
||||
} else {
|
||||
print_sys_reg_msg(params,
|
||||
"Unsupported guest sys_reg access at: %lx [%08lx]\n",
|
||||
*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
|
||||
kvm_inject_undefined(vcpu);
|
||||
}
|
||||
print_sys_reg_msg(params,
|
||||
"Unsupported guest sys_reg access at: %lx [%08lx]\n",
|
||||
*vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
|
||||
kvm_inject_undefined(vcpu);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
|
||||
{
|
||||
struct kvm *kvm = s->private;
|
||||
u8 *iter;
|
||||
|
||||
mutex_lock(&kvm->arch.config_lock);
|
||||
|
||||
iter = &kvm->arch.idreg_debugfs_iter;
|
||||
if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
|
||||
*iter == (u8)~0) {
|
||||
*iter = *pos;
|
||||
if (*iter >= KVM_ARM_ID_REG_NUM)
|
||||
iter = NULL;
|
||||
} else {
|
||||
iter = ERR_PTR(-EBUSY);
|
||||
}
|
||||
|
||||
mutex_unlock(&kvm->arch.config_lock);
|
||||
|
||||
return iter;
|
||||
}
|
||||
|
||||
static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
|
||||
{
|
||||
struct kvm *kvm = s->private;
|
||||
|
||||
(*pos)++;
|
||||
|
||||
if ((kvm->arch.idreg_debugfs_iter + 1) < KVM_ARM_ID_REG_NUM) {
|
||||
kvm->arch.idreg_debugfs_iter++;
|
||||
|
||||
return &kvm->arch.idreg_debugfs_iter;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void idregs_debug_stop(struct seq_file *s, void *v)
|
||||
{
|
||||
struct kvm *kvm = s->private;
|
||||
|
||||
if (IS_ERR(v))
|
||||
return;
|
||||
|
||||
mutex_lock(&kvm->arch.config_lock);
|
||||
|
||||
kvm->arch.idreg_debugfs_iter = ~0;
|
||||
|
||||
mutex_unlock(&kvm->arch.config_lock);
|
||||
}
|
||||
|
||||
static int idregs_debug_show(struct seq_file *s, void *v)
|
||||
{
|
||||
struct kvm *kvm = s->private;
|
||||
const struct sys_reg_desc *desc;
|
||||
|
||||
desc = first_idreg + kvm->arch.idreg_debugfs_iter;
|
||||
|
||||
if (!desc->name)
|
||||
return 0;
|
||||
|
||||
seq_printf(s, "%20s:\t%016llx\n",
|
||||
desc->name, IDREG(kvm, IDX_IDREG(kvm->arch.idreg_debugfs_iter)));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct seq_operations idregs_debug_sops = {
|
||||
.start = idregs_debug_start,
|
||||
.next = idregs_debug_next,
|
||||
.stop = idregs_debug_stop,
|
||||
.show = idregs_debug_show,
|
||||
};
|
||||
|
||||
DEFINE_SEQ_ATTRIBUTE(idregs_debug);
|
||||
|
||||
void kvm_sys_regs_create_debugfs(struct kvm *kvm)
|
||||
{
|
||||
kvm->arch.idreg_debugfs_iter = ~0;
|
||||
|
||||
debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
|
||||
&idregs_debug_fops);
|
||||
}
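
The idregs debugfs file walks the ID registers with a single per-VM cursor (idreg_debugfs_iter), using ~0 to mean "no walk in progress" so that a second concurrent reader is bounced with -EBUSY. The sketch below is a standalone, much simplified restatement of that single-cursor convention: locking and the initialised-flag check are elided, and the register count is an illustrative number.

#include <assert.h>
#include <stdint.h>

#define ITER_IDLE	((uint8_t)~0)
#define NR_IDREGS	56		/* illustrative count only */

/* Per-VM cursor, mirroring kvm->arch.idreg_debugfs_iter. */
static uint8_t iter = ITER_IDLE;

/* Claim the cursor (seq start); fails if another walk is in flight. */
static int idregs_start(uint8_t pos)
{
	if (iter != ITER_IDLE)
		return -1;		/* -EBUSY in the kernel version */
	if (pos >= NR_IDREGS)
		return -1;
	iter = pos;
	return 0;
}

static int idregs_next(void)
{
	if (iter + 1 >= NR_IDREGS)
		return -1;		/* end of walk */
	iter++;
	return iter;
}

static void idregs_stop(void)
{
	iter = ITER_IDLE;
}

int main(void)
{
	assert(idregs_start(0) == 0);
	assert(idregs_start(0) == -1);	/* second reader is rejected */
	assert(idregs_next() == 1);
	idregs_stop();
	assert(idregs_start(5) == 0);
	idregs_stop();
	return 0;
}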
|
||||
|
||||
static void kvm_reset_id_regs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
const struct sys_reg_desc *idreg = first_idreg;
|
||||
@ -3467,28 +3557,39 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
|
||||
* kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
|
||||
* trap on a guest execution
|
||||
* @vcpu: The VCPU pointer
|
||||
*/
|
||||
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
const struct sys_reg_desc *desc = NULL;
|
||||
struct sys_reg_params params;
|
||||
unsigned long esr = kvm_vcpu_get_esr(vcpu);
|
||||
int Rt = kvm_vcpu_sys_get_rt(vcpu);
|
||||
int sr_idx;
|
||||
|
||||
trace_kvm_handle_sys_reg(esr);
|
||||
|
||||
if (__check_nv_sr_forward(vcpu))
|
||||
if (triage_sysreg_trap(vcpu, &sr_idx))
|
||||
return 1;
|
||||
|
||||
params = esr_sys64_to_params(esr);
|
||||
params.regval = vcpu_get_reg(vcpu, Rt);
|
||||
|
||||
if (!emulate_sys_reg(vcpu, ¶ms))
|
||||
return 1;
|
||||
/* System registers have Op0=={2,3}, as per DDI487 J.a C5.1.2 */
|
||||
if (params.Op0 == 2 || params.Op0 == 3)
|
||||
desc = &sys_reg_descs[sr_idx];
|
||||
else
|
||||
desc = &sys_insn_descs[sr_idx];
|
||||
|
||||
if (!params.is_write)
|
||||
perform_access(vcpu, ¶ms, desc);
|
||||
|
||||
/* Read from system register? */
|
||||
if (!params.is_write &&
|
||||
(params.Op0 == 2 || params.Op0 == 3))
|
||||
vcpu_set_reg(vcpu, Rt, params.regval);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -3930,11 +4031,86 @@ int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kvm_init_sysreg(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
mutex_lock(&kvm->arch.config_lock);
|
||||
|
||||
/*
|
||||
* In the absence of FGT, we cannot independently trap TLBI
|
||||
* Range instructions. This isn't great, but trapping all
|
||||
* TLBIs would be far worse. Live with it...
|
||||
*/
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
|
||||
vcpu->arch.hcr_el2 |= HCR_TTLBOS;
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_HCX)) {
|
||||
vcpu->arch.hcrx_el2 = HCRX_GUEST_FLAGS;
|
||||
|
||||
if (kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
|
||||
vcpu->arch.hcrx_el2 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
|
||||
}
|
||||
|
||||
if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
|
||||
goto out;
|
||||
|
||||
kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1 |
|
||||
HFGxTR_EL2_nMAIR2_EL1 |
|
||||
HFGxTR_EL2_nS2POR_EL1 |
|
||||
HFGxTR_EL2_nPOR_EL1 |
|
||||
HFGxTR_EL2_nPOR_EL0 |
|
||||
HFGxTR_EL2_nACCDATA_EL1 |
|
||||
HFGxTR_EL2_nSMPRI_EL1_MASK |
|
||||
HFGxTR_EL2_nTPIDR2_EL0_MASK);
|
||||
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
|
||||
kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1OS|
|
||||
HFGITR_EL2_TLBIRVALE1OS |
|
||||
HFGITR_EL2_TLBIRVAAE1OS |
|
||||
HFGITR_EL2_TLBIRVAE1OS |
|
||||
HFGITR_EL2_TLBIVAALE1OS |
|
||||
HFGITR_EL2_TLBIVALE1OS |
|
||||
HFGITR_EL2_TLBIVAAE1OS |
|
||||
HFGITR_EL2_TLBIASIDE1OS |
|
||||
HFGITR_EL2_TLBIVAE1OS |
|
||||
HFGITR_EL2_TLBIVMALLE1OS);
|
||||
|
||||
if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
|
||||
kvm->arch.fgu[HFGITR_GROUP] |= (HFGITR_EL2_TLBIRVAALE1 |
|
||||
HFGITR_EL2_TLBIRVALE1 |
|
||||
HFGITR_EL2_TLBIRVAAE1 |
|
||||
HFGITR_EL2_TLBIRVAE1 |
|
||||
HFGITR_EL2_TLBIRVAALE1IS|
|
||||
HFGITR_EL2_TLBIRVALE1IS |
|
||||
HFGITR_EL2_TLBIRVAAE1IS |
|
||||
HFGITR_EL2_TLBIRVAE1IS |
|
||||
HFGITR_EL2_TLBIRVAALE1OS|
|
||||
HFGITR_EL2_TLBIRVALE1OS |
|
||||
HFGITR_EL2_TLBIRVAAE1OS |
|
||||
HFGITR_EL2_TLBIRVAE1OS);
|
||||
|
||||
if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1PIE, IMP))
|
||||
kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
|
||||
HFGxTR_EL2_nPIR_EL1);
|
||||
|
||||
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
|
||||
kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
|
||||
HAFGRTR_EL2_RES1);
|
||||
|
||||
set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
|
||||
out:
|
||||
mutex_unlock(&kvm->arch.config_lock);
|
||||
}
|
||||
|
||||
int __init kvm_sys_reg_table_init(void)
{
struct sys_reg_params params;
bool valid = true;
unsigned int i;
int ret = 0;

check_res_bits();

/* Make sure tables are unique and in order. */
valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
@@ -3943,6 +4119,7 @@ int __init kvm_sys_reg_table_init(void)
valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);
valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);

if (!valid)
return -EINVAL;
@@ -3957,8 +4134,13 @@ int __init kvm_sys_reg_table_init(void)
if (!first_idreg)
return -EINVAL;

if (kvm_get_mode() == KVM_MODE_NV)
return populate_nv_trap_config();
ret = populate_nv_trap_config();

return 0;
for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
ret = populate_sysreg_config(sys_reg_descs + i, i);

for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
ret = populate_sysreg_config(sys_insn_descs + i, i);

return ret;
}

@@ -233,6 +233,8 @@ int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
const struct sys_reg_desc table[], unsigned int num);

bool triage_sysreg_trap(struct kvm_vcpu *vcpu, int *sr_index);

#define AA32(_x) .aarch32_map = AA32_##_x
#define Op0(_x) .Op0 = _x
#define Op1(_x) .Op1 = _x

@@ -149,7 +149,7 @@ static void print_dist_state(struct seq_file *s, struct vgic_dist *dist)
seq_printf(s, "vgic_model:\t%s\n", v3 ? "GICv3" : "GICv2");
seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
if (v3)
seq_printf(s, "nr_lpis:\t%d\n", dist->lpi_list_count);
seq_printf(s, "nr_lpis:\t%d\n", atomic_read(&dist->lpi_count));
seq_printf(s, "enabled:\t%d\n", dist->enabled);
seq_printf(s, "\n");

@@ -53,9 +53,9 @@ void kvm_vgic_early_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;

INIT_LIST_HEAD(&dist->lpi_list_head);
INIT_LIST_HEAD(&dist->lpi_translation_cache);
raw_spin_lock_init(&dist->lpi_list_lock);
xa_init_flags(&dist->lpi_xa, XA_FLAGS_LOCK_IRQ);
}

/* CREATION */
@@ -309,7 +309,7 @@ int vgic_init(struct kvm *kvm)
vgic_lpi_translation_cache_init(kvm);

/*
* If we have GICv4.1 enabled, unconditionnaly request enable the
* If we have GICv4.1 enabled, unconditionally request enable the
* v4 support so that we get HW-accelerated vSGIs. Otherwise, only
* enable it if we present a virtual ITS to the guest.
*/
@@ -366,6 +366,8 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)

if (vgic_supports_direct_msis(kvm))
vgic_v4_teardown(kvm);

xa_destroy(&dist->lpi_xa);
}

static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
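Aside: the lpi_xa xarray initialised in kvm_vgic_early_init() and torn down in kvm_vgic_dist_destroy() above replaces the old LPI list. Below is a hedged, generic sketch of the xarray store/erase lifecycle this series moves to; the demo_* names are made up for illustration and are not the in-tree code.

#include <linux/xarray.h>
#include <linux/slab.h>

struct demo_irq {
	u32 intid;
};

/* intid-indexed storage whose internal lock is taken IRQ-safe */
static DEFINE_XARRAY_FLAGS(demo_xa, XA_FLAGS_LOCK_IRQ);

static int demo_add(u32 intid)
{
	struct demo_irq *irq = kzalloc(sizeof(*irq), GFP_KERNEL);
	int ret;

	if (!irq)
		return -ENOMEM;

	irq->intid = intid;
	/* xa_store() returns the previous entry or an xa_err() pointer */
	ret = xa_err(xa_store_irq(&demo_xa, intid, irq, GFP_KERNEL));
	if (ret)
		kfree(irq);
	return ret;
}

static void demo_del(u32 intid)
{
	kfree(xa_erase_irq(&demo_xa, intid));
}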
|
||||
@ -445,13 +447,15 @@ int vgic_lazy_init(struct kvm *kvm)
|
||||
/* RESOURCE MAPPING */
|
||||
|
||||
/**
|
||||
* kvm_vgic_map_resources - map the MMIO regions
|
||||
* @kvm: kvm struct pointer
|
||||
*
|
||||
* Map the MMIO regions depending on the VGIC model exposed to the guest
|
||||
* called on the first VCPU run.
|
||||
* Also map the virtual CPU interface into the VM.
|
||||
* v2 calls vgic_init() if not already done.
|
||||
* v3 and derivatives return an error if the VGIC is not initialized.
|
||||
* vgic_ready() returns true if this function has succeeded.
|
||||
* @kvm: kvm struct pointer
|
||||
*/
|
||||
int kvm_vgic_map_resources(struct kvm *kvm)
|
||||
{
|
||||
|
@@ -52,7 +52,12 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
if (!irq)
return ERR_PTR(-ENOMEM);

INIT_LIST_HEAD(&irq->lpi_list);
ret = xa_reserve_irq(&dist->lpi_xa, intid, GFP_KERNEL_ACCOUNT);
if (ret) {
kfree(irq);
return ERR_PTR(ret);
}

INIT_LIST_HEAD(&irq->ap_list);
raw_spin_lock_init(&irq->irq_lock);

@@ -68,30 +73,30 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
* There could be a race with another vgic_add_lpi(), so we need to
* check that we don't add a second list entry with the same LPI.
*/
list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
if (oldirq->intid != intid)
continue;

oldirq = xa_load(&dist->lpi_xa, intid);
if (vgic_try_get_irq_kref(oldirq)) {
/* Someone was faster with adding this LPI, lets use that. */
kfree(irq);
irq = oldirq;

/*
* This increases the refcount, the caller is expected to
* call vgic_put_irq() on the returned pointer once it's
* finished with the IRQ.
*/
vgic_get_irq_kref(irq);

goto out_unlock;
}

list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
dist->lpi_list_count++;
ret = xa_err(xa_store(&dist->lpi_xa, intid, irq, 0));
if (ret) {
xa_release(&dist->lpi_xa, intid);
kfree(irq);
goto out_unlock;
}

atomic_inc(&dist->lpi_count);

out_unlock:
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

if (ret)
return ERR_PTR(ret);

/*
* We "cache" the configuration table entries in our struct vgic_irq's.
* However we only have those structs for mapped IRQs, so we read in
@@ -158,7 +163,7 @@ struct vgic_translation_cache_entry {
* @cte_esz: collection table entry size
* @dte_esz: device table entry size
* @ite_esz: interrupt translation table entry size
* @save tables: save the ITS tables into guest RAM
* @save_tables: save the ITS tables into guest RAM
* @restore_tables: restore the ITS internal structs from tables
* stored in guest RAM
* @commit: initialize the registers which expose the ABI settings,
@@ -311,6 +316,8 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
return 0;
}

#define GIC_LPI_MAX_INTID ((1 << INTERRUPT_ID_BITS_ITS) - 1)

/*
* Create a snapshot of the current LPIs targeting @vcpu, so that we can
* enumerate those LPIs without holding any lock.
@@ -319,6 +326,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
struct vgic_dist *dist = &kvm->arch.vgic;
XA_STATE(xas, &dist->lpi_xa, GIC_LPI_OFFSET);
struct vgic_irq *irq;
unsigned long flags;
u32 *intids;
@@ -331,13 +339,15 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
* command). If coming from another path (such as enabling LPIs),
* we must be careful not to overrun the array.
*/
irq_count = READ_ONCE(dist->lpi_list_count);
irq_count = atomic_read(&dist->lpi_count);
intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
if (!intids)
return -ENOMEM;

raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
rcu_read_lock();

xas_for_each(&xas, irq, GIC_LPI_MAX_INTID) {
if (i == irq_count)
break;
/* We don't need to "get" the IRQ, as we hold the list lock. */
@@ -345,6 +355,8 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
continue;
intids[i++] = irq->intid;
}

rcu_read_unlock();
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

*intid_ptr = intids;
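Aside: a hedged illustration of the snapshot loop above: walking an xarray with an XA_STATE cursor under RCU and copying the indices out into a caller-supplied array. Generic pattern only; it reuses the demo_irq type from the earlier sketch and the names are illustrative, not the in-tree interface.

static int demo_snapshot_intids(struct xarray *xa, u32 *intids, int max)
{
	XA_STATE(xas, xa, 0);
	struct demo_irq *irq;
	int i = 0;

	rcu_read_lock();
	xas_for_each(&xas, irq, ULONG_MAX) {
		if (i == max)
			break;		/* do not overrun the snapshot buffer */
		intids[i++] = irq->intid;
	}
	rcu_read_unlock();

	return i;
}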
|
||||
@ -595,8 +607,8 @@ static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
|
||||
raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
|
||||
|
||||
irq = __vgic_its_check_cache(dist, db, devid, eventid);
|
||||
if (irq)
|
||||
vgic_get_irq_kref(irq);
|
||||
if (!vgic_try_get_irq_kref(irq))
|
||||
irq = NULL;
|
||||
|
||||
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
|
||||
|
||||
@ -640,8 +652,13 @@ static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
|
||||
* was in the cache, and increment it on the new interrupt.
|
||||
*/
|
||||
if (cte->irq)
|
||||
__vgic_put_lpi_locked(kvm, cte->irq);
|
||||
vgic_put_irq(kvm, cte->irq);
|
||||
|
||||
/*
|
||||
* The irq refcount is guaranteed to be nonzero while holding the
|
||||
* its_lock, as the ITE (and the reference it holds) cannot be freed.
|
||||
*/
|
||||
lockdep_assert_held(&its->its_lock);
|
||||
vgic_get_irq_kref(irq);
|
||||
|
||||
cte->db = db;
|
||||
@ -672,7 +689,7 @@ void vgic_its_invalidate_cache(struct kvm *kvm)
|
||||
if (!cte->irq)
|
||||
break;
|
||||
|
||||
__vgic_put_lpi_locked(kvm, cte->irq);
|
||||
vgic_put_irq(kvm, cte->irq);
|
||||
cte->irq = NULL;
|
||||
}
|
||||
|
||||
@ -1345,8 +1362,8 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
|
||||
}
|
||||
|
||||
/**
|
||||
* vgic_its_invall - invalidate all LPIs targetting a given vcpu
|
||||
* @vcpu: the vcpu for which the RD is targetted by an invalidation
|
||||
* vgic_its_invall - invalidate all LPIs targeting a given vcpu
|
||||
* @vcpu: the vcpu for which the RD is targeted by an invalidation
|
||||
*
|
||||
* Contrary to the INVALL command, this targets a RD instead of a
|
||||
* collection, and we don't need to hold the its_lock, since no ITS is
|
||||
@ -2144,7 +2161,7 @@ static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
|
||||
}
|
||||
|
||||
/**
|
||||
* entry_fn_t - Callback called on a table entry restore path
|
||||
* typedef entry_fn_t - Callback called on a table entry restore path
|
||||
* @its: its handle
|
||||
* @id: id of the entry
|
||||
* @entry: pointer to the entry
|
||||
|
@ -380,6 +380,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
|
||||
struct vgic_irq *irq;
|
||||
gpa_t last_ptr = ~(gpa_t)0;
|
||||
bool vlpi_avail = false;
|
||||
unsigned long index;
|
||||
int ret = 0;
|
||||
u8 val;
|
||||
|
||||
@ -396,7 +397,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
|
||||
vlpi_avail = true;
|
||||
}
|
||||
|
||||
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
|
||||
xa_for_each(&dist->lpi_xa, index, irq) {
|
||||
int byte_offset, bit_nr;
|
||||
struct kvm_vcpu *vcpu;
|
||||
gpa_t pendbase, ptr;
|
||||
|
@@ -30,7 +30,8 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
* its->its_lock (mutex)
* vgic_cpu->ap_list_lock must be taken with IRQs disabled
* kvm->lpi_list_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled
* vgic_dist->lpi_xa.xa_lock must be taken with IRQs disabled
* vgic_irq->irq_lock must be taken with IRQs disabled
*
* As the ap_list_lock might be taken from the timer interrupt handler,
* we have to disable IRQs before taking this lock and everything lower
@@ -54,32 +55,22 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
*/

/*
* Iterate over the VM's list of mapped LPIs to find the one with a
* matching interrupt ID and return a reference to the IRQ structure.
* Index the VM's xarray of mapped LPIs and return a reference to the IRQ
* structure. The caller is expected to call vgic_put_irq() later once it's
* finished with the IRQ.
*/
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq = NULL;
unsigned long flags;

raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
rcu_read_lock();

list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
if (irq->intid != intid)
continue;
irq = xa_load(&dist->lpi_xa, intid);
if (!vgic_try_get_irq_kref(irq))
irq = NULL;

/*
* This increases the refcount, the caller is expected to
* call vgic_put_irq() later once it's finished with the IRQ.
*/
vgic_get_irq_kref(irq);
goto out_unlock;
}
irq = NULL;

out_unlock:
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
rcu_read_unlock();

return irq;
}

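Aside: a hedged sketch of the lookup-side pattern above: resolve the entry via xa_load() under RCU and only take a reference if the refcount has not already dropped to zero. Illustrative types and names only, not the in-tree code.

#include <linux/kref.h>
#include <linux/xarray.h>

struct demo_ref_irq {
	struct kref refcount;
	u32 intid;
};

static struct demo_ref_irq *demo_get(struct xarray *xa, u32 intid)
{
	struct demo_ref_irq *irq;

	rcu_read_lock();
	irq = xa_load(xa, intid);
	if (irq && !kref_get_unless_zero(&irq->refcount))
		irq = NULL;		/* raced with the final put, entry is dying */
	rcu_read_unlock();

	return irq;
}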
@@ -120,22 +111,6 @@ static void vgic_irq_release(struct kref *ref)
{
}

/*
* Drop the refcount on the LPI. Must be called with lpi_list_lock held.
*/
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;

if (!kref_put(&irq->refcount, vgic_irq_release))
return;

list_del(&irq->lpi_list);
dist->lpi_list_count--;

kfree(irq);
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -144,9 +119,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
if (irq->intid < VGIC_MIN_LPI)
return;

raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
__vgic_put_lpi_locked(kvm, irq);
raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
if (!kref_put(&irq->refcount, vgic_irq_release))
return;

xa_lock_irqsave(&dist->lpi_xa, flags);
__xa_erase(&dist->lpi_xa, irq->intid);
xa_unlock_irqrestore(&dist->lpi_xa, flags);

atomic_dec(&dist->lpi_count);
kfree_rcu(irq, rcu);
}
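Aside: a hedged sketch of the release side shown above: drop the last reference, unpublish the entry from the xarray with its lock held IRQ-safe, then free it after a grace period so concurrent RCU lookups stay safe. Illustrative names only.

struct demo_rcu_irq {
	struct kref refcount;
	struct rcu_head rcu;
	u32 intid;
};

static void demo_release(struct kref *ref)
{
	/* freeing is deferred to the caller via kfree_rcu() */
}

static void demo_put(struct xarray *xa, struct demo_rcu_irq *irq)
{
	unsigned long flags;

	if (!kref_put(&irq->refcount, demo_release))
		return;

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, irq->intid);
	xa_unlock_irqrestore(xa, flags);

	kfree_rcu(irq, rcu);	/* readers may still hold an RCU-protected pointer */
}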
|
||||
|
||||
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
|
||||
@ -203,7 +184,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_vgic_target_oracle - compute the target vcpu for an irq
|
||||
* vgic_target_oracle - compute the target vcpu for an irq
|
||||
*
|
||||
* @irq: The irq to route. Must be already locked.
|
||||
*
|
||||
@ -404,7 +385,8 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
|
||||
|
||||
/*
|
||||
* Grab a reference to the irq to reflect the fact that it is
|
||||
* now in the ap_list.
|
||||
* now in the ap_list. This is safe as the caller must already hold a
|
||||
* reference on the irq.
|
||||
*/
|
||||
vgic_get_irq_kref(irq);
|
||||
list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
|
||||
|
@ -180,7 +180,6 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
|
||||
gpa_t addr, int len);
|
||||
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
|
||||
u32 intid);
|
||||
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
|
||||
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
|
||||
bool vgic_get_phys_line_level(struct vgic_irq *irq);
|
||||
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
|
||||
@@ -220,12 +219,20 @@ void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);

static inline bool vgic_try_get_irq_kref(struct vgic_irq *irq)
{
if (!irq)
return false;

if (irq->intid < VGIC_MIN_LPI)
return true;

return kref_get_unless_zero(&irq->refcount);
}

static inline void vgic_get_irq_kref(struct vgic_irq *irq)
{
if (irq->intid < VGIC_MIN_LPI)
return;

kref_get(&irq->refcount);
WARN_ON_ONCE(!vgic_try_get_irq_kref(irq));
}
|
||||
|
||||
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
|
||||
|
@ -36,6 +36,7 @@ HAS_GENERIC_AUTH_IMP_DEF
|
||||
HAS_GIC_CPUIF_SYSREGS
|
||||
HAS_GIC_PRIO_MASKING
|
||||
HAS_GIC_PRIO_RELAXED_SYNC
|
||||
HAS_HCR_NV1
|
||||
HAS_HCX
|
||||
HAS_LDAPR
|
||||
HAS_LPA2
|
||||
|
@ -1399,6 +1399,7 @@ EndEnum
|
||||
UnsignedEnum 43:40 SPECRES
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
0b0010 COSP_RCTX
|
||||
EndEnum
|
||||
UnsignedEnum 39:36 SB
|
||||
0b0000 NI
|
||||
@ -1525,7 +1526,12 @@ EndEnum
|
||||
EndSysreg
|
||||
|
||||
Sysreg ID_AA64ISAR3_EL1 3 0 0 6 3
|
||||
Res0 63:12
|
||||
Res0 63:16
|
||||
UnsignedEnum 15:12 PACM
|
||||
0b0000 NI
|
||||
0b0001 TRIVIAL_IMP
|
||||
0b0010 FULL_IMP
|
||||
EndEnum
|
||||
UnsignedEnum 11:8 TLBIW
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
@ -1824,6 +1830,43 @@ UnsignedEnum 3:0 TCRX
|
||||
EndEnum
|
||||
EndSysreg
|
||||
|
||||
Sysreg ID_AA64MMFR4_EL1 3 0 0 7 4
|
||||
Res0 63:40
|
||||
UnsignedEnum 39:36 E3DSE
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
Res0 35:28
|
||||
SignedEnum 27:24 E2H0
|
||||
0b0000 IMP
|
||||
0b1110 NI_NV1
|
||||
0b1111 NI
|
||||
EndEnum
|
||||
UnsignedEnum 23:20 NV_frac
|
||||
0b0000 NV_NV2
|
||||
0b0001 NV2_ONLY
|
||||
EndEnum
|
||||
UnsignedEnum 19:16 FGWTE3
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
UnsignedEnum 15:12 HACDBS
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
UnsignedEnum 11:8 ASID2
|
||||
0b0000 NI
|
||||
0b0001 IMP
|
||||
EndEnum
|
||||
SignedEnum 7:4 EIESB
|
||||
0b0000 NI
|
||||
0b0001 ToEL3
|
||||
0b0010 ToELx
|
||||
0b1111 ANY
|
||||
EndEnum
|
||||
Res0 3:0
|
||||
EndSysreg
|
||||
|
||||
Sysreg SCTLR_EL1 3 0 1 0 0
|
||||
Field 63 TIDCP
|
||||
Field 62 SPINTMASK
|
||||
|
@ -133,7 +133,6 @@ config LOONGARCH
|
||||
select HAVE_KPROBES
|
||||
select HAVE_KPROBES_ON_FTRACE
|
||||
select HAVE_KRETPROBES
|
||||
select HAVE_KVM
|
||||
select HAVE_MOD_ARCH_SPECIFIC
|
||||
select HAVE_NMI
|
||||
select HAVE_PCI
|
||||
|
@ -14,8 +14,6 @@
|
||||
* Some parts derived from the x86 version of this file.
|
||||
*/
|
||||
|
||||
#define __KVM_HAVE_READONLY_MEM
|
||||
|
||||
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
|
||||
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
|
||||
|
||||
|
@ -20,7 +20,6 @@ if VIRTUALIZATION
|
||||
config KVM
|
||||
tristate "Kernel-based Virtual Machine (KVM) support"
|
||||
depends on AS_HAS_LVZ_EXTENSION
|
||||
depends on HAVE_KVM
|
||||
select HAVE_KVM_DIRTY_RING_ACQ_REL
|
||||
select HAVE_KVM_VCPU_ASYNC_IOCTL
|
||||
select KVM_COMMON
|
||||
@ -28,6 +27,7 @@ config KVM
|
||||
select KVM_GENERIC_HARDWARE_ENABLING
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select KVM_MMIO
|
||||
select HAVE_KVM_READONLY_MEM
|
||||
select KVM_XFER_TO_GUEST_WORK
|
||||
help
|
||||
Support hosting virtualized guest machines using
|
||||
|
@ -213,12 +213,6 @@ SYM_FUNC_START(kvm_enter_guest)
|
||||
/* Save host GPRs */
|
||||
kvm_save_host_gpr a2
|
||||
|
||||
/* Save host CRMD, PRMD to stack */
|
||||
csrrd a3, LOONGARCH_CSR_CRMD
|
||||
st.d a3, a2, PT_CRMD
|
||||
csrrd a3, LOONGARCH_CSR_PRMD
|
||||
st.d a3, a2, PT_PRMD
|
||||
|
||||
addi.d a2, a1, KVM_VCPU_ARCH
|
||||
st.d sp, a2, KVM_ARCH_HSP
|
||||
st.d tp, a2, KVM_ARCH_HTP
|
||||
|
@ -23,24 +23,6 @@ static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
|
||||
return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
|
||||
}
|
||||
|
||||
/*
|
||||
* Push timer forward on timeout.
|
||||
* Handle an hrtimer event by push the hrtimer forward a period.
|
||||
*/
|
||||
static enum hrtimer_restart kvm_count_timeout(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long cfg, period;
|
||||
|
||||
/* Add periodic tick to current expire time */
|
||||
cfg = kvm_read_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG);
|
||||
if (cfg & CSR_TCFG_PERIOD) {
|
||||
period = tick_to_ns(vcpu, cfg & CSR_TCFG_VAL);
|
||||
hrtimer_add_expires_ns(&vcpu->arch.swtimer, period);
|
||||
return HRTIMER_RESTART;
|
||||
} else
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
/* Low level hrtimer wake routine */
|
||||
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
|
||||
{
|
||||
@ -50,7 +32,7 @@ enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
|
||||
kvm_queue_irq(vcpu, INT_TI);
|
||||
rcuwait_wake_up(&vcpu->wait);
|
||||
|
||||
return kvm_count_timeout(vcpu);
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -93,7 +75,8 @@ void kvm_restore_timer(struct kvm_vcpu *vcpu)
|
||||
/*
|
||||
* Freeze the soft-timer and sync the guest stable timer with it.
|
||||
*/
|
||||
hrtimer_cancel(&vcpu->arch.swtimer);
|
||||
if (kvm_vcpu_is_blocking(vcpu))
|
||||
hrtimer_cancel(&vcpu->arch.swtimer);
|
||||
|
||||
/*
|
||||
* From LoongArch Reference Manual Volume 1 Chapter 7.6.2
|
||||
@@ -168,26 +151,20 @@ static void _kvm_save_timer(struct kvm_vcpu *vcpu)
* Here judge one-shot timer fired by checking whether TVAL is larger
* than TCFG
*/
if (ticks < cfg) {
if (ticks < cfg)
delta = tick_to_ns(vcpu, ticks);
expire = ktime_add_ns(ktime_get(), delta);
vcpu->arch.expire = expire;
else
delta = 0;

expire = ktime_add_ns(ktime_get(), delta);
vcpu->arch.expire = expire;
if (kvm_vcpu_is_blocking(vcpu)) {

/*
* HRTIMER_MODE_PINNED is suggested since vcpu may run in
* the same physical cpu in next time
*/
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
} else if (vcpu->stat.generic.blocking) {
/*
* Inject timer interrupt so that halt polling can dectect and exit.
* VCPU is scheduled out already and sleeps in rcuwait queue and
* will not poll pending events again. kvm_queue_irq() is not enough,
* hrtimer swtimer should be used here.
*/
expire = ktime_add_ns(ktime_get(), 10);
vcpu->arch.expire = expire;
hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED);
}
}
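Aside: a hedged sketch of the idea in the hunk above: the software timer is only armed when the vCPU is actually blocking, using an absolute, CPU-pinned expiry so the wakeup fires on the CPU the vCPU is likely to resume on. The helper name is illustrative, not an in-tree function; vcpu->arch.expire and vcpu->arch.swtimer are the LoongArch fields from the diff.

static void demo_arm_swtimer(struct kvm_vcpu *vcpu, u64 delta_ns)
{
	ktime_t expire = ktime_add_ns(ktime_get(), delta_ns);

	vcpu->arch.expire = expire;
	if (kvm_vcpu_is_blocking(vcpu))
		hrtimer_start(&vcpu->arch.swtimer, expire,
			      HRTIMER_MODE_ABS_PINNED);
}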
|
||||
|
||||
|
@ -304,11 +304,18 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
|
||||
return -EINVAL;
|
||||
|
||||
switch (id) {
|
||||
case 2:
|
||||
case LOONGARCH_CPUCFG0:
|
||||
*v = GENMASK(31, 0);
|
||||
return 0;
|
||||
case LOONGARCH_CPUCFG1:
|
||||
/* CPUCFG1_MSGINT is not supported by KVM */
|
||||
*v = GENMASK(25, 0);
|
||||
return 0;
|
||||
case LOONGARCH_CPUCFG2:
|
||||
/* CPUCFG2 features unconditionally supported by KVM */
|
||||
*v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP |
|
||||
CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV |
|
||||
CPUCFG2_LAM;
|
||||
CPUCFG2_LSPW | CPUCFG2_LAM;
|
||||
/*
|
||||
* For the ISA extensions listed below, if one is supported
|
||||
* by the host, then it is also supported by KVM.
|
||||
@ -318,14 +325,26 @@ static int _kvm_get_cpucfg_mask(int id, u64 *v)
|
||||
if (cpu_has_lasx)
|
||||
*v |= CPUCFG2_LASX;
|
||||
|
||||
return 0;
|
||||
case LOONGARCH_CPUCFG3:
|
||||
*v = GENMASK(16, 0);
|
||||
return 0;
|
||||
case LOONGARCH_CPUCFG4:
|
||||
case LOONGARCH_CPUCFG5:
|
||||
*v = GENMASK(31, 0);
|
||||
return 0;
|
||||
case LOONGARCH_CPUCFG16:
|
||||
*v = GENMASK(16, 0);
|
||||
return 0;
|
||||
case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20:
|
||||
*v = GENMASK(30, 0);
|
||||
return 0;
|
||||
default:
|
||||
/*
|
||||
* No restrictions on other valid CPUCFG IDs' values, but
|
||||
* CPUCFG data is limited to 32 bits as the LoongArch ISA
|
||||
* manual says (Volume 1, Section 2.2.10.5 "CPUCFG").
|
||||
* CPUCFG bits should be zero if reserved by HW or not
|
||||
* supported by KVM.
|
||||
*/
|
||||
*v = U32_MAX;
|
||||
*v = 0;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@ -344,7 +363,7 @@ static int kvm_check_cpucfg(int id, u64 val)
|
||||
return -EINVAL;
|
||||
|
||||
switch (id) {
|
||||
case 2:
|
||||
case LOONGARCH_CPUCFG2:
|
||||
if (!(val & CPUCFG2_LLFTP))
|
||||
/* Guests must have a constant timer */
|
||||
return -EINVAL;
|
||||
|
@ -1313,6 +1313,7 @@ config CPU_LOONGSON64
|
||||
select CPU_SUPPORTS_HIGHMEM
|
||||
select CPU_SUPPORTS_HUGEPAGES
|
||||
select CPU_SUPPORTS_MSA
|
||||
select CPU_SUPPORTS_VZ
|
||||
select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT
|
||||
select CPU_MIPSR2_IRQ_VI
|
||||
select DMA_NONCOHERENT
|
||||
@ -1324,7 +1325,6 @@ config CPU_LOONGSON64
|
||||
select MIPS_FP_SUPPORT
|
||||
select GPIOLIB
|
||||
select SWIOTLB
|
||||
select HAVE_KVM
|
||||
help
|
||||
The Loongson GSx64(GS264/GS464/GS464E/GS464V) series of processor
|
||||
cores implements the MIPS64R2 instruction set with many extensions,
|
||||
@ -1399,7 +1399,6 @@ config CPU_MIPS32_R2
|
||||
select CPU_SUPPORTS_32BIT_KERNEL
|
||||
select CPU_SUPPORTS_HIGHMEM
|
||||
select CPU_SUPPORTS_MSA
|
||||
select HAVE_KVM
|
||||
help
|
||||
Choose this option to build a kernel for release 2 or later of the
|
||||
MIPS32 architecture. Most modern embedded systems with a 32-bit
|
||||
@ -1414,7 +1413,7 @@ config CPU_MIPS32_R5
|
||||
select CPU_SUPPORTS_32BIT_KERNEL
|
||||
select CPU_SUPPORTS_HIGHMEM
|
||||
select CPU_SUPPORTS_MSA
|
||||
select HAVE_KVM
|
||||
select CPU_SUPPORTS_VZ
|
||||
select MIPS_O32_FP64_SUPPORT
|
||||
help
|
||||
Choose this option to build a kernel for release 5 or later of the
|
||||
@ -1430,7 +1429,7 @@ config CPU_MIPS32_R6
|
||||
select CPU_SUPPORTS_32BIT_KERNEL
|
||||
select CPU_SUPPORTS_HIGHMEM
|
||||
select CPU_SUPPORTS_MSA
|
||||
select HAVE_KVM
|
||||
select CPU_SUPPORTS_VZ
|
||||
select MIPS_O32_FP64_SUPPORT
|
||||
help
|
||||
Choose this option to build a kernel for release 6 or later of the
|
||||
@ -1466,7 +1465,6 @@ config CPU_MIPS64_R2
|
||||
select CPU_SUPPORTS_HIGHMEM
|
||||
select CPU_SUPPORTS_HUGEPAGES
|
||||
select CPU_SUPPORTS_MSA
|
||||
select HAVE_KVM
|
||||
help
|
||||
Choose this option to build a kernel for release 2 or later of the
|
||||
MIPS64 architecture. Many modern embedded systems with a 64-bit
|
||||
@ -1484,7 +1482,7 @@ config CPU_MIPS64_R5
|
||||
select CPU_SUPPORTS_HUGEPAGES
|
||||
select CPU_SUPPORTS_MSA
|
||||
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
|
||||
select HAVE_KVM
|
||||
select CPU_SUPPORTS_VZ
|
||||
help
|
||||
Choose this option to build a kernel for release 5 or later of the
|
||||
MIPS64 architecture. This is a intermediate MIPS architecture
|
||||
@ -1502,7 +1500,7 @@ config CPU_MIPS64_R6
|
||||
select CPU_SUPPORTS_HUGEPAGES
|
||||
select CPU_SUPPORTS_MSA
|
||||
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
|
||||
select HAVE_KVM
|
||||
select CPU_SUPPORTS_VZ
|
||||
help
|
||||
Choose this option to build a kernel for release 6 or later of the
|
||||
MIPS64 architecture. New MIPS processors, starting with the Warrior
|
||||
@ -1517,9 +1515,9 @@ config CPU_P5600
|
||||
select CPU_SUPPORTS_HIGHMEM
|
||||
select CPU_SUPPORTS_MSA
|
||||
select CPU_SUPPORTS_CPUFREQ
|
||||
select CPU_SUPPORTS_VZ
|
||||
select CPU_MIPSR2_IRQ_VI
|
||||
select CPU_MIPSR2_IRQ_EI
|
||||
select HAVE_KVM
|
||||
select MIPS_O32_FP64_SUPPORT
|
||||
help
|
||||
Choose this option to build a kernel for MIPS Warrior P5600 CPU.
|
||||
@ -1641,7 +1639,7 @@ config CPU_CAVIUM_OCTEON
|
||||
select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
|
||||
select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
|
||||
select MIPS_L1_CACHE_SHIFT_7
|
||||
select HAVE_KVM
|
||||
select CPU_SUPPORTS_VZ
|
||||
help
|
||||
The Cavium Octeon processor is a highly integrated chip containing
|
||||
many ethernet hardware widgets for networking tasks. The processor
|
||||
@ -2034,6 +2032,8 @@ config CPU_SUPPORTS_ADDRWINCFG
|
||||
config CPU_SUPPORTS_HUGEPAGES
|
||||
bool
|
||||
depends on !(32BIT && (PHYS_ADDR_T_64BIT || EVA))
|
||||
config CPU_SUPPORTS_VZ
|
||||
bool
|
||||
config MIPS_PGD_C0_CONTEXT
|
||||
bool
|
||||
depends on 64BIT
|
||||
|
@ -20,8 +20,6 @@
|
||||
* Some parts derived from the x86 version of this file.
|
||||
*/
|
||||
|
||||
#define __KVM_HAVE_READONLY_MEM
|
||||
|
||||
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
|
||||
|
||||
/*
|
||||
|
@ -17,7 +17,7 @@ if VIRTUALIZATION
|
||||
|
||||
config KVM
|
||||
tristate "Kernel-based Virtual Machine (KVM) support"
|
||||
depends on HAVE_KVM
|
||||
depends on CPU_SUPPORTS_VZ
|
||||
depends on MIPS_FP_SUPPORT
|
||||
select EXPORT_UASM
|
||||
select KVM_COMMON
|
||||
@ -26,6 +26,7 @@ config KVM
|
||||
select KVM_MMIO
|
||||
select KVM_GENERIC_MMU_NOTIFIER
|
||||
select KVM_GENERIC_HARDWARE_ENABLING
|
||||
select HAVE_KVM_READONLY_MEM
|
||||
help
|
||||
Support for hosting Guest kernels.
|
||||
|
||||
|
@ -28,7 +28,6 @@
|
||||
#define __KVM_HAVE_PPC_SMT
|
||||
#define __KVM_HAVE_IRQCHIP
|
||||
#define __KVM_HAVE_IRQ_LINE
|
||||
#define __KVM_HAVE_GUEST_DEBUG
|
||||
|
||||
/* Not always available, but if it is, this is the correct offset. */
|
||||
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
|
||||
@ -733,4 +732,48 @@ struct kvm_ppc_xive_eq {
|
||||
#define KVM_XIVE_TIMA_PAGE_OFFSET 0
|
||||
#define KVM_XIVE_ESB_PAGE_OFFSET 4
|
||||
|
||||
/* for KVM_PPC_GET_PVINFO */
|
||||
|
||||
#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
|
||||
|
||||
struct kvm_ppc_pvinfo {
|
||||
/* out */
|
||||
__u32 flags;
|
||||
__u32 hcall[4];
|
||||
__u8 pad[108];
|
||||
};
|
||||
|
||||
/* for KVM_PPC_GET_SMMU_INFO */
|
||||
#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
|
||||
|
||||
struct kvm_ppc_one_page_size {
|
||||
__u32 page_shift; /* Page shift (or 0) */
|
||||
__u32 pte_enc; /* Encoding in the HPTE (>>12) */
|
||||
};
|
||||
|
||||
struct kvm_ppc_one_seg_page_size {
|
||||
__u32 page_shift; /* Base page shift of segment (or 0) */
|
||||
__u32 slb_enc; /* SLB encoding for BookS */
|
||||
struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
|
||||
};
|
||||
|
||||
#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
|
||||
#define KVM_PPC_1T_SEGMENTS 0x00000002
|
||||
#define KVM_PPC_NO_HASH 0x00000004
|
||||
|
||||
struct kvm_ppc_smmu_info {
|
||||
__u64 flags;
|
||||
__u32 slb_size;
|
||||
__u16 data_keys; /* # storage keys supported for data */
|
||||
__u16 instr_keys; /* # storage keys supported for instructions */
|
||||
struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
|
||||
};
|
||||
|
||||
/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */
|
||||
struct kvm_ppc_resize_hpt {
|
||||
__u64 flags;
|
||||
__u32 shift;
|
||||
__u32 pad;
|
||||
};
|
||||
|
||||
#endif /* __LINUX_KVM_POWERPC_H */
|
||||
|
@ -22,7 +22,6 @@ config KVM
|
||||
select KVM_COMMON
|
||||
select HAVE_KVM_VCPU_ASYNC_IOCTL
|
||||
select KVM_VFIO
|
||||
select IRQ_BYPASS_MANAGER
|
||||
select HAVE_KVM_IRQ_BYPASS
|
||||
|
||||
config KVM_BOOK3S_HANDLER
|
||||
|
@ -2538,9 +2538,8 @@ void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_
|
||||
vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
|
||||
}
|
||||
|
||||
int kvm_arch_create_vm_debugfs(struct kvm *kvm)
|
||||
void kvm_arch_create_vm_debugfs(struct kvm *kvm)
|
||||
{
|
||||
if (kvm->arch.kvm_ops->create_vm_debugfs)
|
||||
kvm->arch.kvm_ops->create_vm_debugfs(kvm);
|
||||
return 0;
|
||||
}
|
||||
|
@ -16,7 +16,6 @@
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#define __KVM_HAVE_IRQ_LINE
|
||||
#define __KVM_HAVE_READONLY_MEM
|
||||
|
||||
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
|
||||
|
||||
@ -166,6 +165,8 @@ enum KVM_RISCV_ISA_EXT_ID {
|
||||
KVM_RISCV_ISA_EXT_ZVFH,
|
||||
KVM_RISCV_ISA_EXT_ZVFHMIN,
|
||||
KVM_RISCV_ISA_EXT_ZFA,
|
||||
KVM_RISCV_ISA_EXT_ZTSO,
|
||||
KVM_RISCV_ISA_EXT_ZACAS,
|
||||
KVM_RISCV_ISA_EXT_MAX,
|
||||
};
|
||||
|
||||
|
@ -24,6 +24,7 @@ config KVM
|
||||
select HAVE_KVM_IRQ_ROUTING
|
||||
select HAVE_KVM_MSI
|
||||
select HAVE_KVM_VCPU_ASYNC_IOCTL
|
||||
select HAVE_KVM_READONLY_MEM
|
||||
select KVM_COMMON
|
||||
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
|
||||
select KVM_GENERIC_HARDWARE_ENABLING
|
||||
|
@ -7,6 +7,8 @@
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/cpufeature.h>
|
||||
|
||||
#define INSN_OPCODE_MASK 0x007c
|
||||
#define INSN_OPCODE_SHIFT 2
|
||||
#define INSN_OPCODE_SYSTEM 28
|
||||
@ -213,9 +215,20 @@ struct csr_func {
|
||||
unsigned long wr_mask);
|
||||
};
|
||||
|
||||
static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num,
|
||||
unsigned long *val, unsigned long new_val,
|
||||
unsigned long wr_mask)
|
||||
{
|
||||
if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR))
|
||||
return KVM_INSN_ILLEGAL_TRAP;
|
||||
|
||||
return KVM_INSN_EXIT_TO_USER_SPACE;
|
||||
}
|
||||
|
||||
static const struct csr_func csr_funcs[] = {
|
||||
KVM_RISCV_VCPU_AIA_CSR_FUNCS
|
||||
KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
|
||||
{ .base = CSR_SEED, .count = 1, .func = seed_csr_rmw },
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -40,6 +40,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
|
||||
KVM_ISA_EXT_ARR(SVINVAL),
|
||||
KVM_ISA_EXT_ARR(SVNAPOT),
|
||||
KVM_ISA_EXT_ARR(SVPBMT),
|
||||
KVM_ISA_EXT_ARR(ZACAS),
|
||||
KVM_ISA_EXT_ARR(ZBA),
|
||||
KVM_ISA_EXT_ARR(ZBB),
|
||||
KVM_ISA_EXT_ARR(ZBC),
|
||||
@ -66,6 +67,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
|
||||
KVM_ISA_EXT_ARR(ZKSED),
|
||||
KVM_ISA_EXT_ARR(ZKSH),
|
||||
KVM_ISA_EXT_ARR(ZKT),
|
||||
KVM_ISA_EXT_ARR(ZTSO),
|
||||
KVM_ISA_EXT_ARR(ZVBB),
|
||||
KVM_ISA_EXT_ARR(ZVBC),
|
||||
KVM_ISA_EXT_ARR(ZVFH),
|
||||
@ -117,6 +119,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
|
||||
case KVM_RISCV_ISA_EXT_SSTC:
|
||||
case KVM_RISCV_ISA_EXT_SVINVAL:
|
||||
case KVM_RISCV_ISA_EXT_SVNAPOT:
|
||||
case KVM_RISCV_ISA_EXT_ZACAS:
|
||||
case KVM_RISCV_ISA_EXT_ZBA:
|
||||
case KVM_RISCV_ISA_EXT_ZBB:
|
||||
case KVM_RISCV_ISA_EXT_ZBC:
|
||||
@ -141,6 +144,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
|
||||
case KVM_RISCV_ISA_EXT_ZKSED:
|
||||
case KVM_RISCV_ISA_EXT_ZKSH:
|
||||
case KVM_RISCV_ISA_EXT_ZKT:
|
||||
case KVM_RISCV_ISA_EXT_ZTSO:
|
||||
case KVM_RISCV_ISA_EXT_ZVBB:
|
||||
case KVM_RISCV_ISA_EXT_ZVBC:
|
||||
case KVM_RISCV_ISA_EXT_ZVFH:
|
||||
|
@ -195,7 +195,6 @@ config S390
|
||||
select HAVE_KPROBES
|
||||
select HAVE_KPROBES_ON_FTRACE
|
||||
select HAVE_KRETPROBES
|
||||
select HAVE_KVM
|
||||
select HAVE_LIVEPATCH
|
||||
select HAVE_MEMBLOCK_PHYS_MAP
|
||||
select HAVE_MOD_ARCH_SPECIFIC
|
||||
|
@ -12,7 +12,320 @@
|
||||
#include <linux/types.h>
|
||||
|
||||
#define __KVM_S390
|
||||
#define __KVM_HAVE_GUEST_DEBUG
|
||||
|
||||
struct kvm_s390_skeys {
|
||||
__u64 start_gfn;
|
||||
__u64 count;
|
||||
__u64 skeydata_addr;
|
||||
__u32 flags;
|
||||
__u32 reserved[9];
|
||||
};
|
||||
|
||||
#define KVM_S390_CMMA_PEEK (1 << 0)
|
||||
|
||||
/**
|
||||
* kvm_s390_cmma_log - Used for CMMA migration.
|
||||
*
|
||||
* Used both for input and output.
|
||||
*
|
||||
* @start_gfn: Guest page number to start from.
|
||||
* @count: Size of the result buffer.
|
||||
* @flags: Control operation mode via KVM_S390_CMMA_* flags
|
||||
* @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
|
||||
* pages are still remaining.
|
||||
* @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
|
||||
* in the PGSTE.
|
||||
* @values: Pointer to the values buffer.
|
||||
*
|
||||
* Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
|
||||
*/
|
||||
struct kvm_s390_cmma_log {
|
||||
__u64 start_gfn;
|
||||
__u32 count;
|
||||
__u32 flags;
|
||||
union {
|
||||
__u64 remaining;
|
||||
__u64 mask;
|
||||
};
|
||||
__u64 values;
|
||||
};
|
||||
|
||||
#define KVM_S390_RESET_POR 1
|
||||
#define KVM_S390_RESET_CLEAR 2
|
||||
#define KVM_S390_RESET_SUBSYSTEM 4
|
||||
#define KVM_S390_RESET_CPU_INIT 8
|
||||
#define KVM_S390_RESET_IPL 16
|
||||
|
||||
/* for KVM_S390_MEM_OP */
|
||||
struct kvm_s390_mem_op {
|
||||
/* in */
|
||||
__u64 gaddr; /* the guest address */
|
||||
__u64 flags; /* flags */
|
||||
__u32 size; /* amount of bytes */
|
||||
__u32 op; /* type of operation */
|
||||
__u64 buf; /* buffer in userspace */
|
||||
union {
|
||||
struct {
|
||||
__u8 ar; /* the access register number */
|
||||
__u8 key; /* access key, ignored if flag unset */
|
||||
__u8 pad1[6]; /* ignored */
|
||||
__u64 old_addr; /* ignored if cmpxchg flag unset */
|
||||
};
|
||||
__u32 sida_offset; /* offset into the sida */
|
||||
__u8 reserved[32]; /* ignored */
|
||||
};
|
||||
};
|
||||
/* types for kvm_s390_mem_op->op */
|
||||
#define KVM_S390_MEMOP_LOGICAL_READ 0
|
||||
#define KVM_S390_MEMOP_LOGICAL_WRITE 1
|
||||
#define KVM_S390_MEMOP_SIDA_READ 2
|
||||
#define KVM_S390_MEMOP_SIDA_WRITE 3
|
||||
#define KVM_S390_MEMOP_ABSOLUTE_READ 4
|
||||
#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
|
||||
#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
|
||||
|
||||
/* flags for kvm_s390_mem_op->flags */
|
||||
#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
|
||||
#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
|
||||
#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
|
||||
|
||||
/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
|
||||
#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
|
||||
#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
|
||||
|
||||
struct kvm_s390_psw {
|
||||
__u64 mask;
|
||||
__u64 addr;
|
||||
};
|
||||
|
||||
/* valid values for type in kvm_s390_interrupt */
|
||||
#define KVM_S390_SIGP_STOP 0xfffe0000u
|
||||
#define KVM_S390_PROGRAM_INT 0xfffe0001u
|
||||
#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
|
||||
#define KVM_S390_RESTART 0xfffe0003u
|
||||
#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u
|
||||
#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u
|
||||
#define KVM_S390_MCHK 0xfffe1000u
|
||||
#define KVM_S390_INT_CLOCK_COMP 0xffff1004u
|
||||
#define KVM_S390_INT_CPU_TIMER 0xffff1005u
|
||||
#define KVM_S390_INT_VIRTIO 0xffff2603u
|
||||
#define KVM_S390_INT_SERVICE 0xffff2401u
|
||||
#define KVM_S390_INT_EMERGENCY 0xffff1201u
|
||||
#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
|
||||
/* Anything below 0xfffe0000u is taken by INT_IO */
|
||||
#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
|
||||
(((schid)) | \
|
||||
((ssid) << 16) | \
|
||||
((cssid) << 18) | \
|
||||
((ai) << 26))
|
||||
#define KVM_S390_INT_IO_MIN 0x00000000u
|
||||
#define KVM_S390_INT_IO_MAX 0xfffdffffu
|
||||
#define KVM_S390_INT_IO_AI_MASK 0x04000000u
|
||||
|
||||
|
||||
struct kvm_s390_interrupt {
|
||||
__u32 type;
|
||||
__u32 parm;
|
||||
__u64 parm64;
|
||||
};
|
||||
|
||||
struct kvm_s390_io_info {
|
||||
__u16 subchannel_id;
|
||||
__u16 subchannel_nr;
|
||||
__u32 io_int_parm;
|
||||
__u32 io_int_word;
|
||||
};
|
||||
|
||||
struct kvm_s390_ext_info {
|
||||
__u32 ext_params;
|
||||
__u32 pad;
|
||||
__u64 ext_params2;
|
||||
};
|
||||
|
||||
struct kvm_s390_pgm_info {
|
||||
__u64 trans_exc_code;
|
||||
__u64 mon_code;
|
||||
__u64 per_address;
|
||||
__u32 data_exc_code;
|
||||
__u16 code;
|
||||
__u16 mon_class_nr;
|
||||
__u8 per_code;
|
||||
__u8 per_atmid;
|
||||
__u8 exc_access_id;
|
||||
__u8 per_access_id;
|
||||
__u8 op_access_id;
|
||||
#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
|
||||
#define KVM_S390_PGM_FLAGS_ILC_0 0x02
|
||||
#define KVM_S390_PGM_FLAGS_ILC_1 0x04
|
||||
#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
|
||||
#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
|
||||
__u8 flags;
|
||||
__u8 pad[2];
|
||||
};
|
||||
|
||||
struct kvm_s390_prefix_info {
|
||||
__u32 address;
|
||||
};
|
||||
|
||||
struct kvm_s390_extcall_info {
|
||||
__u16 code;
|
||||
};
|
||||
|
||||
struct kvm_s390_emerg_info {
|
||||
__u16 code;
|
||||
};
|
||||
|
||||
#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
|
||||
struct kvm_s390_stop_info {
|
||||
__u32 flags;
|
||||
};
|
||||
|
||||
struct kvm_s390_mchk_info {
|
||||
__u64 cr14;
|
||||
__u64 mcic;
|
||||
__u64 failing_storage_address;
|
||||
__u32 ext_damage_code;
|
||||
__u32 pad;
|
||||
__u8 fixed_logout[16];
|
||||
};
|
||||
|
||||
struct kvm_s390_irq {
|
||||
__u64 type;
|
||||
union {
|
||||
struct kvm_s390_io_info io;
|
||||
struct kvm_s390_ext_info ext;
|
||||
struct kvm_s390_pgm_info pgm;
|
||||
struct kvm_s390_emerg_info emerg;
|
||||
struct kvm_s390_extcall_info extcall;
|
||||
struct kvm_s390_prefix_info prefix;
|
||||
struct kvm_s390_stop_info stop;
|
||||
struct kvm_s390_mchk_info mchk;
|
||||
char reserved[64];
|
||||
} u;
|
||||
};
|
||||
|
||||
struct kvm_s390_irq_state {
|
||||
__u64 buf;
|
||||
__u32 flags; /* will stay unused for compatibility reasons */
|
||||
__u32 len;
|
||||
__u32 reserved[4]; /* will stay unused for compatibility reasons */
|
||||
};
|
||||
|
||||
struct kvm_s390_ucas_mapping {
|
||||
__u64 user_addr;
|
||||
__u64 vcpu_addr;
|
||||
__u64 length;
|
||||
};
|
||||
|
||||
struct kvm_s390_pv_sec_parm {
|
||||
__u64 origin;
|
||||
__u64 length;
|
||||
};
|
||||
|
||||
struct kvm_s390_pv_unp {
|
||||
__u64 addr;
|
||||
__u64 size;
|
||||
__u64 tweak;
|
||||
};
|
||||
|
||||
enum pv_cmd_dmp_id {
|
||||
KVM_PV_DUMP_INIT,
|
||||
KVM_PV_DUMP_CONFIG_STOR_STATE,
|
||||
KVM_PV_DUMP_COMPLETE,
|
||||
KVM_PV_DUMP_CPU,
|
||||
};
|
||||
|
||||
struct kvm_s390_pv_dmp {
|
||||
__u64 subcmd;
|
||||
__u64 buff_addr;
|
||||
__u64 buff_len;
|
||||
__u64 gaddr; /* For dump storage state */
|
||||
__u64 reserved[4];
|
||||
};
|
||||
|
||||
enum pv_cmd_info_id {
|
||||
KVM_PV_INFO_VM,
|
||||
KVM_PV_INFO_DUMP,
|
||||
};
|
||||
|
||||
struct kvm_s390_pv_info_dump {
|
||||
__u64 dump_cpu_buffer_len;
|
||||
__u64 dump_config_mem_buffer_per_1m;
|
||||
__u64 dump_config_finalize_len;
|
||||
};
|
||||
|
||||
struct kvm_s390_pv_info_vm {
|
||||
__u64 inst_calls_list[4];
|
||||
__u64 max_cpus;
|
||||
__u64 max_guests;
|
||||
__u64 max_guest_addr;
|
||||
__u64 feature_indication;
|
||||
};
|
||||
|
||||
struct kvm_s390_pv_info_header {
|
||||
__u32 id;
|
||||
__u32 len_max;
|
||||
__u32 len_written;
|
||||
__u32 reserved;
|
||||
};
|
||||
|
||||
struct kvm_s390_pv_info {
|
||||
struct kvm_s390_pv_info_header header;
|
||||
union {
|
||||
struct kvm_s390_pv_info_dump dump;
|
||||
struct kvm_s390_pv_info_vm vm;
|
||||
};
|
||||
};
|
||||
|
||||
enum pv_cmd_id {
|
||||
KVM_PV_ENABLE,
|
||||
KVM_PV_DISABLE,
|
||||
KVM_PV_SET_SEC_PARMS,
|
||||
KVM_PV_UNPACK,
|
||||
KVM_PV_VERIFY,
|
||||
KVM_PV_PREP_RESET,
|
||||
KVM_PV_UNSHARE_ALL,
|
||||
KVM_PV_INFO,
|
||||
KVM_PV_DUMP,
|
||||
KVM_PV_ASYNC_CLEANUP_PREPARE,
|
||||
KVM_PV_ASYNC_CLEANUP_PERFORM,
|
||||
};
|
||||
|
||||
struct kvm_pv_cmd {
|
||||
__u32 cmd; /* Command to be executed */
|
||||
__u16 rc; /* Ultravisor return code */
|
||||
__u16 rrc; /* Ultravisor return reason code */
|
||||
__u64 data; /* Data or address */
|
||||
__u32 flags; /* flags for future extensions. Must be 0 for now */
|
||||
__u32 reserved[3];
|
||||
};
|
||||
|
||||
struct kvm_s390_zpci_op {
|
||||
/* in */
|
||||
__u32 fh; /* target device */
|
||||
__u8 op; /* operation to perform */
|
||||
__u8 pad[3];
|
||||
union {
|
||||
/* for KVM_S390_ZPCIOP_REG_AEN */
|
||||
struct {
|
||||
__u64 ibv; /* Guest addr of interrupt bit vector */
|
||||
__u64 sb; /* Guest addr of summary bit */
|
||||
__u32 flags;
|
||||
__u32 noi; /* Number of interrupts */
|
||||
__u8 isc; /* Guest interrupt subclass */
|
||||
__u8 sbo; /* Offset of guest summary bit vector */
|
||||
__u16 pad;
|
||||
} reg_aen;
|
||||
__u64 reserved[8];
|
||||
} u;
|
||||
};
|
||||
|
||||
/* types for kvm_s390_zpci_op->op */
|
||||
#define KVM_S390_ZPCIOP_REG_AEN 0
|
||||
#define KVM_S390_ZPCIOP_DEREG_AEN 1
|
||||
|
||||
/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
|
||||
#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
|
||||
|
||||
/* Device control API: s390-specific devices */
|
||||
#define KVM_DEV_FLIC_GET_ALL_IRQS 1
|
||||
|
@ -19,7 +19,6 @@ if VIRTUALIZATION
|
||||
config KVM
|
||||
def_tristate y
|
||||
prompt "Kernel-based Virtual Machine (KVM) support"
|
||||
depends on HAVE_KVM
|
||||
select HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
select HAVE_KVM_VCPU_ASYNC_IOCTL
|
||||
select KVM_ASYNC_PF
|
||||
|
@ -102,7 +102,7 @@ static int __diag_page_ref_service(struct kvm_vcpu *vcpu)
|
||||
parm.token_addr & 7 || parm.zarch != 0x8000000000000000ULL)
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, parm.token_addr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, parm.token_addr))
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
||||
|
||||
vcpu->arch.pfault_token = parm.token_addr;
|
||||
|
@ -665,7 +665,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
case ASCE_TYPE_REGION1: {
|
||||
union region1_table_entry rfte;
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &rfte.val))
|
||||
return -EFAULT;
|
||||
@ -683,7 +683,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
case ASCE_TYPE_REGION2: {
|
||||
union region2_table_entry rste;
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &rste.val))
|
||||
return -EFAULT;
|
||||
@ -701,7 +701,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
case ASCE_TYPE_REGION3: {
|
||||
union region3_table_entry rtte;
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &rtte.val))
|
||||
return -EFAULT;
|
||||
@ -729,7 +729,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
case ASCE_TYPE_SEGMENT: {
|
||||
union segment_table_entry ste;
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &ste.val))
|
||||
return -EFAULT;
|
||||
@ -749,7 +749,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
ptr = ste.fc0.pto * (PAGE_SIZE / 2) + vaddr.px * 8;
|
||||
}
|
||||
}
|
||||
if (kvm_is_error_gpa(vcpu->kvm, ptr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, ptr))
|
||||
return PGM_ADDRESSING;
|
||||
if (deref_table(vcpu->kvm, ptr, &pte.val))
|
||||
return -EFAULT;
|
||||
@ -771,7 +771,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
|
||||
*prot = PROT_TYPE_IEP;
|
||||
return PGM_PROTECTION;
|
||||
}
|
||||
if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, raddr.addr))
|
||||
return PGM_ADDRESSING;
|
||||
*gpa = raddr.addr;
|
||||
return 0;
|
||||
@ -958,7 +958,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
|
||||
return rc;
|
||||
} else {
|
||||
gpa = kvm_s390_real_to_abs(vcpu, ga);
|
||||
if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, gpa)) {
|
||||
rc = PGM_ADDRESSING;
|
||||
prot = PROT_NONE;
|
||||
}
|
||||
|
@@ -1031,7 +1031,7 @@ static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
return 0;
}
ext = fi->srv_signal;
/* only clear the event bit */
/* only clear the event bits */
fi->srv_signal.ext_params &= ~SCCB_EVENT_PENDING;
clear_bit(IRQ_PEND_EXT_SERVICE_EV, &fi->pending_irqs);
spin_unlock(&fi->lock);
@@ -1041,7 +1041,7 @@ static int __must_check __deliver_service_ev(struct kvm_vcpu *vcpu)
trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
ext.ext_params, 0);

return write_sclp(vcpu, SCCB_EVENT_PENDING);
return write_sclp(vcpu, ext.ext_params & SCCB_EVENT_PENDING);
}
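Aside: a hedged sketch of the delivery pattern above: snapshot the pending state under the lock, clear only the event-pending bits, and deliver from the snapshot so only the bits that were actually pending reach the guest. The types and names below are illustrative, not the in-tree s390 code.

struct demo_srv_signal {
	u32 ext_params;
};

static u32 demo_take_pending_events(struct demo_srv_signal *sig,
				    spinlock_t *lock, u32 event_mask)
{
	struct demo_srv_signal snap;

	spin_lock(lock);
	snap = *sig;			/* deliver from this snapshot */
	sig->ext_params &= ~event_mask;	/* only clear the event bits */
	spin_unlock(lock);

	return snap.ext_params & event_mask;
}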
|
||||
|
||||
static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
|
||||
|
@ -2878,7 +2878,7 @@ static int kvm_s390_vm_mem_op_abs(struct kvm *kvm, struct kvm_s390_mem_op *mop)
|
||||
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
|
||||
if (kvm_is_error_gpa(kvm, mop->gaddr)) {
|
||||
if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
|
||||
r = PGM_ADDRESSING;
|
||||
goto out_unlock;
|
||||
}
|
||||
@ -2940,7 +2940,7 @@ static int kvm_s390_vm_mem_op_cmpxchg(struct kvm *kvm, struct kvm_s390_mem_op *m
|
||||
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
|
||||
if (kvm_is_error_gpa(kvm, mop->gaddr)) {
|
||||
if (!kvm_is_gpa_in_memslot(kvm, mop->gaddr)) {
|
||||
r = PGM_ADDRESSING;
|
||||
goto out_unlock;
|
||||
}
|
||||
@ -3153,7 +3153,7 @@ static int kvm_s390_apxa_installed(void)
|
||||
*/
|
||||
static void kvm_s390_set_crycb_format(struct kvm *kvm)
|
||||
{
|
||||
kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
|
||||
kvm->arch.crypto.crycbd = virt_to_phys(kvm->arch.crypto.crycb);
|
||||
|
||||
/* Clear the CRYCB format bits - i.e., set format 0 by default */
|
||||
kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
|
||||
|
@ -149,7 +149,7 @@ static int handle_set_prefix(struct kvm_vcpu *vcpu)
|
||||
* first page, since address is 8k aligned and memory pieces are always
|
||||
* at least 1MB aligned and have at least a size of 1MB.
|
||||
*/
|
||||
if (kvm_is_error_gpa(vcpu->kvm, address))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, address))
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
||||
|
||||
kvm_s390_set_prefix(vcpu, address);
|
||||
@ -464,7 +464,7 @@ static int handle_test_block(struct kvm_vcpu *vcpu)
|
||||
return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
|
||||
addr = kvm_s390_real_to_abs(vcpu, addr);
|
||||
|
||||
if (kvm_is_error_gpa(vcpu->kvm, addr))
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, addr))
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
|
||||
/*
|
||||
* We don't expect errors on modern systems, and do not care
|
||||
|
@ -172,7 +172,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, struct kvm_vcpu *dst_vcpu,
|
||||
* first page, since address is 8k aligned and memory pieces are always
|
||||
* at least 1MB aligned and have at least a size of 1MB.
|
||||
*/
|
||||
if (kvm_is_error_gpa(vcpu->kvm, irq.u.prefix.address)) {
|
||||
if (!kvm_is_gpa_in_memslot(vcpu->kvm, irq.u.prefix.address)) {
|
||||
*reg &= 0xffffffff00000000UL;
|
||||
*reg |= SIGP_STATUS_INVALID_PARAMETER;
|
||||
return SIGP_CC_STATUS_STORED;
|
||||
|
@ -245,7 +245,6 @@ config X86
|
||||
select HAVE_FUNCTION_ERROR_INJECTION
|
||||
select HAVE_KRETPROBES
|
||||
select HAVE_RETHOOK
|
||||
select HAVE_KVM
|
||||
select HAVE_LIVEPATCH if X86_64
|
||||
select HAVE_MIXED_BREAKPOINTS_REGS
|
||||
select HAVE_MOD_ARCH_SPECIFIC
|
||||
|
@ -15,7 +15,7 @@ typedef struct {
|
||||
unsigned int irq_spurious_count;
|
||||
unsigned int icr_read_retry_count;
|
||||
#endif
|
||||
#ifdef CONFIG_HAVE_KVM
|
||||
#if IS_ENABLED(CONFIG_KVM)
|
||||
unsigned int kvm_posted_intr_ipis;
|
||||
unsigned int kvm_posted_intr_wakeup_ipis;
|
||||
unsigned int kvm_posted_intr_nested_ipis;
|
||||
|
@ -741,7 +741,7 @@ DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR, sysvec_irq_work);
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HAVE_KVM
|
||||
#if IS_ENABLED(CONFIG_KVM)
|
||||
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR, sysvec_kvm_posted_intr_ipi);
|
||||
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR, sysvec_kvm_posted_intr_wakeup_ipi);
|
||||
DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR, sysvec_kvm_posted_intr_nested_ipi);
|
||||
|
@ -29,7 +29,7 @@ struct irq_desc;
|
||||
|
||||
extern void fixup_irqs(void);
|
||||
|
||||
#ifdef CONFIG_HAVE_KVM
|
||||
#if IS_ENABLED(CONFIG_KVM)
|
||||
extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
|
||||
#endif
|
||||
|
||||
|
@ -84,11 +84,9 @@
|
||||
#define HYPERVISOR_CALLBACK_VECTOR 0xf3
|
||||
|
||||
/* Vector for KVM to deliver posted interrupt IPI */
|
||||
#ifdef CONFIG_HAVE_KVM
|
||||
#define POSTED_INTR_VECTOR 0xf2
|
||||
#define POSTED_INTR_WAKEUP_VECTOR 0xf1
|
||||
#define POSTED_INTR_NESTED_VECTOR 0xf0
|
||||
#endif
|
||||
|
||||
#define MANAGED_IRQ_SHUTDOWN_VECTOR 0xef
|
||||
|
||||
|
@ -103,7 +103,6 @@ KVM_X86_OP(write_tsc_multiplier)
|
||||
KVM_X86_OP(get_exit_info)
|
||||
KVM_X86_OP(check_intercept)
|
||||
KVM_X86_OP(handle_exit_irqoff)
|
||||
KVM_X86_OP(request_immediate_exit)
|
||||
KVM_X86_OP(sched_in)
|
||||
KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
|
||||
KVM_X86_OP_OPTIONAL(vcpu_blocking)
|
||||
|
@ -12,11 +12,9 @@ BUILD_BUG_ON(1)
|
||||
* a NULL definition, for example if "static_call_cond()" will be used
|
||||
* at the call sites.
|
||||
*/
|
||||
KVM_X86_PMU_OP(hw_event_available)
|
||||
KVM_X86_PMU_OP(pmc_idx_to_pmc)
|
||||
KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
|
||||
KVM_X86_PMU_OP(msr_idx_to_pmc)
|
||||
KVM_X86_PMU_OP(is_valid_rdpmc_ecx)
|
||||
KVM_X86_PMU_OP_OPTIONAL(check_rdpmc_early)
|
||||
KVM_X86_PMU_OP(is_valid_msr)
|
||||
KVM_X86_PMU_OP(get_msr)
|
||||
KVM_X86_PMU_OP(set_msr)
|
||||
|
@ -536,6 +536,7 @@ struct kvm_pmc {
|
||||
#define KVM_PMC_MAX_FIXED 3
|
||||
#define MSR_ARCH_PERFMON_FIXED_CTR_MAX (MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
|
||||
#define KVM_AMD_PMC_MAX_GENERIC 6
|
||||
|
||||
struct kvm_pmu {
|
||||
u8 version;
|
||||
unsigned nr_arch_gp_counters;
|
||||
@ -1468,6 +1469,15 @@ struct kvm_arch {
|
||||
*/
|
||||
bool shadow_root_allocated;
|
||||
|
||||
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
|
||||
/*
|
||||
* If set, the VM has (or had) an external write tracking user, and
|
||||
* thus all write tracking metadata has been allocated, even if KVM
|
||||
* itself isn't using write tracking.
|
||||
*/
|
||||
bool external_write_tracking_enabled;
|
||||
#endif
|
||||
|
||||
#if IS_ENABLED(CONFIG_HYPERV)
|
||||
hpa_t hv_root_tdp;
|
||||
spinlock_t hv_root_tdp_lock;
|
||||
@ -1665,7 +1675,8 @@ struct kvm_x86_ops {
|
||||
void (*flush_tlb_guest)(struct kvm_vcpu *vcpu);
|
||||
|
||||
int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
|
||||
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu);
|
||||
enum exit_fastpath_completion (*vcpu_run)(struct kvm_vcpu *vcpu,
|
||||
bool force_immediate_exit);
|
||||
int (*handle_exit)(struct kvm_vcpu *vcpu,
|
||||
enum exit_fastpath_completion exit_fastpath);
|
||||
int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
|
||||
@ -1733,8 +1744,6 @@ struct kvm_x86_ops {
|
||||
struct x86_exception *exception);
|
||||
void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
|
||||
|
||||
void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
|
||||
|
||||
void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);
|
||||
|
||||
/*
|
||||
@@ -1882,8 +1891,16 @@ static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn,
}
#endif /* CONFIG_HYPERV */

enum kvm_intr_type {
/* Values are arbitrary, but must be non-zero. */
KVM_HANDLING_IRQ = 1,
KVM_HANDLING_NMI,
};

/* Enable perf NMI and timer modes to work, and minimise false positives. */
#define kvm_arch_pmi_in_guest(vcpu) \
((vcpu) && (vcpu)->arch.handling_intr_from_guest)
((vcpu) && (vcpu)->arch.handling_intr_from_guest && \
(!!in_nmi() == ((vcpu)->arch.handling_intr_from_guest == KVM_HANDLING_NMI)))
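Aside: a hedged sketch of how the stricter predicate above behaves: a host NMI that lands while KVM is handling an IRQ exit no longer counts as a guest PMI, because the NMI-ness of the current context has to match the recorded exit type. The helper is illustrative only; the in-tree form is the macro shown in the hunk.

static bool demo_pmi_is_from_guest(struct kvm_vcpu *vcpu)
{
	u8 t = READ_ONCE(vcpu->arch.handling_intr_from_guest);

	if (!t)
		return false;	/* not inside the VM-exit IRQ/NMI window at all */

	/* an NMI-context PMI must line up with an NMI-type exit, and vice versa */
	return !!in_nmi() == (t == KVM_HANDLING_NMI);
}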
|
||||
|
||||
void __init kvm_mmu_x86_module_init(void);
|
||||
int kvm_mmu_vendor_module_init(void);
|
||||
@ -2048,7 +2065,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
|
||||
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
|
||||
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
|
||||
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
|
||||
void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
|
||||
unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr);
|
||||
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
|
||||
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
|
||||
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
|
||||
@ -2241,7 +2258,6 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
|
||||
|
||||
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
|
||||
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
|
||||
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
|
||||
|
||||
void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
|
||||
u32 size);
|
||||
|
@ -358,10 +358,10 @@ struct sev_es_save_area {
|
||||
struct vmcb_seg ldtr;
|
||||
struct vmcb_seg idtr;
|
||||
struct vmcb_seg tr;
|
||||
u64 vmpl0_ssp;
|
||||
u64 vmpl1_ssp;
|
||||
u64 vmpl2_ssp;
|
||||
u64 vmpl3_ssp;
|
||||
u64 pl0_ssp;
|
||||
u64 pl1_ssp;
|
||||
u64 pl2_ssp;
|
||||
u64 pl3_ssp;
|
||||
u64 u_cet;
|
||||
u8 reserved_0xc8[2];
|
||||
u8 vmpl;
|
||||
|
@ -25,6 +25,7 @@
|
||||
#define VMX_FEATURE_EPT_EXECUTE_ONLY ( 0*32+ 17) /* "ept_x_only" EPT entries can be execute only */
|
||||
#define VMX_FEATURE_EPT_AD ( 0*32+ 18) /* EPT Accessed/Dirty bits */
|
||||
#define VMX_FEATURE_EPT_1GB ( 0*32+ 19) /* 1GB EPT pages */
|
||||
#define VMX_FEATURE_EPT_5LEVEL ( 0*32+ 20) /* 5-level EPT paging */
|
||||
|
||||
/* Aggregated APIC features 24-27 */
|
||||
#define VMX_FEATURE_FLEXPRIORITY ( 0*32+ 24) /* TPR shadow + virt APIC */
|
||||
|
@ -7,6 +7,8 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/const.h>
|
||||
#include <linux/bits.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/stddef.h>
|
||||
@ -40,7 +42,6 @@
|
||||
#define __KVM_HAVE_IRQ_LINE
|
||||
#define __KVM_HAVE_MSI
|
||||
#define __KVM_HAVE_USER_NMI
|
||||
#define __KVM_HAVE_GUEST_DEBUG
|
||||
#define __KVM_HAVE_MSIX
|
||||
#define __KVM_HAVE_MCE
|
||||
#define __KVM_HAVE_PIT_STATE2
|
||||
@ -49,7 +50,6 @@
|
||||
#define __KVM_HAVE_DEBUGREGS
|
||||
#define __KVM_HAVE_XSAVE
|
||||
#define __KVM_HAVE_XCRS
|
||||
#define __KVM_HAVE_READONLY_MEM
|
||||
|
||||
/* Architectural interrupt line count. */
|
||||
#define KVM_NR_INTERRUPTS 256
|
||||
@@ -526,9 +526,278 @@ struct kvm_pmu_event_filter {
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1

#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0)
#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS _BITUL(0)
#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)

/* for KVM_CAP_MCE */
struct kvm_x86_mce {
__u64 status;
__u64 addr;
__u64 misc;
__u64 mcg_status;
__u8 bank;
__u8 pad1[7];
__u64 pad2[3];
};

/* for KVM_CAP_XEN_HVM */
#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8)

struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
__u64 blob_addr_32;
__u64 blob_addr_64;
__u8 blob_size_32;
__u8 blob_size_64;
__u8 pad2[30];
};

struct kvm_xen_hvm_attr {
__u16 type;
__u16 pad[3];
union {
__u8 long_mode;
__u8 vector;
__u8 runstate_update_flag;
union {
__u64 gfn;
#define KVM_XEN_INVALID_GFN ((__u64)-1)
__u64 hva;
} shared_info;
struct {
__u32 send_port;
__u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
__u32 flags;
#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
#define KVM_XEN_EVTCHN_RESET (1 << 2)
/*
* Events sent by the guest are either looped back to
* the guest itself (potentially on a different port#)
* or signalled via an eventfd.
*/
union {
struct {
__u32 port;
__u32 vcpu;
__u32 priority;
} port;
struct {
__u32 port; /* Zero for eventfd */
__s32 fd;
} eventfd;
__u32 padding[4];
} deliver;
} evtchn;
__u32 xen_version;
__u64 pad[8];
} u;
};


/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
#define KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA 0x6

struct kvm_xen_vcpu_attr {
__u16 type;
__u16 pad[3];
union {
__u64 gpa;
#define KVM_XEN_INVALID_GPA ((__u64)-1)
__u64 hva;
__u64 pad[8];
struct {
__u64 state;
__u64 state_entry_time;
__u64 time_running;
__u64 time_runnable;
__u64 time_blocked;
__u64 time_offline;
} runstate;
__u32 vcpu_id;
struct {
__u32 port;
__u32 priority;
__u64 expires_ns;
} timer;
__u8 vector;
} u;
};

/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA 0x9

/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
KVM_SEV_INIT = 0,
KVM_SEV_ES_INIT,
/* Guest launch commands */
KVM_SEV_LAUNCH_START,
KVM_SEV_LAUNCH_UPDATE_DATA,
KVM_SEV_LAUNCH_UPDATE_VMSA,
KVM_SEV_LAUNCH_SECRET,
KVM_SEV_LAUNCH_MEASURE,
KVM_SEV_LAUNCH_FINISH,
/* Guest migration commands (outgoing) */
KVM_SEV_SEND_START,
KVM_SEV_SEND_UPDATE_DATA,
KVM_SEV_SEND_UPDATE_VMSA,
KVM_SEV_SEND_FINISH,
/* Guest migration commands (incoming) */
KVM_SEV_RECEIVE_START,
KVM_SEV_RECEIVE_UPDATE_DATA,
KVM_SEV_RECEIVE_UPDATE_VMSA,
KVM_SEV_RECEIVE_FINISH,
/* Guest status and debug commands */
KVM_SEV_GUEST_STATUS,
KVM_SEV_DBG_DECRYPT,
KVM_SEV_DBG_ENCRYPT,
/* Guest certificates commands */
KVM_SEV_CERT_EXPORT,
/* Attestation report */
KVM_SEV_GET_ATTESTATION_REPORT,
/* Guest Migration Extension */
KVM_SEV_SEND_CANCEL,

KVM_SEV_NR_MAX,
};

struct kvm_sev_cmd {
__u32 id;
__u64 data;
__u32 error;
__u32 sev_fd;
};

struct kvm_sev_launch_start {
__u32 handle;
__u32 policy;
__u64 dh_uaddr;
__u32 dh_len;
__u64 session_uaddr;
__u32 session_len;
};

struct kvm_sev_launch_update_data {
__u64 uaddr;
__u32 len;
};


struct kvm_sev_launch_secret {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};

struct kvm_sev_launch_measure {
__u64 uaddr;
__u32 len;
};

struct kvm_sev_guest_status {
__u32 handle;
__u32 policy;
__u32 state;
};

struct kvm_sev_dbg {
__u64 src_uaddr;
__u64 dst_uaddr;
__u32 len;
};

struct kvm_sev_attestation_report {
__u8 mnonce[16];
__u64 uaddr;
__u32 len;
};

struct kvm_sev_send_start {
__u32 policy;
__u64 pdh_cert_uaddr;
__u32 pdh_cert_len;
__u64 plat_certs_uaddr;
__u32 plat_certs_len;
__u64 amd_certs_uaddr;
__u32 amd_certs_len;
__u64 session_uaddr;
__u32 session_len;
};

struct kvm_sev_send_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};

struct kvm_sev_receive_start {
__u32 handle;
__u32 policy;
__u64 pdh_uaddr;
__u32 pdh_len;
__u64 session_uaddr;
__u32 session_len;
};

struct kvm_sev_receive_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};

#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)

struct kvm_hyperv_eventfd {
__u32 conn_id;
__s32 fd;
__u32 flags;
__u32 padding[3];
};

#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)

/*
* Masked event layout.
* Bits Description
@@ -549,10 +818,10 @@ struct kvm_pmu_event_filter {
((__u64)(!!(exclude)) << 55))

#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \
(GENMASK_ULL(7, 0) | GENMASK_ULL(35, 32))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (GENMASK_ULL(63, 56))
#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (GENMASK_ULL(15, 8))
#define KVM_PMU_MASKED_ENTRY_EXCLUDE (BIT_ULL(55))
(__GENMASK_ULL(7, 0) | __GENMASK_ULL(35, 32))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (__GENMASK_ULL(63, 56))
#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (__GENMASK_ULL(15, 8))
#define KVM_PMU_MASKED_ENTRY_EXCLUDE (_BITULL(55))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT (56)

/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
@@ -560,7 +829,7 @@ struct kvm_pmu_event_filter {
#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */

/* x86-specific KVM_EXIT_HYPERCALL flags. */
#define KVM_EXIT_HYPERCALL_LONG_MODE BIT(0)
#define KVM_EXIT_HYPERCALL_LONG_MODE _BITULL(0)

#define KVM_X86_DEFAULT_VM 0
#define KVM_X86_SW_PROTECTED_VM 1
@@ -92,7 +92,7 @@ struct kvm_clock_pairing {
#define KVM_ASYNC_PF_DELIVERY_AS_INT (1 << 3)

/* MSR_KVM_ASYNC_PF_INT */
#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0)
#define KVM_ASYNC_PF_VEC_MASK __GENMASK(7, 0)

/* MSR_KVM_MIGRATION_CONTROL */
#define KVM_MIGRATION_READY (1 << 0)

@@ -72,6 +72,8 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c)
c->vmx_capability[MISC_FEATURES] |= VMX_F(EPT_AD);
if (ept & VMX_EPT_1GB_PAGE_BIT)
c->vmx_capability[MISC_FEATURES] |= VMX_F(EPT_1GB);
if (ept & VMX_EPT_PAGE_WALK_5_BIT)
c->vmx_capability[MISC_FEATURES] |= VMX_F(EPT_5LEVEL);

/* Synthetic APIC features that are aggregates of multiple features. */
if ((c->vmx_capability[PRIMARY_CTLS] & VMX_F(VIRTUAL_TPR)) &&
@@ -153,7 +153,7 @@ static const __initconst struct idt_data apic_idts[] = {
#ifdef CONFIG_X86_LOCAL_APIC
INTG(LOCAL_TIMER_VECTOR, asm_sysvec_apic_timer_interrupt),
INTG(X86_PLATFORM_IPI_VECTOR, asm_sysvec_x86_platform_ipi),
# ifdef CONFIG_HAVE_KVM
# if IS_ENABLED(CONFIG_KVM)
INTG(POSTED_INTR_VECTOR, asm_sysvec_kvm_posted_intr_ipi),
INTG(POSTED_INTR_WAKEUP_VECTOR, asm_sysvec_kvm_posted_intr_wakeup_ipi),
INTG(POSTED_INTR_NESTED_VECTOR, asm_sysvec_kvm_posted_intr_nested_ipi),

@@ -164,7 +164,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
#if defined(CONFIG_X86_IO_APIC)
seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
#if IS_ENABLED(CONFIG_KVM)
seq_printf(p, "%*s: ", prec, "PIN");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
@@ -290,7 +290,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
}
#endif

#ifdef CONFIG_HAVE_KVM
#if IS_ENABLED(CONFIG_KVM)
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

@@ -7,7 +7,6 @@ source "virt/kvm/Kconfig"

menuconfig VIRTUALIZATION
bool "Virtualization"
depends on HAVE_KVM || X86
default y
help
Say Y here to get to see options for using your Linux host to run other
@@ -20,7 +19,6 @@ if VIRTUALIZATION

config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
depends on HIGH_RES_TIMERS
depends on X86_LOCAL_APIC
select KVM_COMMON
@@ -29,9 +27,9 @@ config KVM
select HAVE_KVM_PFNCACHE
select HAVE_KVM_DIRTY_RING_TSO
select HAVE_KVM_DIRTY_RING_ACQ_REL
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_READONLY_MEM
select KVM_ASYNC_PF
select USER_RETURN_NOTIFIER
select KVM_MMIO
@@ -189,9 +189,8 @@ static const struct file_operations mmu_rmaps_stat_fops = {
.release = kvm_mmu_rmaps_stat_release,
};

int kvm_arch_create_vm_debugfs(struct kvm *kvm)
void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
debugfs_create_file("mmu_rmaps_stat", 0644, kvm->debugfs_dentry, kvm,
&mmu_rmaps_stat_fops);
return 0;
}
@@ -1820,22 +1820,22 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
return X86EMUL_CONTINUE;
}

static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
static int emulate_push(struct x86_emulate_ctxt *ctxt, const void *data, int len)
{
struct segmented_address addr;

rsp_increment(ctxt, -bytes);
rsp_increment(ctxt, -len);
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;

return segmented_write(ctxt, addr, data, bytes);
return segmented_write(ctxt, addr, data, len);
}

static int em_push(struct x86_emulate_ctxt *ctxt)
{
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
return emulate_push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}

static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1863,7 +1863,8 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
unsigned long val, change_mask;
unsigned long val = 0;
unsigned long change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
int cpl = ctxt->ops->cpl(ctxt);

@@ -1920,7 +1921,7 @@ static int em_enter(struct x86_emulate_ctxt *ctxt)
return X86EMUL_UNHANDLEABLE;

rbp = reg_read(ctxt, VCPU_REGS_RBP);
rc = push(ctxt, &rbp, stack_size(ctxt));
rc = emulate_push(ctxt, &rbp, stack_size(ctxt));
if (rc != X86EMUL_CONTINUE)
return rc;
assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
@@ -1954,7 +1955,7 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned long selector;
unsigned long selector = 0;
int rc;

rc = emulate_pop(ctxt, &selector, 2);
@@ -2000,7 +2001,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
u32 val;
u32 val = 0;

while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
@@ -2229,7 +2230,7 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
unsigned long eip = 0;

rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -2241,7 +2242,8 @@ static int em_ret(struct x86_emulate_ctxt *ctxt)
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
unsigned long eip = 0;
unsigned long cs = 0;
int cpl = ctxt->ops->cpl(ctxt);
struct desc_struct new_desc;

@@ -3011,7 +3013,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
ret = em_push(ctxt);
}

ops->get_dr(ctxt, 7, &dr7);
dr7 = ops->get_dr(ctxt, 7);
ops->set_dr(ctxt, 7, dr7 & ~(DR_LOCAL_ENABLE_MASK | DR_LOCAL_SLOWDOWN));

return ret;
@@ -3184,7 +3186,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
unsigned long eip = 0;

rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -3866,15 +3868,6 @@ static int check_cr_access(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}

static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
unsigned long dr7;

ctxt->ops->get_dr(ctxt, 7, &dr7);

return dr7 & DR7_GD;
}

static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
int dr = ctxt->modrm_reg;
@@ -3887,10 +3880,10 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);

if (check_dr7_gd(ctxt)) {
if (ctxt->ops->get_dr(ctxt, 7) & DR7_GD) {
ulong dr6;

ctxt->ops->get_dr(ctxt, 6, &dr6);
dr6 = ctxt->ops->get_dr(ctxt, 6);
dr6 &= ~DR_TRAP_BITS;
dr6 |= DR6_BD | DR6_ACTIVE_LOW;
ctxt->ops->set_dr(ctxt, 6, dr6);
@@ -3962,7 +3955,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
* protected mode.
*/
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
ctxt->ops->check_pmc(ctxt, rcx))
ctxt->ops->check_rdpmc_early(ctxt, rcx))
return emulate_gp(ctxt, 0);

return X86EMUL_CONTINUE;
@@ -4505,11 +4498,11 @@ static const struct instr_dual instr_dual_0f_38_f1 = {
};

static const struct gprefix three_byte_0f_38_f0 = {
ID(0, &instr_dual_0f_38_f0), N, N, N
ID(0, &instr_dual_0f_38_f0), ID(0, &instr_dual_0f_38_f0), N, N
};

static const struct gprefix three_byte_0f_38_f1 = {
ID(0, &instr_dual_0f_38_f1), N, N, N
ID(0, &instr_dual_0f_38_f1), ID(0, &instr_dual_0f_38_f1), N, N
};

/*
@@ -5449,7 +5442,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
break;
case 0x21: /* mov from dr to reg */
ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
ctxt->dst.val = ops->get_dr(ctxt, ctxt->modrm_reg);
break;
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
@@ -203,12 +203,12 @@ struct x86_emulate_ops {
ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
int (*cpl)(struct x86_emulate_ctxt *ctxt);
void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
ulong (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr);
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
int (*check_rdpmc_early)(struct x86_emulate_ctxt *ctxt, u32 pmc);
int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
void (*halt)(struct x86_emulate_ctxt *ctxt);
void (*wbinvd)(struct x86_emulate_ctxt *ctxt);

@@ -41,6 +41,7 @@
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "xen.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"
@@ -124,6 +125,9 @@ static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

@@ -499,8 +503,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
}

/* Check if there are APF page ready requests pending */
if (enabled)
if (enabled) {
kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
kvm_xen_sw_enable_lapic(apic->vcpu);
}
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
@@ -2466,8 +2472,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;

if (!vcpu->arch.apic)
if (!vcpu->arch.apic) {
static_branch_dec(&kvm_has_noapic_vcpu);
return;
}

hrtimer_cancel(&apic->lapic_timer.timer);

@@ -2809,6 +2817,11 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)

ASSERT(vcpu != NULL);

if (!irqchip_in_kernel(vcpu->kvm)) {
static_branch_inc(&kvm_has_noapic_vcpu);
return 0;
}

apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
if (!apic)
goto nomem;
@@ -2847,6 +2860,21 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

/*
* Defer evaluating inhibits until the vCPU is first run, as this vCPU
* will not get notified of any changes until this vCPU is visible to
* other vCPUs (marked online and added to the set of vCPUs).
*
* Opportunistically mark APICv active as VMX in particular is highly
* unlikely to have inhibits. Ignore the current per-VM APICv state so
* that vCPU creation is guaranteed to run with a deterministic value,
* the request will ensure the vCPU gets the correct state before VM-Entry.
*/
if (enable_apicv) {
apic->apicv_active = true;
kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
}

return 0;
nomem_free_apic:
kfree(apic);
@@ -3575,10 +3575,14 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
if (WARN_ON_ONCE(!sp))
return;

if (is_tdp_mmu_page(sp))
if (is_tdp_mmu_page(sp)) {
lockdep_assert_held_read(&kvm->mmu_lock);
kvm_tdp_mmu_put_root(kvm, sp);
else if (!--sp->root_count && sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
} else {
lockdep_assert_held_write(&kvm->mmu_lock);
if (!--sp->root_count && sp->role.invalid)
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
}

*root_hpa = INVALID_PAGE;
}
@@ -3587,6 +3591,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
ulong roots_to_free)
{
bool is_tdp_mmu = tdp_mmu_enabled && mmu->root_role.direct;
int i;
LIST_HEAD(invalid_list);
bool free_active_root;
@@ -3609,7 +3614,10 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
return;
}

write_lock(&kvm->mmu_lock);
if (is_tdp_mmu)
read_lock(&kvm->mmu_lock);
else
write_lock(&kvm->mmu_lock);

for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
@@ -3635,8 +3643,13 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
mmu->root.pgd = 0;
}

kvm_mmu_commit_zap_page(kvm, &invalid_list);
write_unlock(&kvm->mmu_lock);
if (is_tdp_mmu) {
read_unlock(&kvm->mmu_lock);
WARN_ON_ONCE(!list_empty(&invalid_list));
} else {
kvm_mmu_commit_zap_page(kvm, &invalid_list);
write_unlock(&kvm->mmu_lock);
}
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);

@@ -3693,15 +3706,15 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
unsigned i;
int r;

if (tdp_mmu_enabled)
return kvm_tdp_mmu_alloc_root(vcpu);

write_lock(&vcpu->kvm->mmu_lock);
r = make_mmu_pages_available(vcpu);
if (r < 0)
goto out_unlock;

if (tdp_mmu_enabled) {
root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
mmu->root.hpa = root;
} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
if (shadow_root_level >= PT64_ROOT_4LEVEL) {
root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
mmu->root.hpa = root;
} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
@@ -7039,9 +7052,7 @@ int kvm_mmu_vendor_module_init(void)

kvm_mmu_reset_all_pte_masks();

pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
0, SLAB_ACCOUNT, NULL);
pte_list_desc_cache = KMEM_CACHE(pte_list_desc, SLAB_ACCOUNT);
if (!pte_list_desc_cache)
goto out;

@@ -20,10 +20,23 @@
#include "mmu_internal.h"
#include "page_track.h"

static bool kvm_external_write_tracking_enabled(struct kvm *kvm)
{
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
/*
* Read external_write_tracking_enabled before related pointers. Pairs
* with the smp_store_release in kvm_page_track_write_tracking_enable().
*/
return smp_load_acquire(&kvm->arch.external_write_tracking_enabled);
#else
return false;
#endif
}

bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
{
return IS_ENABLED(CONFIG_KVM_EXTERNAL_WRITE_TRACKING) ||
!tdp_enabled || kvm_shadow_root_allocated(kvm);
return kvm_external_write_tracking_enabled(kvm) ||
kvm_shadow_root_allocated(kvm) || !tdp_enabled;
}

void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
@@ -153,6 +166,50 @@ int kvm_page_track_init(struct kvm *kvm)
return init_srcu_struct(&head->track_srcu);
}

static int kvm_enable_external_write_tracking(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
int r = 0, i, bkt;

mutex_lock(&kvm->slots_arch_lock);

/*
* Check for *any* write tracking user (not just external users) under
* lock. This avoids unnecessary work, e.g. if KVM itself is using
* write tracking, or if two external users raced when registering.
*/
if (kvm_page_track_write_tracking_enabled(kvm))
goto out_success;

for (i = 0; i < kvm_arch_nr_memslot_as_ids(kvm); i++) {
slots = __kvm_memslots(kvm, i);
kvm_for_each_memslot(slot, bkt, slots) {
/*
* Intentionally do NOT free allocations on failure to
* avoid having to track which allocations were made
* now versus when the memslot was created. The
* metadata is guaranteed to be freed when the slot is
* freed, and will be kept/used if userspace retries
* the failed ioctl() instead of killing the VM.
*/
r = kvm_page_track_write_tracking_alloc(slot);
if (r)
goto out_unlock;
}
}

out_success:
/*
* Ensure that external_write_tracking_enabled becomes true strictly
* after all the related pointers are set.
*/
smp_store_release(&kvm->arch.external_write_tracking_enabled, true);
out_unlock:
mutex_unlock(&kvm->slots_arch_lock);
return r;
}

/*
* register the notifier so that event interception for the tracked guest
* pages can be received.
@@ -161,10 +218,17 @@ int kvm_page_track_register_notifier(struct kvm *kvm,
struct kvm_page_track_notifier_node *n)
{
struct kvm_page_track_notifier_head *head;
int r;

if (!kvm || kvm->mm != current->mm)
return -ESRCH;

if (!kvm_external_write_tracking_enabled(kvm)) {
r = kvm_enable_external_write_tracking(kvm);
if (r)
return r;
}

kvm_get_kvm(kvm);

head = &kvm->arch.track_notifier_head;
@@ -149,11 +149,11 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* If shared is set, this function is operating under the MMU lock in read
* mode.
*/
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid)\
for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
_root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
if (kvm_mmu_page_as_id(_root) != _as_id) { \
#define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _only_valid) \
for (_root = tdp_mmu_next_root(_kvm, NULL, _only_valid); \
({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
_root = tdp_mmu_next_root(_kvm, _root, _only_valid)) \
if (_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) { \
} else

#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
@@ -171,12 +171,19 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* Holding mmu_lock for write obviates the need for RCU protection as the list
* is guaranteed to be stable.
*/
#define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \
if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \
kvm_mmu_page_as_id(_root) != _as_id) { \
#define __for_each_tdp_mmu_root(_kvm, _root, _as_id, _only_valid) \
list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \
if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \
((_as_id >= 0 && kvm_mmu_page_as_id(_root) != _as_id) || \
((_only_valid) && (_root)->role.invalid))) { \
} else

#define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
__for_each_tdp_mmu_root(_kvm, _root, _as_id, false)

#define for_each_valid_tdp_mmu_root(_kvm, _root, _as_id) \
__for_each_tdp_mmu_root(_kvm, _root, _as_id, true)

static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
@@ -216,22 +223,41 @@ static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu)
{
union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
struct kvm_mmu *mmu = vcpu->arch.mmu;
union kvm_mmu_page_role role = mmu->root_role;
int as_id = kvm_mmu_role_as_id(role);
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_page *root;

lockdep_assert_held_write(&kvm->mmu_lock);
/*
* Check for an existing root before acquiring the pages lock to avoid
* unnecessary serialization if multiple vCPUs are loading a new root.
* E.g. when bringing up secondary vCPUs, KVM will already have created
* a valid root on behalf of the primary vCPU.
*/
read_lock(&kvm->mmu_lock);

for_each_valid_tdp_mmu_root_yield_safe(kvm, root, as_id) {
if (root->role.word == role.word)
goto out_read_unlock;
}

spin_lock(&kvm->arch.tdp_mmu_pages_lock);

/*
* Check for an existing root before allocating a new one. Note, the
* role check prevents consuming an invalid root.
* Recheck for an existing root after acquiring the pages lock, another
* vCPU may have raced ahead and created a new usable root. Manually
* walk the list of roots as the standard macros assume that the pages
* lock is *not* held. WARN if grabbing a reference to a usable root
* fails, as the last reference to a root can only be put *after* the
* root has been invalidated, which requires holding mmu_lock for write.
*/
for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
if (root->role.word == role.word &&
kvm_tdp_mmu_get_root(root))
goto out;
!WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root)))
goto out_spin_unlock;
}

root = tdp_mmu_alloc_sp(vcpu);
@@ -245,13 +271,20 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
* is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
*/
refcount_set(&root->tdp_mmu_root_count, 2);

spin_lock(&kvm->arch.tdp_mmu_pages_lock);
list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
return __pa(root->spt);
out_spin_unlock:
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
out_read_unlock:
read_unlock(&kvm->mmu_lock);
/*
* Note, KVM_REQ_MMU_FREE_OBSOLETE_ROOTS will prevent entering the guest
* and actually consuming the root if it's invalidated after dropping
* mmu_lock, and the root can't be freed as this vCPU holds a reference.
*/
mmu->root.hpa = __pa(root->spt);
mmu->root.pgd = 0;
return 0;
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
@@ -734,15 +767,26 @@ static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
rcu_read_lock();

/*
* To avoid RCU stalls due to recursively removing huge swaths of SPs,
* split the zap into two passes. On the first pass, zap at the 1gb
* level, and then zap top-level SPs on the second pass. "1gb" is not
* arbitrary, as KVM must be able to zap a 1gb shadow page without
* inducing a stall to allow in-place replacement with a 1gb hugepage.
* Zap roots in multiple passes of decreasing granularity, i.e. zap at
* 4KiB=>2MiB=>1GiB=>root, in order to better honor need_resched() (all
* preempt models) or mmu_lock contention (full or real-time models).
* Zapping at finer granularity marginally increases the total time of
* the zap, but in most cases the zap itself isn't latency sensitive.
*
* Because zapping a SP recurses on its children, stepping down to
* PG_LEVEL_4K in the iterator itself is unnecessary.
* If KVM is configured to prove the MMU, skip the 4KiB and 2MiB zaps
* in order to mimic the page fault path, which can replace a 1GiB page
* table with an equivalent 1GiB hugepage, i.e. can get saddled with
* zapping a 1GiB region that's fully populated with 4KiB SPTEs. This
* allows verifying that KVM can safely zap 1GiB regions, e.g. without
* inducing RCU stalls, without relying on a relatively rare event
* (zapping roots is orders of magnitude more common). Note, because
* zapping a SP recurses on its children, stepping down to PG_LEVEL_4K
* in the iterator itself is unnecessary.
*/
if (!IS_ENABLED(CONFIG_KVM_PROVE_MMU)) {
__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_4K);
__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_2M);
}
__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
__tdp_mmu_zap_root(kvm, root, shared, root->role.level);

@@ -800,7 +844,13 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
continue;

tdp_mmu_iter_set_spte(kvm, &iter, 0);
flush = true;

/*
* Zapping SPTEs in invalid roots doesn't require a TLB flush,
* see kvm_tdp_mmu_zap_invalidated_roots() for details.
*/
if (!root->role.invalid)
flush = true;
}

rcu_read_unlock();
@@ -813,16 +863,16 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
}

/*
* Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
* true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
* more SPTEs were zapped since the MMU lock was last acquired.
* Zap leaf SPTEs for the range of gfns, [start, end), for all *VALID** roots.
* Returns true if a TLB flush is needed before releasing the MMU lock, i.e. if
* one or more SPTEs were zapped since the MMU lock was last acquired.
*/
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
{
struct kvm_mmu_page *root;

lockdep_assert_held_write(&kvm->mmu_lock);
for_each_tdp_mmu_root_yield_safe(kvm, root)
for_each_valid_tdp_mmu_root_yield_safe(kvm, root, -1)
flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);

return flush;
@@ -896,7 +946,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
* the VM is being destroyed).
*
* Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
* See kvm_tdp_mmu_get_vcpu_root_hpa().
* See kvm_tdp_mmu_alloc_root().
*/
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
@@ -1622,7 +1672,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
{
struct kvm_mmu_page *root;

for_each_tdp_mmu_root(kvm, root, slot->as_id)
for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}

@@ -1740,7 +1790,7 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
bool spte_set = false;

lockdep_assert_held_write(&kvm->mmu_lock);
for_each_tdp_mmu_root(kvm, root, slot->as_id)
for_each_valid_tdp_mmu_root(kvm, root, slot->as_id)
spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

return spte_set;
@@ -10,7 +10,7 @@
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu);

__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
@@ -29,6 +29,9 @@
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);

struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);

/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
@@ -67,7 +70,7 @@ static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
* all perf counters (both gp and fixed). The mapping relationship
* between pmc and perf counters is as the following:
* * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
* [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
* [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed
* * AMD: [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
* and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
*/
@@ -411,7 +414,7 @@ static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
int idx)
{
int fixed_idx = idx - INTEL_PMC_IDX_FIXED;
int fixed_idx = idx - KVM_FIXED_PMC_BASE_IDX;

if (filter->action == KVM_PMU_EVENT_DENY &&
test_bit(fixed_idx, (ulong *)&filter->fixed_counter_bitmap))
@@ -441,11 +444,10 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{
return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
check_pmu_event_filter(pmc);
}

static void reprogram_counter(struct kvm_pmc *pmc)
static int reprogram_counter(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
u64 eventsel = pmc->eventsel;
@@ -456,7 +458,7 @@ static void reprogram_counter(struct kvm_pmc *pmc)
emulate_overflow = pmc_pause_counter(pmc);

if (!pmc_event_is_allowed(pmc))
goto reprogram_complete;
return 0;

if (emulate_overflow)
__kvm_perf_overflow(pmc, false);
@@ -466,7 +468,7 @@ static void reprogram_counter(struct kvm_pmc *pmc)

if (pmc_is_fixed(pmc)) {
fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
pmc->idx - INTEL_PMC_IDX_FIXED);
pmc->idx - KVM_FIXED_PMC_BASE_IDX);
if (fixed_ctr_ctrl & 0x1)
eventsel |= ARCH_PERFMON_EVENTSEL_OS;
if (fixed_ctr_ctrl & 0x2)
@@ -477,43 +479,45 @@ static void reprogram_counter(struct kvm_pmc *pmc)
}

if (pmc->current_config == new_config && pmc_resume_counter(pmc))
goto reprogram_complete;
return 0;

pmc_release_perf_event(pmc);

pmc->current_config = new_config;

/*
* If reprogramming fails, e.g. due to contention, leave the counter's
* reprogram bit set, i.e. opportunistically try again on the next PMU
* refresh. Don't make a new request as doing so can stall the guest
* if reprogramming repeatedly fails.
*/
if (pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
(eventsel & pmu->raw_event_mask),
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
eventsel & ARCH_PERFMON_EVENTSEL_INT))
return;

reprogram_complete:
clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
return pmc_reprogram_counter(pmc, PERF_TYPE_RAW,
(eventsel & pmu->raw_event_mask),
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
eventsel & ARCH_PERFMON_EVENTSEL_INT);
}

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
int bit;

for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
bitmap_copy(bitmap, pmu->reprogram_pmi, X86_PMC_IDX_MAX);

if (unlikely(!pmc)) {
clear_bit(bit, pmu->reprogram_pmi);
continue;
}
/*
* The reprogramming bitmap can be written asynchronously by something
* other than the task that holds vcpu->mutex, take care to clear only
* the bits that will actually be processed.
*/
BUILD_BUG_ON(sizeof(bitmap) != sizeof(atomic64_t));
atomic64_andnot(*(s64 *)bitmap, &pmu->__reprogram_pmi);

reprogram_counter(pmc);
kvm_for_each_pmc(pmu, pmc, bit, bitmap) {
/*
* If reprogramming fails, e.g. due to contention, re-set the
* reprogram bit, i.e. opportunistically try again on the
* next PMU refresh. Don't make a new request as doing so can
* stall the guest if reprogramming repeatedly fails.
*/
if (reprogram_counter(pmc))
set_bit(pmc->idx, pmu->reprogram_pmi);
}

/*
@@ -525,10 +529,20 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access PMU */
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{
return static_call(kvm_x86_pmu_is_valid_rdpmc_ecx)(vcpu, idx);
/*
* On Intel, VMX interception has priority over RDPMC exceptions that
* aren't already handled by the emulator, i.e. there are no additional
* checks needed for Intel PMUs.
*
* On AMD, _all_ exceptions on RDPMC have priority over SVM intercepts,
* i.e. an invalid PMC results in a #GP, not #VMEXIT.
*/
if (!kvm_pmu_ops.check_rdpmc_early)
return 0;

return static_call(kvm_x86_pmu_check_rdpmc_early)(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
@@ -567,10 +581,9 @@ static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
bool fast_mode = idx & (1u << 31);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
u64 mask = fast_mode ? ~0u : ~0ull;
u64 mask = ~0ull;

if (!pmu->version)
return 1;
@@ -716,11 +729,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)

bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);

for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
if (!pmc)
continue;

kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) {
pmc_stop_counter(pmc);
pmc->counter = 0;
pmc->emulated_counter = 0;
@@ -741,6 +750,8 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
*/
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

if (KVM_BUG_ON(kvm_vcpu_has_run(vcpu), vcpu->kvm))
return;

@@ -750,8 +761,22 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
*/
kvm_pmu_reset(vcpu);

bitmap_zero(vcpu_to_pmu(vcpu)->all_valid_pmc_idx, X86_PMC_IDX_MAX);
static_call(kvm_x86_pmu_refresh)(vcpu);
pmu->version = 0;
pmu->nr_arch_gp_counters = 0;
pmu->nr_arch_fixed_counters = 0;
pmu->counter_bitmask[KVM_PMC_GP] = 0;
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->reserved_bits = 0xffffffff00200000ull;
pmu->raw_event_mask = X86_RAW_EVENT_MASK;
pmu->global_ctrl_mask = ~0ull;
pmu->global_status_mask = ~0ull;
pmu->fixed_ctr_ctrl_mask = ~0ull;
pmu->pebs_enable_mask = ~0ull;
pmu->pebs_data_cfg_mask = ~0ull;
bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);

if (vcpu->kvm->arch.enable_pmu)
static_call(kvm_x86_pmu_refresh)(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
@@ -776,10 +801,8 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
pmu->pmc_in_use, X86_PMC_IDX_MAX);

for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);

if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
kvm_for_each_pmc(pmu, pmc, i, bitmask) {
if (pmc->perf_event && !pmc_speculative_in_use(pmc))
pmc_stop_counter(pmc);
}

@@ -799,13 +822,6 @@ static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
kvm_pmu_request_counter_reprogram(pmc);
}

static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
unsigned int perf_hw_id)
{
return !((pmc->eventsel ^ perf_get_hw_event_config(perf_hw_id)) &
AMD64_RAW_EVENT_MASK_NB);
}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{
bool select_os, select_user;
@@ -817,29 +833,56 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
select_user = config & ARCH_PERFMON_EVENTSEL_USR;
} else {
config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
pmc->idx - INTEL_PMC_IDX_FIXED);
pmc->idx - KVM_FIXED_PMC_BASE_IDX);
select_os = config & 0x1;
select_user = config & 0x2;
}

/*
* Skip the CPL lookup, which isn't free on Intel, if the result will
* be the same regardless of the CPL.
*/
if (select_os == select_user)
return select_os;

return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
}

void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
{
DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
int i;

for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX);

if (!pmc || !pmc_event_is_allowed(pmc))
if (!kvm_pmu_has_perf_global_ctrl(pmu))
bitmap_copy(bitmap, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
else if (!bitmap_and(bitmap, pmu->all_valid_pmc_idx,
(unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX))
return;

kvm_for_each_pmc(pmu, pmc, i, bitmap) {
/*
* Ignore checks for edge detect (all events currently emulated
* by KVM are always rising edges), pin control (unsupported
* by modern CPUs), and counter mask and its invert flag (KVM
* doesn't emulate multiple events in a single clock cycle).
*
* Note, the uppermost nibble of AMD's mask overlaps Intel's
* IN_TX (bit 32) and IN_TXCP (bit 33), as well as two reserved
* bits (bits 35:34). Checking the "in HLE/RTM transaction"
* flags is correct as the vCPU can't be in a transaction if
* KVM is emulating an instruction. Checking the reserved bits
* might be wrong if they are defined in the future, but so
* could ignoring them, so do the simple thing for now.
*/
if (((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB) ||
!pmc_event_is_allowed(pmc) || !cpl_is_matched(pmc))
continue;

/* Ignore checks for edge detect, pin control, invert and CMASK bits */
if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
kvm_pmu_incr_counter(pmc);
kvm_pmu_incr_counter(pmc);
}
}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
@@ -4,6 +4,8 @@

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
@@ -18,13 +20,18 @@
#define VMWARE_BACKDOOR_PMC_REAL_TIME 0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME 0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_emulated_event_selectors {
u64 INSTRUCTIONS_RETIRED;
u64 BRANCH_INSTRUCTIONS_RETIRED;
};

struct kvm_pmu_ops {
bool (*hw_event_available)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask);
struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
@@ -55,6 +62,38 @@ static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
return pmu->version > 1;
}

/*
* KVM tracks all counters in 64-bit bitmaps, with general purpose counters
* mapped to bits 31:0 and fixed counters mapped to 63:32, e.g. fixed counter 0
* is tracked internally via index 32. On Intel, (AMD doesn't support fixed
* counters), this mirrors how fixed counters are mapped to PERF_GLOBAL_CTRL
* and similar MSRs, i.e. tracking fixed counters at base index 32 reduces the
* amount of boilerplate needed to iterate over PMCs *and* simplifies common
* enabling/disable/reset operations.
*
* WARNING! This helper is only for lookups that are initiated by KVM, it is
* NOT safe for guest lookups, e.g. will do the wrong thing if passed a raw
* ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
* for RDPMC, not by adding 32 to the fixed counter index).
*/
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
if (idx < pmu->nr_arch_gp_counters)
return &pmu->gp_counters[idx];

idx -= KVM_FIXED_PMC_BASE_IDX;
if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
return &pmu->fixed_counters[idx];

return NULL;
}

#define kvm_for_each_pmc(pmu, pmc, i, bitmap) \
for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX) \
if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i))) \
continue; \
else \

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -131,12 +170,13 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)

if (pmc_is_fixed(pmc))
return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;
pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;

return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
@@ -178,6 +218,11 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
pmu_ops->MAX_NR_GP_COUNTERS);
kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
KVM_PMC_MAX_FIXED);

kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
}

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
@@ -216,7 +261,7 @@ static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
@@ -225,7 +270,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);

bool is_vmware_backdoor_pmc(u32 pmc_idx);
@@ -184,7 +184,6 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
struct kvm_smram_state_32 *smram)
{
struct desc_ptr dt;
unsigned long val;
int i;

smram->cr0 = kvm_read_cr0(vcpu);
@@ -195,10 +194,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
for (i = 0; i < 8; i++)
smram->gprs[i] = kvm_register_read_raw(vcpu, i);

kvm_get_dr(vcpu, 6, &val);
smram->dr6 = (u32)val;
kvm_get_dr(vcpu, 7, &val);
smram->dr7 = (u32)val;
smram->dr6 = (u32)vcpu->arch.dr6;
smram->dr7 = (u32)vcpu->arch.dr7;

enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);
@@ -231,7 +228,6 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
struct kvm_smram_state_64 *smram)
{
struct desc_ptr dt;
unsigned long val;
int i;

for (i = 0; i < 16; i++)
@@ -240,11 +236,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
smram->rip = kvm_rip_read(vcpu);
smram->rflags = kvm_get_rflags(vcpu);


kvm_get_dr(vcpu, 6, &val);
smram->dr6 = val;
kvm_get_dr(vcpu, 7, &val);
smram->dr7 = val;
smram->dr6 = vcpu->arch.dr6;
smram->dr7 = vcpu->arch.dr7;

smram->cr0 = kvm_read_cr0(vcpu);
smram->cr3 = kvm_read_cr3(vcpu);
@@ -25,7 +25,7 @@ enum pmu_type {
PMU_TYPE_EVNTSEL,
};

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
unsigned int num_counters = pmu->nr_arch_gp_counters;

@@ -70,28 +70,24 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
return NULL;
}

return amd_pmc_idx_to_pmc(pmu, idx);
return amd_pmu_get_pmc(pmu, idx);
}

static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
return true;
}

static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

idx &= ~(3u << 30);
if (idx >= pmu->nr_arch_gp_counters)
return -EINVAL;

return idx < pmu->nr_arch_gp_counters;
return 0;
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask)
{
return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx);
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
@@ -233,11 +229,9 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
}

struct kvm_pmu_ops amd_pmu_ops __initdata = {
.hw_event_available = amd_hw_event_available,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
.msr_idx_to_pmc = amd_msr_idx_to_pmc,
.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
.check_rdpmc_early = amd_check_rdpmc_early,
.is_valid_msr = amd_is_valid_msr,
.get_msr = amd_pmu_get_msr,
.set_msr = amd_pmu_set_msr,
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user