Merge tag 'kvm-selftests-treewide-6.14' of https://github.com/kvm-x86/linux into HEAD

KVM selftests "tree"-wide changes for 6.14:

 - Rework vcpu_get_reg() to return a value instead of using an out-param, and
   update all affected arch code accordingly.
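
   As a concrete illustration, a before/after fragment in the style of the
   callers converted in the diffs below (not a standalone program; "vcpu"
   and the register-ID macro come from the selftests library):

       uint64_t val;

       /* Before: the register value came back through an out-param. */
       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);

       /* After: the register value is simply the return value. */
       val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));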

 - Convert the max_guest_memory_test into a more generic mmu_stress_test.
   The basic gist of the "conversion" is to have the test do mprotect() on
   guest memory while vCPUs are accessing said memory, e.g. to verify KVM
   and mmu_notifiers are working as intended.
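
   A rough sketch of the host-side flow, condensed from the mmu_stress_test
   diff further below ("mem", "slot_size", and "vcpu" are the test's own
   variables; setup, looping, and stage synchronization elided):

       /* Make guest memory read-only while vCPUs are still writing it. */
       mprotect(mem, slot_size, PROT_READ);

       /* A vCPU's next write should now make KVM exit with EFAULT. */
       r = _vcpu_run(vcpu);
       TEST_ASSERT(r == -1 && errno == EFAULT,
                   "Expected EFAULT on write to RO memory");

       /* Restore write access so the vCPUs can run to completion. */
       mprotect(mem, slot_size, PROT_READ | PROT_WRITE);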

 - Play nice with treewide builds of unsupported architectures, e.g. arm
   (32-bit), as KVM selftests' Makefile doesn't do anything to ensure the
   target architecture is actually one KVM selftests supports.

 - Use the kernel's $(ARCH) definition instead of the target triple for arch
   specific directories, e.g. arm64 instead of aarch64, mainly so as not to
   be different from the rest of the kernel.
Paolo Bonzini 2024-12-19 07:50:06 -05:00
commit a066bad89c
164 changed files with 588 additions and 481 deletions

View File

@@ -12605,8 +12605,8 @@ F: arch/arm64/include/asm/kvm*
F: arch/arm64/include/uapi/asm/kvm*
F: arch/arm64/kvm/
F: include/kvm/arm_*
F: tools/testing/selftests/kvm/*/aarch64/
F: tools/testing/selftests/kvm/aarch64/
F: tools/testing/selftests/kvm/*/arm64/
F: tools/testing/selftests/kvm/arm64/
KERNEL VIRTUAL MACHINE FOR LOONGARCH (KVM/LoongArch)
M: Tianrui Zhao <zhaotianrui@loongson.cn>
@@ -12677,8 +12677,8 @@ F: arch/s390/kvm/
F: arch/s390/mm/gmap.c
F: drivers/s390/char/uvdevice.c
F: tools/testing/selftests/drivers/s390x/uvdevice/
F: tools/testing/selftests/kvm/*/s390x/
F: tools/testing/selftests/kvm/s390x/
F: tools/testing/selftests/kvm/*/s390/
F: tools/testing/selftests/kvm/s390/
KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
M: Sean Christopherson <seanjc@google.com>
@@ -12695,8 +12695,8 @@ F: arch/x86/include/uapi/asm/svm.h
F: arch/x86/include/uapi/asm/vmx.h
F: arch/x86/kvm/
F: arch/x86/kvm/*/
F: tools/testing/selftests/kvm/*/x86_64/
F: tools/testing/selftests/kvm/x86_64/
F: tools/testing/selftests/kvm/*/x86/
F: tools/testing/selftests/kvm/x86/
KERNFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

View File

@@ -43,9 +43,6 @@
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
struct kvm_regs {
struct user_pt_regs regs; /* sp = sp_el0 */

View File

@@ -211,9 +211,6 @@ struct kvm_riscv_sbi_sta {
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000
#define KVM_REG_RISCV_TYPE_SHIFT 24

View File

@@ -1070,6 +1070,10 @@ struct kvm_dirty_tlb {
#define KVM_REG_SIZE_SHIFT 52
#define KVM_REG_SIZE_MASK 0x00f0000000000000ULL
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
#define KVM_REG_SIZE_U8 0x0000000000000000ULL
#define KVM_REG_SIZE_U16 0x0010000000000000ULL
#define KVM_REG_SIZE_U32 0x0020000000000000ULL

View File

@@ -9,3 +9,4 @@
!config
!settings
!Makefile
!Makefile.kvm

View File

@@ -1,347 +1,16 @@
# SPDX-License-Identifier: GPL-2.0-only
include ../../../build/Build.include
all:
top_srcdir = ../../../..
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
ifeq ($(ARCH),x86)
ARCH_DIR := x86_64
else ifeq ($(ARCH),arm64)
ARCH_DIR := aarch64
else ifeq ($(ARCH),s390)
ARCH_DIR := s390x
else
ARCH_DIR := $(ARCH)
endif
LIBKVM += lib/assert.c
LIBKVM += lib/elf.c
LIBKVM += lib/guest_modes.c
LIBKVM += lib/io.c
LIBKVM += lib/kvm_util.c
LIBKVM += lib/memstress.c
LIBKVM += lib/guest_sprintf.c
LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
LIBKVM += lib/test_util.c
LIBKVM += lib/ucall_common.c
LIBKVM += lib/userfaultfd_util.c
LIBKVM_STRING += lib/string_override.c
LIBKVM_x86_64 += lib/x86_64/apic.c
LIBKVM_x86_64 += lib/x86_64/handlers.S
LIBKVM_x86_64 += lib/x86_64/hyperv.c
LIBKVM_x86_64 += lib/x86_64/memstress.c
LIBKVM_x86_64 += lib/x86_64/pmu.c
LIBKVM_x86_64 += lib/x86_64/processor.c
LIBKVM_x86_64 += lib/x86_64/sev.c
LIBKVM_x86_64 += lib/x86_64/svm.c
LIBKVM_x86_64 += lib/x86_64/ucall.c
LIBKVM_x86_64 += lib/x86_64/vmx.c
LIBKVM_aarch64 += lib/aarch64/gic.c
LIBKVM_aarch64 += lib/aarch64/gic_v3.c
LIBKVM_aarch64 += lib/aarch64/gic_v3_its.c
LIBKVM_aarch64 += lib/aarch64/handlers.S
LIBKVM_aarch64 += lib/aarch64/processor.c
LIBKVM_aarch64 += lib/aarch64/spinlock.c
LIBKVM_aarch64 += lib/aarch64/ucall.c
LIBKVM_aarch64 += lib/aarch64/vgic.c
LIBKVM_s390x += lib/s390x/diag318_test_handler.c
LIBKVM_s390x += lib/s390x/processor.c
LIBKVM_s390x += lib/s390x/ucall.c
LIBKVM_s390x += lib/s390x/facility.c
LIBKVM_riscv += lib/riscv/handlers.S
LIBKVM_riscv += lib/riscv/processor.c
LIBKVM_riscv += lib/riscv/ucall.c
# Non-compiled test targets
TEST_PROGS_x86_64 += x86_64/nx_huge_pages_test.sh
# Compiled test targets
TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/dirty_log_page_splitting_test
TEST_GEN_PROGS_x86_64 += x86_64/feature_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/exit_on_emulation_failure_test
TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
TEST_GEN_PROGS_x86_64 += x86_64/hwcr_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_clock
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_evmcs
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_extended_hypercalls
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_features
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_ipi
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_svm_test
TEST_GEN_PROGS_x86_64 += x86_64/hyperv_tlb_flush
TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
TEST_GEN_PROGS_x86_64 += x86_64/nested_exceptions_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/pmu_counters_test
TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
TEST_GEN_PROGS_x86_64 += x86_64/private_mem_conversions_test
TEST_GEN_PROGS_x86_64 += x86_64/private_mem_kvm_exits_test
TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
TEST_GEN_PROGS_x86_64 += x86_64/smaller_maxphyaddr_emulation_test
TEST_GEN_PROGS_x86_64 += x86_64/smm_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_shutdown_test
TEST_GEN_PROGS_x86_64 += x86_64/svm_nested_soft_inject_test
TEST_GEN_PROGS_x86_64 += x86_64/tsc_scaling_sync
TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
TEST_GEN_PROGS_x86_64 += x86_64/ucna_injection_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test
TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_exception_with_invalid_guest_state
TEST_GEN_PROGS_x86_64 += x86_64/vmx_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test
TEST_GEN_PROGS_x86_64 += x86_64/apic_bus_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/xapic_ipi_test
TEST_GEN_PROGS_x86_64 += x86_64/xapic_state_test
TEST_GEN_PROGS_x86_64 += x86_64/xcr0_cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
TEST_GEN_PROGS_x86_64 += x86_64/tsc_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_caps_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
TEST_GEN_PROGS_x86_64 += x86_64/sev_init2_tests
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += x86_64/sev_smoke_test
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
TEST_GEN_PROGS_x86_64 += x86_64/recalc_apic_map_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += coalesced_io_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
TEST_GEN_PROGS_x86_64 += guest_memfd_test
TEST_GEN_PROGS_x86_64 += guest_print_test
TEST_GEN_PROGS_x86_64 += hardware_disable_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += kvm_page_table_test
TEST_GEN_PROGS_x86_64 += max_guest_memory_test
TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test
TEST_GEN_PROGS_x86_64 += memslot_perf_test
TEST_GEN_PROGS_x86_64 += rseq_test
TEST_GEN_PROGS_x86_64 += set_memory_region_test
TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
TEST_GEN_PROGS_x86_64 += system_counter_offset_test
TEST_GEN_PROGS_x86_64 += pre_fault_memory_test
# Compiled outputs used by test targets
TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test
TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer_edge_cases
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/mmio_abort
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
TEST_GEN_PROGS_aarch64 += aarch64/psci_test
TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += aarch64/vgic_lpi_stress
TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
TEST_GEN_PROGS_aarch64 += aarch64/no-vgic-v3
TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
TEST_GEN_PROGS_aarch64 += arch_timer
TEST_GEN_PROGS_aarch64 += coalesced_io_test
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
TEST_GEN_PROGS_aarch64 += guest_print_test
TEST_GEN_PROGS_aarch64 += get-reg-list
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test
TEST_GEN_PROGS_aarch64 += memslot_perf_test
TEST_GEN_PROGS_aarch64 += rseq_test
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
TEST_GEN_PROGS_aarch64 += kvm_binary_stats_test
TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/resets
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += s390x/debug_test
TEST_GEN_PROGS_s390x += s390x/cpumodel_subfuncs_test
TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
TEST_GEN_PROGS_s390x += s390x/ucontrol_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += guest_print_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
TEST_GEN_PROGS_s390x += kvm_page_table_test
TEST_GEN_PROGS_s390x += rseq_test
TEST_GEN_PROGS_s390x += set_memory_region_test
TEST_GEN_PROGS_s390x += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
TEST_GEN_PROGS_riscv += riscv/ebreak_test
TEST_GEN_PROGS_riscv += arch_timer
TEST_GEN_PROGS_riscv += coalesced_io_test
TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
TEST_GEN_PROGS_riscv += get-reg-list
TEST_GEN_PROGS_riscv += guest_print_test
TEST_GEN_PROGS_riscv += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += steal_time
SPLIT_TESTS += arch_timer
SPLIT_TESTS += get-reg-list
TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
LIBKVM += $(LIBKVM_$(ARCH_DIR))
OVERRIDE_TARGETS = 1
# lib.mk defines $(OUTPUT), prepends $(OUTPUT)/ to $(TEST_GEN_PROGS), and most
# importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`,
# which causes the environment variable to override the makefile).
include ../lib.mk
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
ifeq ($(ARCH),$(filter $(ARCH),arm64 s390 riscv x86 x86_64))
# Top-level selftests allows ARCH=x86_64 :-(
ifeq ($(ARCH),x86_64)
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/x86/include
ARCH := x86
endif
include Makefile.kvm
else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
# Empty targets for unsupported architectures
all:
clean:
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
-fno-builtin-memcmp -fno-builtin-memcpy \
-fno-builtin-memset -fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -fno-strict-aliasing \
-I$(LINUX_TOOL_INCLUDE) -I$(LINUX_TOOL_ARCH_INCLUDE) \
-I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(ARCH_DIR) \
-I ../rseq -I.. $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
ifeq ($(ARCH),s390)
CFLAGS += -march=z10
endif
ifeq ($(ARCH),x86)
ifeq ($(shell echo "void foo(void) { }" | $(CC) -march=x86-64-v2 -x c - -c -o /dev/null 2>/dev/null; echo "$$?"),0)
CFLAGS += -march=x86-64-v2
endif
endif
ifeq ($(ARCH),arm64)
tools_dir := $(top_srcdir)/tools
arm64_tools_dir := $(tools_dir)/arch/arm64/tools/
ifneq ($(abs_objdir),)
arm64_hdr_outdir := $(abs_objdir)/tools/
else
arm64_hdr_outdir := $(tools_dir)/
endif
GEN_HDRS := $(arm64_hdr_outdir)arch/arm64/include/generated/
CFLAGS += -I$(GEN_HDRS)
$(GEN_HDRS): $(wildcard $(arm64_tools_dir)/*)
$(MAKE) -C $(arm64_tools_dir) OUTPUT=$(arm64_hdr_outdir)
endif
no-pie-option := $(call try-run, echo 'int main(void) { return 0; }' | \
$(CC) -Werror $(CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
# On s390, build the testcases KVM-enabled
pgste-option = $(call try-run, echo 'int main(void) { return 0; }' | \
$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)
LDLIBS += -ldl
LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
LIBKVM_C := $(filter %.c,$(LIBKVM))
LIBKVM_S := $(filter %.S,$(LIBKVM))
LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
SPLIT_TEST_GEN_PROGS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
SPLIT_TEST_GEN_OBJ := $(patsubst %, $(OUTPUT)/$(ARCH_DIR)/%.o, $(SPLIT_TESTS))
TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TEST_GEN_OBJ))
-include $(TEST_DEP_FILES)
$(shell mkdir -p $(sort $(OUTPUT)/$(ARCH_DIR) $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(filter-out $(SPLIT_TEST_GEN_PROGS), $(TEST_GEN_PROGS)) \
$(TEST_GEN_PROGS_EXTENDED): %: %.o
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIBKVM_OBJS) $(LDLIBS) -o $@
$(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
$(SPLIT_TEST_GEN_PROGS): $(OUTPUT)/%: $(OUTPUT)/%.o $(OUTPUT)/$(ARCH_DIR)/%.o
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@
$(SPLIT_TEST_GEN_OBJ): $(OUTPUT)/$(ARCH_DIR)/%.o: $(ARCH_DIR)/%.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
EXTRA_CLEAN += $(GEN_HDRS) \
$(LIBKVM_OBJS) \
$(SPLIT_TEST_GEN_OBJ) \
$(TEST_DEP_FILES) \
$(TEST_GEN_OBJ) \
cscope.*
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c $(GEN_HDRS)
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S $(GEN_HDRS)
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
# Compile the string overrides as freestanding to prevent the compiler from
# generating self-referential code, e.g. without "freestanding" the compiler may
# "optimize" memcmp() by invoking memcmp(), thus causing infinite recursion.
$(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@
$(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
$(SPLIT_TEST_GEN_OBJ): $(GEN_HDRS)
$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
$(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
$(TEST_GEN_OBJ): $(GEN_HDRS)
cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
cscope:
$(RM) cscope.*
(find $(include_paths) -name '*.h' \
-exec realpath --relative-base=$(PWD) {} \;; \
find . -name '*.c' \
-exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files
cscope -b

View File

@@ -0,0 +1,330 @@
# SPDX-License-Identifier: GPL-2.0-only
include ../../../build/Build.include
all:
LIBKVM += lib/assert.c
LIBKVM += lib/elf.c
LIBKVM += lib/guest_modes.c
LIBKVM += lib/io.c
LIBKVM += lib/kvm_util.c
LIBKVM += lib/memstress.c
LIBKVM += lib/guest_sprintf.c
LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
LIBKVM += lib/test_util.c
LIBKVM += lib/ucall_common.c
LIBKVM += lib/userfaultfd_util.c
LIBKVM_STRING += lib/string_override.c
LIBKVM_x86 += lib/x86/apic.c
LIBKVM_x86 += lib/x86/handlers.S
LIBKVM_x86 += lib/x86/hyperv.c
LIBKVM_x86 += lib/x86/memstress.c
LIBKVM_x86 += lib/x86/pmu.c
LIBKVM_x86 += lib/x86/processor.c
LIBKVM_x86 += lib/x86/sev.c
LIBKVM_x86 += lib/x86/svm.c
LIBKVM_x86 += lib/x86/ucall.c
LIBKVM_x86 += lib/x86/vmx.c
LIBKVM_arm64 += lib/arm64/gic.c
LIBKVM_arm64 += lib/arm64/gic_v3.c
LIBKVM_arm64 += lib/arm64/gic_v3_its.c
LIBKVM_arm64 += lib/arm64/handlers.S
LIBKVM_arm64 += lib/arm64/processor.c
LIBKVM_arm64 += lib/arm64/spinlock.c
LIBKVM_arm64 += lib/arm64/ucall.c
LIBKVM_arm64 += lib/arm64/vgic.c
LIBKVM_s390 += lib/s390/diag318_test_handler.c
LIBKVM_s390 += lib/s390/processor.c
LIBKVM_s390 += lib/s390/ucall.c
LIBKVM_s390 += lib/s390/facility.c
LIBKVM_riscv += lib/riscv/handlers.S
LIBKVM_riscv += lib/riscv/processor.c
LIBKVM_riscv += lib/riscv/ucall.c
# Non-compiled test targets
TEST_PROGS_x86 += x86/nx_huge_pages_test.sh
# Compiled test targets
TEST_GEN_PROGS_x86 = x86/cpuid_test
TEST_GEN_PROGS_x86 += x86/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86 += x86/dirty_log_page_splitting_test
TEST_GEN_PROGS_x86 += x86/feature_msrs_test
TEST_GEN_PROGS_x86 += x86/exit_on_emulation_failure_test
TEST_GEN_PROGS_x86 += x86/fix_hypercall_test
TEST_GEN_PROGS_x86 += x86/hwcr_msr_test
TEST_GEN_PROGS_x86 += x86/hyperv_clock
TEST_GEN_PROGS_x86 += x86/hyperv_cpuid
TEST_GEN_PROGS_x86 += x86/hyperv_evmcs
TEST_GEN_PROGS_x86 += x86/hyperv_extended_hypercalls
TEST_GEN_PROGS_x86 += x86/hyperv_features
TEST_GEN_PROGS_x86 += x86/hyperv_ipi
TEST_GEN_PROGS_x86 += x86/hyperv_svm_test
TEST_GEN_PROGS_x86 += x86/hyperv_tlb_flush
TEST_GEN_PROGS_x86 += x86/kvm_clock_test
TEST_GEN_PROGS_x86 += x86/kvm_pv_test
TEST_GEN_PROGS_x86 += x86/monitor_mwait_test
TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
TEST_GEN_PROGS_x86 += x86/platform_info_test
TEST_GEN_PROGS_x86 += x86/pmu_counters_test
TEST_GEN_PROGS_x86 += x86/pmu_event_filter_test
TEST_GEN_PROGS_x86 += x86/private_mem_conversions_test
TEST_GEN_PROGS_x86 += x86/private_mem_kvm_exits_test
TEST_GEN_PROGS_x86 += x86/set_boot_cpu_id
TEST_GEN_PROGS_x86 += x86/set_sregs_test
TEST_GEN_PROGS_x86 += x86/smaller_maxphyaddr_emulation_test
TEST_GEN_PROGS_x86 += x86/smm_test
TEST_GEN_PROGS_x86 += x86/state_test
TEST_GEN_PROGS_x86 += x86/vmx_preemption_timer_test
TEST_GEN_PROGS_x86 += x86/svm_vmcall_test
TEST_GEN_PROGS_x86 += x86/svm_int_ctl_test
TEST_GEN_PROGS_x86 += x86/svm_nested_shutdown_test
TEST_GEN_PROGS_x86 += x86/svm_nested_soft_inject_test
TEST_GEN_PROGS_x86 += x86/tsc_scaling_sync
TEST_GEN_PROGS_x86 += x86/sync_regs_test
TEST_GEN_PROGS_x86 += x86/ucna_injection_test
TEST_GEN_PROGS_x86 += x86/userspace_io_test
TEST_GEN_PROGS_x86 += x86/userspace_msr_exit_test
TEST_GEN_PROGS_x86 += x86/vmx_apic_access_test
TEST_GEN_PROGS_x86 += x86/vmx_close_while_nested_test
TEST_GEN_PROGS_x86 += x86/vmx_dirty_log_test
TEST_GEN_PROGS_x86 += x86/vmx_exception_with_invalid_guest_state
TEST_GEN_PROGS_x86 += x86/vmx_msrs_test
TEST_GEN_PROGS_x86 += x86/vmx_invalid_nested_guest_state
TEST_GEN_PROGS_x86 += x86/vmx_set_nested_state_test
TEST_GEN_PROGS_x86 += x86/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86 += x86/vmx_nested_tsc_scaling_test
TEST_GEN_PROGS_x86 += x86/apic_bus_clock_test
TEST_GEN_PROGS_x86 += x86/xapic_ipi_test
TEST_GEN_PROGS_x86 += x86/xapic_state_test
TEST_GEN_PROGS_x86 += x86/xcr0_cpuid_test
TEST_GEN_PROGS_x86 += x86/xss_msr_test
TEST_GEN_PROGS_x86 += x86/debug_regs
TEST_GEN_PROGS_x86 += x86/tsc_msrs_test
TEST_GEN_PROGS_x86 += x86/vmx_pmu_caps_test
TEST_GEN_PROGS_x86 += x86/xen_shinfo_test
TEST_GEN_PROGS_x86 += x86/xen_vmcall_test
TEST_GEN_PROGS_x86 += x86/sev_init2_tests
TEST_GEN_PROGS_x86 += x86/sev_migrate_tests
TEST_GEN_PROGS_x86 += x86/sev_smoke_test
TEST_GEN_PROGS_x86 += x86/amx_test
TEST_GEN_PROGS_x86 += x86/max_vcpuid_cap_test
TEST_GEN_PROGS_x86 += x86/triple_fault_event_test
TEST_GEN_PROGS_x86 += x86/recalc_apic_map_test
TEST_GEN_PROGS_x86 += access_tracking_perf_test
TEST_GEN_PROGS_x86 += coalesced_io_test
TEST_GEN_PROGS_x86 += demand_paging_test
TEST_GEN_PROGS_x86 += dirty_log_test
TEST_GEN_PROGS_x86 += dirty_log_perf_test
TEST_GEN_PROGS_x86 += guest_memfd_test
TEST_GEN_PROGS_x86 += guest_print_test
TEST_GEN_PROGS_x86 += hardware_disable_test
TEST_GEN_PROGS_x86 += kvm_create_max_vcpus
TEST_GEN_PROGS_x86 += kvm_page_table_test
TEST_GEN_PROGS_x86 += memslot_modification_stress_test
TEST_GEN_PROGS_x86 += memslot_perf_test
TEST_GEN_PROGS_x86 += mmu_stress_test
TEST_GEN_PROGS_x86 += rseq_test
TEST_GEN_PROGS_x86 += set_memory_region_test
TEST_GEN_PROGS_x86 += steal_time
TEST_GEN_PROGS_x86 += kvm_binary_stats_test
TEST_GEN_PROGS_x86 += system_counter_offset_test
TEST_GEN_PROGS_x86 += pre_fault_memory_test
# Compiled outputs used by test targets
TEST_GEN_PROGS_EXTENDED_x86 += x86/nx_huge_pages_test
TEST_GEN_PROGS_arm64 += arm64/aarch32_id_regs
TEST_GEN_PROGS_arm64 += arm64/arch_timer_edge_cases
TEST_GEN_PROGS_arm64 += arm64/debug-exceptions
TEST_GEN_PROGS_arm64 += arm64/hypercalls
TEST_GEN_PROGS_arm64 += arm64/mmio_abort
TEST_GEN_PROGS_arm64 += arm64/page_fault_test
TEST_GEN_PROGS_arm64 += arm64/psci_test
TEST_GEN_PROGS_arm64 += arm64/set_id_regs
TEST_GEN_PROGS_arm64 += arm64/smccc_filter
TEST_GEN_PROGS_arm64 += arm64/vcpu_width_config
TEST_GEN_PROGS_arm64 += arm64/vgic_init
TEST_GEN_PROGS_arm64 += arm64/vgic_irq
TEST_GEN_PROGS_arm64 += arm64/vgic_lpi_stress
TEST_GEN_PROGS_arm64 += arm64/vpmu_counter_access
TEST_GEN_PROGS_arm64 += arm64/no-vgic-v3
TEST_GEN_PROGS_arm64 += access_tracking_perf_test
TEST_GEN_PROGS_arm64 += arch_timer
TEST_GEN_PROGS_arm64 += coalesced_io_test
TEST_GEN_PROGS_arm64 += demand_paging_test
TEST_GEN_PROGS_arm64 += dirty_log_test
TEST_GEN_PROGS_arm64 += dirty_log_perf_test
TEST_GEN_PROGS_arm64 += guest_print_test
TEST_GEN_PROGS_arm64 += get-reg-list
TEST_GEN_PROGS_arm64 += kvm_create_max_vcpus
TEST_GEN_PROGS_arm64 += kvm_page_table_test
TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
TEST_GEN_PROGS_arm64 += memslot_perf_test
TEST_GEN_PROGS_arm64 += mmu_stress_test
TEST_GEN_PROGS_arm64 += rseq_test
TEST_GEN_PROGS_arm64 += set_memory_region_test
TEST_GEN_PROGS_arm64 += steal_time
TEST_GEN_PROGS_arm64 += kvm_binary_stats_test
TEST_GEN_PROGS_s390 = s390/memop
TEST_GEN_PROGS_s390 += s390/resets
TEST_GEN_PROGS_s390 += s390/sync_regs_test
TEST_GEN_PROGS_s390 += s390/tprot
TEST_GEN_PROGS_s390 += s390/cmma_test
TEST_GEN_PROGS_s390 += s390/debug_test
TEST_GEN_PROGS_s390 += s390/cpumodel_subfuncs_test
TEST_GEN_PROGS_s390 += s390/shared_zeropage_test
TEST_GEN_PROGS_s390 += s390/ucontrol_test
TEST_GEN_PROGS_s390 += demand_paging_test
TEST_GEN_PROGS_s390 += dirty_log_test
TEST_GEN_PROGS_s390 += guest_print_test
TEST_GEN_PROGS_s390 += kvm_create_max_vcpus
TEST_GEN_PROGS_s390 += kvm_page_table_test
TEST_GEN_PROGS_s390 += rseq_test
TEST_GEN_PROGS_s390 += set_memory_region_test
TEST_GEN_PROGS_s390 += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
TEST_GEN_PROGS_riscv += riscv/ebreak_test
TEST_GEN_PROGS_riscv += arch_timer
TEST_GEN_PROGS_riscv += coalesced_io_test
TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
TEST_GEN_PROGS_riscv += get-reg-list
TEST_GEN_PROGS_riscv += guest_print_test
TEST_GEN_PROGS_riscv += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += steal_time
SPLIT_TESTS += arch_timer
SPLIT_TESTS += get-reg-list
TEST_PROGS += $(TEST_PROGS_$(ARCH))
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH))
TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH))
LIBKVM += $(LIBKVM_$(ARCH))
OVERRIDE_TARGETS = 1
# lib.mk defines $(OUTPUT), prepends $(OUTPUT)/ to $(TEST_GEN_PROGS), and most
# importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`,
# which causes the environment variable to override the makefile).
include ../lib.mk
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \
-fno-builtin-memcmp -fno-builtin-memcpy \
-fno-builtin-memset -fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -fno-strict-aliasing \
-I$(LINUX_TOOL_INCLUDE) -I$(LINUX_TOOL_ARCH_INCLUDE) \
-I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(ARCH) \
-I ../rseq -I.. $(EXTRA_CFLAGS) $(KHDR_INCLUDES)
ifeq ($(ARCH),s390)
CFLAGS += -march=z10
endif
ifeq ($(ARCH),x86)
ifeq ($(shell echo "void foo(void) { }" | $(CC) -march=x86-64-v2 -x c - -c -o /dev/null 2>/dev/null; echo "$$?"),0)
CFLAGS += -march=x86-64-v2
endif
endif
ifeq ($(ARCH),arm64)
tools_dir := $(top_srcdir)/tools
arm64_tools_dir := $(tools_dir)/arch/arm64/tools/
ifneq ($(abs_objdir),)
arm64_hdr_outdir := $(abs_objdir)/tools/
else
arm64_hdr_outdir := $(tools_dir)/
endif
GEN_HDRS := $(arm64_hdr_outdir)arch/arm64/include/generated/
CFLAGS += -I$(GEN_HDRS)
$(GEN_HDRS): $(wildcard $(arm64_tools_dir)/*)
$(MAKE) -C $(arm64_tools_dir) OUTPUT=$(arm64_hdr_outdir)
endif
no-pie-option := $(call try-run, echo 'int main(void) { return 0; }' | \
$(CC) -Werror $(CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
# On s390, build the testcases KVM-enabled
pgste-option = $(call try-run, echo 'int main(void) { return 0; }' | \
$(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)
LDLIBS += -ldl
LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
LIBKVM_C := $(filter %.c,$(LIBKVM))
LIBKVM_S := $(filter %.S,$(LIBKVM))
LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
SPLIT_TEST_GEN_PROGS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
SPLIT_TEST_GEN_OBJ := $(patsubst %, $(OUTPUT)/$(ARCH)/%.o, $(SPLIT_TESTS))
TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TEST_GEN_OBJ))
-include $(TEST_DEP_FILES)
$(shell mkdir -p $(sort $(OUTPUT)/$(ARCH) $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(filter-out $(SPLIT_TEST_GEN_PROGS), $(TEST_GEN_PROGS)) \
$(TEST_GEN_PROGS_EXTENDED): %: %.o
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIBKVM_OBJS) $(LDLIBS) -o $@
$(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
$(SPLIT_TEST_GEN_PROGS): $(OUTPUT)/%: $(OUTPUT)/%.o $(OUTPUT)/$(ARCH)/%.o
$(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@
$(SPLIT_TEST_GEN_OBJ): $(OUTPUT)/$(ARCH)/%.o: $(ARCH)/%.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
EXTRA_CLEAN += $(GEN_HDRS) \
$(LIBKVM_OBJS) \
$(SPLIT_TEST_GEN_OBJ) \
$(TEST_DEP_FILES) \
$(TEST_GEN_OBJ) \
cscope.*
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c $(GEN_HDRS)
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S $(GEN_HDRS)
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
# Compile the string overrides as freestanding to prevent the compiler from
# generating self-referential code, e.g. without "freestanding" the compiler may
# "optimize" memcmp() by invoking memcmp(), thus causing infinite recursion.
$(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@
$(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
$(SPLIT_TEST_GEN_OBJ): $(GEN_HDRS)
$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
$(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
$(TEST_GEN_OBJ): $(GEN_HDRS)
cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
cscope:
$(RM) cscope.*
(find $(include_paths) -name '*.h' \
-exec realpath --relative-base=$(PWD) {} \;; \
find . -name '*.c' \
-exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files
cscope -b

View File

@@ -97,7 +97,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
uint64_t reg_id = raz_wi_reg_ids[i];
uint64_t val;
vcpu_get_reg(vcpu, reg_id, &val);
val = vcpu_get_reg(vcpu, reg_id);
TEST_ASSERT_EQ(val, 0);
/*
@@ -106,7 +106,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
*/
vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
vcpu_get_reg(vcpu, reg_id, &val);
val = vcpu_get_reg(vcpu, reg_id);
TEST_ASSERT_EQ(val, 0);
}
}
@@ -126,14 +126,14 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
uint64_t reg_id = raz_invariant_reg_ids[i];
uint64_t val;
vcpu_get_reg(vcpu, reg_id, &val);
val = vcpu_get_reg(vcpu, reg_id);
TEST_ASSERT_EQ(val, 0);
r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
TEST_ASSERT(r < 0 && errno == EINVAL,
"unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
vcpu_get_reg(vcpu, reg_id, &val);
val = vcpu_get_reg(vcpu, reg_id);
TEST_ASSERT_EQ(val, 0);
}
}
@@ -144,7 +144,7 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
{
uint64_t val, el0;
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY;

View File

@@ -501,7 +501,7 @@ void test_single_step_from_userspace(int test_cnt)
TEST_ASSERT(ss_enable, "Unexpected KVM_EXIT_DEBUG");
/* Check if the current pc is expected. */
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
TEST_ASSERT(!test_pc || pc == test_pc,
"Unexpected pc 0x%lx (expected 0x%lx)",
pc, test_pc);
@@ -583,7 +583,7 @@ int main(int argc, char *argv[])
uint64_t aa64dfr0;
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &aa64dfr0);
aa64dfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
__TEST_REQUIRE(debug_version(aa64dfr0) >= 6,
"Armv8 debug architecture not supported.");
kvm_vm_free(vm);

View File

@@ -173,7 +173,7 @@ static void test_fw_regs_before_vm_start(struct kvm_vcpu *vcpu)
const struct kvm_fw_reg_info *reg_info = &fw_reg_info[i];
/* First 'read' should be an upper limit of the features supported */
vcpu_get_reg(vcpu, reg_info->reg, &val);
val = vcpu_get_reg(vcpu, reg_info->reg);
TEST_ASSERT(val == FW_REG_ULIMIT_VAL(reg_info->max_feat_bit),
"Expected all the features to be set for reg: 0x%lx; expected: 0x%lx; read: 0x%lx",
reg_info->reg, FW_REG_ULIMIT_VAL(reg_info->max_feat_bit), val);
@@ -184,7 +184,7 @@
"Failed to clear all the features of reg: 0x%lx; ret: %d",
reg_info->reg, errno);
vcpu_get_reg(vcpu, reg_info->reg, &val);
val = vcpu_get_reg(vcpu, reg_info->reg);
TEST_ASSERT(val == 0,
"Expected all the features to be cleared for reg: 0x%lx", reg_info->reg);
@@ -214,7 +214,7 @@ static void test_fw_regs_after_vm_start(struct kvm_vcpu *vcpu)
* Before starting the VM, the test clears all the bits.
* Check if that's still the case.
*/
vcpu_get_reg(vcpu, reg_info->reg, &val);
val = vcpu_get_reg(vcpu, reg_info->reg);
TEST_ASSERT(val == 0,
"Expected all the features to be cleared for reg: 0x%lx",
reg_info->reg);

View File

@@ -164,7 +164,7 @@ int main(int argc, char *argv[])
uint64_t pfr0;
vm = vm_create_with_one_vcpu(&vcpu, NULL);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &pfr0);
pfr0 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
__TEST_REQUIRE(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), pfr0),
"GICv3 not supported.");
kvm_vm_free(vm);

View File

@@ -111,8 +111,8 @@ static void assert_vcpu_reset(struct kvm_vcpu *vcpu)
{
uint64_t obs_pc, obs_x0;
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &obs_pc);
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &obs_x0);
obs_pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
obs_x0 = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]));
TEST_ASSERT(obs_pc == CPU_ON_ENTRY_ADDR,
"unexpected target cpu pc: %lx (expected: %lx)",
@@ -152,7 +152,7 @@ static void host_test_cpu_on(void)
*/
vcpu_power_off(target);
vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
target_mpidr = vcpu_get_reg(target, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1));
vcpu_args_set(source, 1, target_mpidr & MPIDR_HWID_BITMASK);
enter_guest(source);
@@ -244,7 +244,7 @@ static void host_test_system_off2(void)
setup_vm(guest_test_system_off2, &source, &target);
vcpu_get_reg(target, KVM_REG_ARM_PSCI_VERSION, &psci_version);
psci_version = vcpu_get_reg(target, KVM_REG_ARM_PSCI_VERSION);
TEST_ASSERT(psci_version >= PSCI_VERSION(1, 3),
"Unexpected PSCI version %lu.%lu",

View File

@@ -346,7 +346,7 @@ static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
uint64_t mask = ftr_bits->mask;
uint64_t val, new_val, ftr;
vcpu_get_reg(vcpu, reg, &val);
val = vcpu_get_reg(vcpu, reg);
ftr = (val & mask) >> shift;
ftr = get_safe_value(ftr_bits, ftr);
@@ -356,7 +356,7 @@
val |= ftr;
vcpu_set_reg(vcpu, reg, val);
vcpu_get_reg(vcpu, reg, &new_val);
new_val = vcpu_get_reg(vcpu, reg);
TEST_ASSERT_EQ(new_val, val);
return new_val;
@@ -370,7 +370,7 @@ static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
uint64_t val, old_val, ftr;
int r;
vcpu_get_reg(vcpu, reg, &val);
val = vcpu_get_reg(vcpu, reg);
ftr = (val & mask) >> shift;
ftr = get_invalid_value(ftr_bits, ftr);
@@ -384,7 +384,7 @@
TEST_ASSERT(r < 0 && errno == EINVAL,
"Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
vcpu_get_reg(vcpu, reg, &val);
val = vcpu_get_reg(vcpu, reg);
TEST_ASSERT_EQ(val, old_val);
}
@@ -471,7 +471,7 @@ static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
}
/* Get the id register value */
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
/* Try to set MPAM=0. This should always be possible. */
val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
@@ -508,7 +508,7 @@ static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
}
/* Get the id register value */
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), &val);
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
/* Try to set MPAM_frac=0. This should always be possible. */
val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
@@ -576,7 +576,7 @@ static void test_clidr(struct kvm_vcpu *vcpu)
uint64_t clidr;
int level;
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), &clidr);
clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));
/* find the first empty level in the cache hierarchy */
for (level = 1; level < 7; level++) {
@@ -601,7 +601,7 @@ static void test_ctr(struct kvm_vcpu *vcpu)
{
u64 ctr;
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), &ctr);
ctr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0));
ctr &= ~CTR_EL0_DIC_MASK;
if (ctr & CTR_EL0_IminLine_MASK)
ctr--;
@@ -617,7 +617,7 @@ static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
test_clidr(vcpu);
test_ctr(vcpu);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &val);
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1));
val++;
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);
@@ -630,7 +630,7 @@ static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encodin
size_t idx = encoding_to_range_idx(encoding);
uint64_t observed;
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding), &observed);
observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
TEST_ASSERT_EQ(test_reg_vals[idx], observed);
}
@@ -665,7 +665,7 @@ int main(void)
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
/* Check for AARCH64 only system */
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);

View File

@@ -440,8 +440,7 @@ static void create_vpmu_vm(void *guest_code)
"Failed to create vgic-v3, skipping");
/* Make sure that PMUv3 support is indicated in the ID register */
vcpu_get_reg(vpmu_vm.vcpu,
KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
dfr0 = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1));
pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0);
TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
@@ -484,7 +483,7 @@ static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail)
create_vpmu_vm(guest_code);
vcpu = vpmu_vm.vcpu;
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
pmcr_orig = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
pmcr = pmcr_orig;
/*
@@ -493,7 +492,7 @@
*/
set_pmcr_n(&pmcr, pmcr_n);
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
pmcr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
if (expect_fail)
TEST_ASSERT(pmcr_orig == pmcr,
@@ -521,7 +520,7 @@ static void run_access_test(uint64_t pmcr_n)
vcpu = vpmu_vm.vcpu;
/* Save the initial sp to restore them later to run the guest again */
vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
sp = vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1));
run_vcpu(vcpu, pmcr_n);
@@ -572,12 +571,12 @@ static void run_pmregs_validity_test(uint64_t pmcr_n)
* Test if the 'set' and 'clr' variants of the registers
* are initialized based on the number of valid counters.
*/
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id));
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
"Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(set_reg_id), reg_val);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id));
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
"Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
@@ -589,12 +588,12 @@
*/
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), &reg_val);
reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id));
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
"Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(set_reg_id), reg_val);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), &reg_val);
reg_val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id));
TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0,
"Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx",
KVM_ARM64_SYS_REG(clr_reg_id), reg_val);
@@ -625,7 +624,7 @@ static uint64_t get_pmcr_n_limit(void)
uint64_t pmcr;
create_vpmu_vm(guest_code);
vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
pmcr = vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0));
destroy_vpmu_vm();
return get_pmcr_n(pmcr);
}

View File

@@ -21,7 +21,7 @@
#include "ucall_common.h"
#ifdef __aarch64__
#include "aarch64/vgic.h"
#include "arm64/vgic.h"
static int gic_fd;

View File

@@ -702,16 +702,22 @@ static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t va
return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
static inline uint64_t vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id)
{
struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };
uint64_t val;
struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
return val;
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };
TEST_ASSERT(KVM_REG_SIZE(id) <= sizeof(val), "Reg %lx too big", id);
vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

View File

@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/apic.h
*
* Copyright (C) 2021, Google LLC.
*/

View File

@@ -1,9 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* tools/testing/selftests/kvm/include/x86_64/evmcs.h
*
* Copyright (C) 2018, Red Hat, Inc.
*
*/
#ifndef SELFTEST_KVM_EVMCS_H

View File

@@ -1,9 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* tools/testing/selftests/kvm/include/x86_64/hyperv.h
*
* Copyright (C) 2021, Red Hat, Inc.
*
*/
#ifndef SELFTEST_KVM_HYPERV_H

View File

@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/mce.h
*
* Copyright (C) 2022, Google LLC.
*/

View File

@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/processor.h
*
* Copyright (C) 2018, Google LLC.
*/

View File

@@ -1,10 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* tools/testing/selftests/kvm/include/x86_64/svm.h
* This is a copy of arch/x86/include/asm/svm.h
*
*/
#ifndef SELFTEST_KVM_SVM_H
#define SELFTEST_KVM_SVM_H

View File

@@ -1,8 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/svm_utils.h
* Header for nested SVM testing
*
* Copyright (C) 2020, Red Hat, Inc.
*/

View File

@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* tools/testing/selftests/kvm/include/x86_64/vmx.h
*
* Copyright (C) 2018, Google LLC.
*/

View File

@@ -281,8 +281,8 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
*/
vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
sctlr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1));
tcr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1));
/* Configure base granule size */
switch (vm->mode) {
@@ -360,8 +360,8 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
uint64_t pstate, pc;
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate);
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc);
pstate = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate));
pc = vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc));
fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
indent, "", pstate, pc);

View File

@@ -1648,7 +1648,8 @@ int _vcpu_run(struct kvm_vcpu *vcpu)
rc = __vcpu_run(vcpu);
} while (rc == -1 && errno == EINTR);
assert_on_unhandled_exception(vcpu);
if (!rc)
assert_on_unhandled_exception(vcpu);
return rc;
}

View File

@@ -221,39 +221,39 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
struct kvm_riscv_core core;
vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6);
core.mode = vcpu_get_reg(vcpu, RISCV_CORE_REG(mode));
core.regs.pc = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc));
core.regs.ra = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra));
core.regs.sp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp));
core.regs.gp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp));
core.regs.tp = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp));
core.regs.t0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0));
core.regs.t1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1));
core.regs.t2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2));
core.regs.s0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0));
core.regs.s1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1));
core.regs.a0 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0));
core.regs.a1 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1));
core.regs.a2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2));
core.regs.a3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3));
core.regs.a4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4));
core.regs.a5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5));
core.regs.a6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6));
core.regs.a7 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7));
core.regs.s2 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2));
core.regs.s3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3));
core.regs.s4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4));
core.regs.s5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5));
core.regs.s6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6));
core.regs.s7 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7));
core.regs.s8 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8));
core.regs.s9 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9));
core.regs.s10 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10));
core.regs.s11 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11));
core.regs.t3 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3));
core.regs.t4 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4));
core.regs.t5 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5));
core.regs.t6 = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6));
fprintf(stream,
" MODE: 0x%lx\n", core.mode);

View File

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* x86_64-specific extensions to memstress.c.
* x86-specific extensions to memstress.c.
*
* Copyright (C) 2022, Google, Inc.
*/

View File

@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/processor.c
*
* Copyright (C) 2018, Google LLC.
*/

View File

@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/svm.c
* Helpers used for nested SVM testing
* Largely inspired from KVM unit test svm.c
*

View File

@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* tools/testing/selftests/kvm/lib/x86_64/vmx.c
*
* Copyright (C) 2018, Google LLC.
*/

View File

@@ -15,16 +15,62 @@
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"
#include "ucall_common.h"
static bool mprotect_ro_done;
static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
uint64_t gpa;
int i;
for (;;) {
for (i = 0; i < 2; i++) {
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
*((volatile uint64_t *)gpa) = gpa;
GUEST_SYNC(0);
vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
GUEST_SYNC(i);
}
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
*((volatile uint64_t *)gpa);
GUEST_SYNC(2);
/*
* Write to the region while mprotect(PROT_READ) is underway. Keep
* looping until the memory is guaranteed to be read-only, otherwise
* vCPUs may complete their writes and advance to the next stage
* prematurely.
*
* For architectures that support skipping the faulting instruction,
* generate the store via inline assembly to ensure the exact length
* of the instruction is known and stable (vcpu_arch_put_guest() on
* fixed-length architectures should work, but the cost of paranoia
* is low in this case). For x86, hand-code the exact opcode so that
* there is no room for variability in the generated instruction.
*/
do {
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
#ifdef __x86_64__
asm volatile(".byte 0x48,0x89,0x00" :: "a"(gpa) : "memory"); /* mov %rax, (%rax) */
#elif defined(__aarch64__)
asm volatile("str %0, [%0]" :: "r" (gpa) : "memory");
#else
vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
#endif
} while (!READ_ONCE(mprotect_ro_done));
/*
* Only architectures that write the entire range can explicitly sync,
* as other architectures will be stuck on the write fault.
*/
#if defined(__x86_64__) || defined(__aarch64__)
GUEST_SYNC(3);
#endif
for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
GUEST_SYNC(4);
GUEST_ASSERT(0);
}
struct vcpu_info {
@@ -51,34 +97,100 @@ static void rendezvous_with_boss(void)
}
}
static void run_vcpu(struct kvm_vcpu *vcpu)
static void assert_sync_stage(struct kvm_vcpu *vcpu, int stage)
{
struct ucall uc;
TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
TEST_ASSERT_EQ(uc.args[1], stage);
}
static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
vcpu_run(vcpu);
TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
assert_sync_stage(vcpu, stage);
}
static void *vcpu_worker(void *data)
{
struct kvm_sregs __maybe_unused sregs;
struct vcpu_info *info = data;
struct kvm_vcpu *vcpu = info->vcpu;
struct kvm_vm *vm = vcpu->vm;
struct kvm_sregs sregs;
int r;
vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);
rendezvous_with_boss();
run_vcpu(vcpu);
/* Stage 0, write all of guest memory. */
run_vcpu(vcpu, 0);
rendezvous_with_boss();
vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
vcpu_sregs_get(vcpu, &sregs);
/* Toggle CR0.WP to trigger a MMU context reset. */
sregs.cr0 ^= X86_CR0_WP;
#endif
vcpu_sregs_set(vcpu, &sregs);
#endif
rendezvous_with_boss();
run_vcpu(vcpu);
/* Stage 1, re-write all of guest memory. */
run_vcpu(vcpu, 1);
rendezvous_with_boss();
/* Stage 2, read all of guest memory, which is now read-only. */
run_vcpu(vcpu, 2);
/*
* Stage 3, write guest memory and verify KVM returns -EFAULT once the
* mprotect(PROT_READ) lands. Only architectures that support validating
* *all* of guest memory sync for this stage, as vCPUs will be stuck on
* the faulting instruction for other architectures. Go to stage 3
* without a rendezvous.
*/
do {
r = _vcpu_run(vcpu);
} while (!r);
TEST_ASSERT(r == -1 && errno == EFAULT,
"Expected EFAULT on write to RO memory, got r = %d, errno = %d", r, errno);
#if defined(__x86_64__) || defined(__aarch64__)
/*
* Verify *all* writes from the guest hit EFAULT due to the VMA now
* being read-only. x86 and arm64 only at this time as skipping the
* instruction that hits the EFAULT requires advancing the program
* counter, which is arch specific and relies on inline assembly.
*/
#ifdef __x86_64__
vcpu->run->kvm_valid_regs = KVM_SYNC_X86_REGS;
#endif
for (;;) {
r = _vcpu_run(vcpu);
if (!r)
break;
TEST_ASSERT_EQ(errno, EFAULT);
#if defined(__x86_64__)
WRITE_ONCE(vcpu->run->kvm_dirty_regs, KVM_SYNC_X86_REGS);
vcpu->run->s.regs.regs.rip += 3;
#elif defined(__aarch64__)
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc),
vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)) + 4);
#endif
}
assert_sync_stage(vcpu, 3);
#endif /* __x86_64__ || __aarch64__ */
rendezvous_with_boss();
/*
* Stage 4. Run to completion, waiting for mprotect(PROT_WRITE) to
* make the memory writable again.
*/
do {
r = _vcpu_run(vcpu);
} while (r && errno == EFAULT);
TEST_ASSERT_EQ(r, 0);
assert_sync_stage(vcpu, 4);
rendezvous_with_boss();
return NULL;
@@ -161,7 +273,7 @@ int main(int argc, char *argv[])
const uint64_t start_gpa = SZ_4G;
const int first_slot = 1;
struct timespec time_start, time_run1, time_reset, time_run2;
struct timespec time_start, time_run1, time_reset, time_run2, time_ro, time_rw;
uint64_t max_gpa, gpa, slot_size, max_mem, i;
int max_slots, slot, opt, fd;
bool hugepages = false;
@@ -209,7 +321,13 @@ int main(int argc, char *argv[])
vcpus = malloc(nr_vcpus * sizeof(*vcpus));
TEST_ASSERT(vcpus, "Failed to allocate vCPU array");
vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
vm = __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus,
#ifdef __x86_64__
max_mem / SZ_1G,
#else
max_mem / vm_guest_mode_params[VM_MODE_DEFAULT].page_size,
#endif
guest_code, vcpus);
max_gpa = vm->max_gfn << vm->page_shift;
TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb ");
@@ -259,14 +377,28 @@ int main(int argc, char *argv[])
rendezvous_with_vcpus(&time_reset, "reset");
rendezvous_with_vcpus(&time_run2, "run 2");
mprotect(mem, slot_size, PROT_READ);
usleep(10);
mprotect_ro_done = true;
sync_global_to_guest(vm, mprotect_ro_done);
rendezvous_with_vcpus(&time_ro, "mprotect RO");
mprotect(mem, slot_size, PROT_READ | PROT_WRITE);
rendezvous_with_vcpus(&time_rw, "mprotect RW");
time_rw = timespec_sub(time_rw, time_ro);
time_ro = timespec_sub(time_ro, time_run2);
time_run2 = timespec_sub(time_run2, time_reset);
time_reset = timespec_sub(time_reset, time_run1);
time_reset = timespec_sub(time_reset, time_run1);
time_run1 = timespec_sub(time_run1, time_start);
pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds\n",
pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds, "
"ro = %ld.%.9lds, rw = %ld.%.9lds\n",
time_run1.tv_sec, time_run1.tv_nsec,
time_reset.tv_sec, time_reset.tv_nsec,
time_run2.tv_sec, time_run2.tv_nsec);
time_run2.tv_sec, time_run2.tv_nsec,
time_ro.tv_sec, time_ro.tv_nsec,
time_rw.tv_sec, time_rw.tv_nsec);
/*
* Delete even numbered slots (arbitrary) and unmap the first half of

View File

@@ -93,7 +93,7 @@ struct kvm_vm *test_vm_create(void)
vcpu_init_vector_tables(vcpus[i]);
/* Initialize guest timer frequency. */
vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency), &timer_freq);
timer_freq = vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency));
sync_global_to_guest(vm, timer_freq);
pr_debug("timer_freq: %lu\n", timer_freq);

View File

@@ -60,7 +60,7 @@ int main(void)
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG);
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &pc);
pc = vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc));
TEST_ASSERT_EQ(pc, LABEL_ADDRESS(sw_bp_1));
/* skip sw_bp_1 */

View File

@@ -608,7 +608,7 @@ static void test_vm_events_overflow(void *guest_code)
vcpu_init_vector_tables(vcpu);
/* Initialize guest timer frequency. */
vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency), &timer_freq);
timer_freq = vcpu_get_reg(vcpu, RISCV_TIMER_REG(frequency));
sync_global_to_guest(vm, timer_freq);
run_vcpu(vcpu);

View File

@@ -61,7 +61,7 @@ static void test_one_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t value)
{
uint64_t eval_reg;
vcpu_get_reg(vcpu, id, &eval_reg);
eval_reg = vcpu_get_reg(vcpu, id);
TEST_ASSERT(eval_reg == value, "value == 0x%lx", value);
}

View File

@@ -17,9 +17,9 @@
#include <processor.h>
/*
* s390x needs at least 1MB alignment, and the x86_64 MOVE/DELETE tests need a
* 2MB sized and aligned region so that the initial region corresponds to
* exactly one large page.
* s390 needs at least 1MB alignment, and the x86 MOVE/DELETE tests need a 2MB
* sized and aligned region so that the initial region corresponds to exactly
* one large page.
*/
#define MEM_REGION_SIZE 0x200000

View File

@@ -269,9 +269,8 @@ static void guest_code(int cpu)
static bool is_steal_time_supported(struct kvm_vcpu *vcpu)
{
uint64_t id = RISCV_SBI_EXT_REG(KVM_RISCV_SBI_EXT_STA);
unsigned long enabled;
unsigned long enabled = vcpu_get_reg(vcpu, id);
vcpu_get_reg(vcpu, id, &enabled);
TEST_ASSERT(enabled == 0 || enabled == 1, "Expected boolean result");
return enabled;

Some files were not shown because too many files have changed in this diff Show More