Merge tag 'kvm-s390-next-6.13-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

- second part of the ucontrol selftest
- cpumodel sanity check selftest
- gen17 cpumodel changes
Paolo Bonzini 2024-11-12 13:17:55 -05:00
commit 185e02d61e
12 changed files with 737 additions and 13 deletions
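For context on how the new interface is consumed from userspace: the PFCR query data is read through the CPU model attribute interface, as the cpumodel selftest below does. A minimal sketch, not part of this commit (assumes an s390 host, an already-created VM file descriptor vm_fd, and a hypothetical helper name; error handling elided):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* dump the 16-byte host PFCR query result reported by KVM */
static void print_host_pfcr(int vm_fd)
{
	struct kvm_s390_vm_cpu_subfunc subfunc;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
		.addr = (__u64)(unsigned long)&subfunc,
	};
	size_t i;

	memset(&subfunc, 0, sizeof(subfunc));
	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
		return;
	for (i = 0; i < sizeof(subfunc.pfcr); i++)
		printf("%02x", subfunc.pfcr[i]);
	printf("\n");
}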

View File

@@ -356,6 +356,7 @@ struct kvm_s390_sie_block {
#define ECD_MEF 0x08000000
#define ECD_ETOKENF 0x02000000
#define ECD_ECC 0x00200000
#define ECD_HMAC 0x00004000
__u32 ecd; /* 0x01c8 */
__u8 reserved1cc[18]; /* 0x01cc */
__u64 pp; /* 0x01de */

View File

@@ -469,7 +469,8 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 kdsa[16]; /* with MSA9 */
__u8 sortl[32]; /* with STFLE.150 */
__u8 dfltcc[32]; /* with STFLE.151 */
__u8 reserved[1728];
__u8 pfcr[16]; /* with STFLE.201 */
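/* pfcr is carved out of the former reserved[1728] (16 + 1712 = 1728), so the uapi struct size is unchanged */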
__u8 reserved[1712];
};
#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6

View File

@@ -348,6 +348,16 @@ static inline int plo_test_bit(unsigned char nr)
return cc == 0;
}
static __always_inline void pfcr_query(u8 (*query)[16])
{
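/*
 * Function code 0 in r0 selects the query; the raw .insn encoding is
 * used so no assembler support for a PFCR mnemonic is required.
 */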
asm volatile(
" lghi 0,0\n"
" .insn rsy,0xeb0000000016,0,0,%[query]\n"
: [query] "=QS" (*query)
:
: "cc", "0");
}
static __always_inline void __sortl_query(u8 (*query)[32])
{
asm volatile(
@@ -429,6 +439,9 @@ static void __init kvm_s390_cpu_feat_init(void)
if (test_facility(151)) /* DFLTCC */
__dfltcc_query(&kvm_s390_available_subfunc.dfltcc);
if (test_facility(201)) /* PFCR */
pfcr_query(&kvm_s390_available_subfunc.pfcr);
if (MACHINE_HAS_ESOP)
allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
/*
@@ -799,6 +812,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
set_kvm_facility(kvm->arch.model.fac_mask, 192);
set_kvm_facility(kvm->arch.model.fac_list, 192);
}
if (test_facility(198)) {
set_kvm_facility(kvm->arch.model.fac_mask, 198);
set_kvm_facility(kvm->arch.model.fac_list, 198);
}
if (test_facility(199)) {
set_kvm_facility(kvm->arch.model.fac_mask, 199);
set_kvm_facility(kvm->arch.model.fac_list, 199);
}
r = 0;
} else
r = -EINVAL;
@@ -1543,6 +1564,9 @@ static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
VM_EVENT(kvm, 3, "GET: guest PFCR subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
return 0;
}
@@ -1757,6 +1781,9 @@ static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
VM_EVENT(kvm, 3, "GET: guest PFCR subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
return 0;
}
@@ -1825,6 +1852,9 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
VM_EVENT(kvm, 3, "GET: host PFCR subfunc 0x%16.16lx.%16.16lx",
((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0],
((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]);
return 0;
}
@@ -3774,6 +3804,13 @@ static bool kvm_has_pckmo_ecc(struct kvm *kvm)
}
static bool kvm_has_pckmo_hmac(struct kvm *kvm)
{
/* At least one HMAC subfunction must be present */
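/* function codes 118 and 122 are presumably the PCKMO HMAC-512 and HMAC-1024 key functions */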
return kvm_has_pckmo_subfunc(kvm, 118) ||
kvm_has_pckmo_subfunc(kvm, 122);
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
/*
@@ -3786,7 +3823,7 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
vcpu->arch.sie_block->eca &= ~ECA_APIE;
vcpu->arch.sie_block->ecd &= ~ECD_ECC;
vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC);
if (vcpu->kvm->arch.crypto.apie)
vcpu->arch.sie_block->eca |= ECA_APIE;
@@ -3794,9 +3831,11 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
/* Set up protected key support */
if (vcpu->kvm->arch.crypto.aes_kw) {
vcpu->arch.sie_block->ecb3 |= ECB3_AES;
/* ecc is also wrapped with AES key */
/* ecc/hmac is also wrapped with AES key */
if (kvm_has_pckmo_ecc(vcpu->kvm))
vcpu->arch.sie_block->ecd |= ECD_ECC;
if (kvm_has_pckmo_hmac(vcpu->kvm))
vcpu->arch.sie_block->ecd |= ECD_HMAC;
}
if (vcpu->kvm->arch.crypto.dea_kw)

View File

@@ -335,7 +335,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* we may only allow it if enabled for guest 2 */
ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
(ECB3_AES | ECB3_DEA);
ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd &
(ECD_ECC | ECD_HMAC);
if (!ecb3_flags && !ecd_flags)
goto end;

View File

@@ -109,10 +109,12 @@ static struct facility_def facility_defs[] = {
15, /* AP Facilities Test */
156, /* etoken facility */
165, /* nnpa facility */
170, /* ineffective-nonconstrained-transaction facility */
193, /* bear enhancement facility */
194, /* rdp enhancement facility */
196, /* processor activity instrumentation facility */
197, /* processor activity instrumentation extension 1 */
201, /* concurrent-functions facility */
-1 /* END */
}
},

View File

@@ -469,7 +469,8 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 kdsa[16]; /* with MSA9 */
__u8 sortl[32]; /* with STFLE.150 */
__u8 dfltcc[32]; /* with STFLE.151 */
__u8 reserved[1728];
__u8 pfcr[16]; /* with STFLE.201 */
__u8 reserved[1712];
};
#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6

View File

@@ -55,6 +55,7 @@ LIBKVM_aarch64 += lib/aarch64/vgic.c
LIBKVM_s390x += lib/s390x/diag318_test_handler.c
LIBKVM_s390x += lib/s390x/processor.c
LIBKVM_s390x += lib/s390x/ucall.c
LIBKVM_s390x += lib/s390x/facility.c
LIBKVM_riscv += lib/riscv/handlers.S
LIBKVM_riscv += lib/riscv/processor.c
@@ -189,6 +190,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += s390x/debug_test
TEST_GEN_PROGS_s390x += s390x/cpumodel_subfuncs_test
TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test
TEST_GEN_PROGS_s390x += s390x/ucontrol_test
TEST_GEN_PROGS_s390x += demand_paging_test

View File

@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright IBM Corp. 2024
*
* Authors:
* Hariharan Mari <hari55@linux.ibm.com>
*
* Get the facility bits with the STFLE instruction
*/
#ifndef SELFTEST_KVM_FACILITY_H
#define SELFTEST_KVM_FACILITY_H
#include <linux/bitops.h>
/* alt_stfle_fac_list[16] + stfle_fac_list[16] */
#define NB_STFL_DOUBLEWORDS 32
extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
extern bool stfle_flag;
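/*
 * Facility bits are numbered MSB-first within each doubleword; XOR with
 * BITS_PER_LONG - 1 converts a facility number to the LSB-first numbering
 * that test_bit() expects.
 */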
static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr)
{
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
static inline void stfle(uint64_t *fac, unsigned int nb_doublewords)
{
register unsigned long r0 asm("0") = nb_doublewords - 1;
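/* STFLE takes the number of doublewords - 1 in r0 and returns the count needed - 1 there */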
asm volatile(" .insn s,0xb2b00000,0(%1)\n"
: "+d" (r0)
: "a" (fac)
: "memory", "cc");
}
static inline void setup_facilities(void)
{
stfle(stfl_doublewords, NB_STFL_DOUBLEWORDS);
stfle_flag = true;
}
static inline bool test_facility(int nr)
{
if (!stfle_flag)
setup_facilities();
return test_bit_inv(nr, stfl_doublewords);
}
#endif

View File

@@ -32,4 +32,10 @@ static inline void cpu_relax(void)
barrier();
}
/* Get the instruction length */
static inline int insn_length(unsigned char code)
{
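/*
 * The top two bits of the first opcode byte encode the length:
 * 00 -> 2, 01/10 -> 4, 11 -> 6 bytes. E.g. 0xb2: (0xb2 + 64) >> 7 = 1,
 * (1 + 1) << 1 = 4.
 */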
return ((((int)code + 64) >> 7) + 1) << 1;
}
#endif

View File

@@ -0,0 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright IBM Corp. 2024
*
* Authors:
* Hariharan Mari <hari55@linux.ibm.com>
*
* Contains the definitions of the global variables used by the facility test helpers.
*/
#include "facility.h"
uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS];
bool stfle_flag;

View File

@@ -0,0 +1,301 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright IBM Corp. 2024
*
* Authors:
* Hariharan Mari <hari55@linux.ibm.com>
*
* The tests compare the CPU subfunction data returned by the KVM ioctl with the data
* obtained by executing the same subfunction queries in an ASM block. Currently KVM
* doesn't mask the instruction query data reported via the CPU model, so the two can
* be compared directly.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include "facility.h"
#include "kvm_util.h"
#define PLO_FUNCTION_MAX 256
/* Query available CPU subfunctions */
struct kvm_s390_vm_cpu_subfunc cpu_subfunc;
static void get_cpu_machine_subfunctions(struct kvm_vm *vm,
struct kvm_s390_vm_cpu_subfunc *cpu_subfunc)
{
int r;
r = __kvm_device_attr_get(vm->fd, KVM_S390_VM_CPU_MODEL,
KVM_S390_VM_CPU_MACHINE_SUBFUNC, cpu_subfunc);
TEST_ASSERT(!r, "Get cpu subfunctions failed r=%d errno=%d", r, errno);
}
static inline int plo_test_bit(unsigned char nr)
{
unsigned long function = nr | 0x100;
int cc;
asm volatile(" lgr 0,%[function]\n"
/* Parameter registers are ignored for "test bit" */
" plo 0,0,0,0(0)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: [function] "d" (function)
: "cc", "0");
return cc == 0;
}
/* Testing Perform Locked Operation (PLO) CPU subfunction's ASM block */
static void test_plo_asm_block(u8 (*query)[32])
{
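/*
 * Probe each function code via the PLO "test bit" form and record it
 * MSB-first, matching the layout of the other query bitmaps.
 */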
for (int i = 0; i < PLO_FUNCTION_MAX; ++i) {
if (plo_test_bit(i))
(*query)[i >> 3] |= 0x80 >> (i & 7);
}
}
/* Testing Crypto Compute Message Authentication Code (KMAC) CPU subfunction's ASM block */
static void test_kmac_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb91e0000,0,2\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Cipher Message with Chaining (KMC) CPU subfunction's ASM block */
static void test_kmc_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb92f0000,2,4\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Cipher Message (KM) CPU subfunction's ASM block */
static void test_km_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb92e0000,2,4\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Compute Intermediate Message Digest (KIMD) CPU subfunction's ASM block */
static void test_kimd_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb93e0000,0,2\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Compute Last Message Digest (KLMD) CPU subfunction's ASM block */
static void test_klmd_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb93f0000,0,2\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Cipher Message with Counter (KMCTR) CPU subfunction's ASM block */
static void test_kmctr_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rrf,0xb92d0000,2,4,6,0\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Cipher Message with Cipher Feedback (KMF) CPU subfunction's ASM block */
static void test_kmf_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb92a0000,2,4\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Cipher Message with Output Feedback (KMO) CPU subfunction's ASM block */
static void test_kmo_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb92b0000,2,4\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Perform Cryptographic Computation (PCC) CPU subfunction's ASM block */
static void test_pcc_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb92c0000,0,0\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Perform Random Number Operation (PRNO) CPU subfunction's ASM block */
static void test_prno_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb93c0000,2,4\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Cipher Message with Authentication (KMA) CPU subfunction's ASM block */
static void test_kma_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rrf,0xb9290000,2,4,6,0\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Crypto Compute Digital Signature Authentication (KDSA) CPU subfunction's ASM block */
static void test_kdsa_asm_block(u8 (*query)[16])
{
asm volatile(" la %%r1,%[query]\n"
" xgr %%r0,%%r0\n"
" .insn rre,0xb93a0000,0,2\n"
: [query] "=R" (*query)
:
: "cc", "r0", "r1");
}
/* Testing Sort Lists (SORTL) CPU subfunction's ASM block */
static void test_sortl_asm_block(u8 (*query)[32])
{
asm volatile(" lghi 0,0\n"
" la 1,%[query]\n"
" .insn rre,0xb9380000,2,4\n"
: [query] "=R" (*query)
:
: "cc", "0", "1");
}
/* Testing Deflate Conversion Call (DFLTCC) CPU subfunction's ASM block */
static void test_dfltcc_asm_block(u8 (*query)[32])
{
asm volatile(" lghi 0,0\n"
" la 1,%[query]\n"
" .insn rrf,0xb9390000,2,4,6,0\n"
: [query] "=R" (*query)
:
: "cc", "0", "1");
}
/*
* Testing Perform Function with Concurrent Results (PFCR)
* CPU subfunction's ASM block
*/
static void test_pfcr_asm_block(u8 (*query)[16])
{
asm volatile(" lghi 0,0\n"
" .insn rsy,0xeb0000000016,0,0,%[query]\n"
: [query] "=QS" (*query)
:
: "cc", "0");
}
typedef void (*testfunc_t)(u8 (*array)[]);
struct testdef {
const char *subfunc_name;
u8 *subfunc_array;
size_t array_size;
testfunc_t test;
int facility_bit;
} testlist[] = {
/*
* PLO was introduced in the very first 64-bit machine generation.
* Hence it is assumed PLO is always installed on z/Architecture machines.
*/
{ "PLO", cpu_subfunc.plo, sizeof(cpu_subfunc.plo), test_plo_asm_block, 1 },
/* MSA - Facility bit 17 */
{ "KMAC", cpu_subfunc.kmac, sizeof(cpu_subfunc.kmac), test_kmac_asm_block, 17 },
{ "KMC", cpu_subfunc.kmc, sizeof(cpu_subfunc.kmc), test_kmc_asm_block, 17 },
{ "KM", cpu_subfunc.km, sizeof(cpu_subfunc.km), test_km_asm_block, 17 },
{ "KIMD", cpu_subfunc.kimd, sizeof(cpu_subfunc.kimd), test_kimd_asm_block, 17 },
{ "KLMD", cpu_subfunc.klmd, sizeof(cpu_subfunc.klmd), test_klmd_asm_block, 17 },
/* MSA - Facility bit 77 */
{ "KMCTR", cpu_subfunc.kmctr, sizeof(cpu_subfunc.kmctr), test_kmctr_asm_block, 77 },
{ "KMF", cpu_subfunc.kmf, sizeof(cpu_subfunc.kmf), test_kmf_asm_block, 77 },
{ "KMO", cpu_subfunc.kmo, sizeof(cpu_subfunc.kmo), test_kmo_asm_block, 77 },
{ "PCC", cpu_subfunc.pcc, sizeof(cpu_subfunc.pcc), test_pcc_asm_block, 77 },
/* MSA5 - Facility bit 57 */
{ "PPNO", cpu_subfunc.ppno, sizeof(cpu_subfunc.ppno), test_prno_asm_block, 57 },
/* MSA8 - Facility bit 146 */
{ "KMA", cpu_subfunc.kma, sizeof(cpu_subfunc.kma), test_kma_asm_block, 146 },
/* MSA9 - Facility bit 155 */
{ "KDSA", cpu_subfunc.kdsa, sizeof(cpu_subfunc.kdsa), test_kdsa_asm_block, 155 },
/* SORTL - Facility bit 150 */
{ "SORTL", cpu_subfunc.sortl, sizeof(cpu_subfunc.sortl), test_sortl_asm_block, 150 },
/* DFLTCC - Facility bit 151 */
{ "DFLTCC", cpu_subfunc.dfltcc, sizeof(cpu_subfunc.dfltcc), test_dfltcc_asm_block, 151 },
/* Concurrent-function facility - Facility bit 201 */
{ "PFCR", cpu_subfunc.pfcr, sizeof(cpu_subfunc.pfcr), test_pfcr_asm_block, 201 },
};
int main(int argc, char *argv[])
{
struct kvm_vm *vm;
int idx;
ksft_print_header();
vm = vm_create(1);
memset(&cpu_subfunc, 0, sizeof(cpu_subfunc));
get_cpu_machine_subfunctions(vm, &cpu_subfunc);
ksft_set_plan(ARRAY_SIZE(testlist));
for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
if (test_facility(testlist[idx].facility_bit)) {
u8 *array = malloc(testlist[idx].array_size);
testlist[idx].test((u8 (*)[testlist[idx].array_size])array);
TEST_ASSERT_EQ(memcmp(testlist[idx].subfunc_array,
array, testlist[idx].array_size), 0);
ksft_test_result_pass("%s\n", testlist[idx].subfunc_name);
free(array);
} else {
ksft_test_result_skip("%s feature is not available\n",
testlist[idx].subfunc_name);
}
}
kvm_vm_free(vm);
ksft_finished();
}

View File

@@ -16,7 +16,11 @@
#include <linux/capability.h>
#include <linux/sizes.h>
#define PGM_SEGMENT_TRANSLATION 0x10
#define VM_MEM_SIZE (4 * SZ_1M)
#define VM_MEM_EXT_SIZE (2 * SZ_1M)
#define VM_MEM_MAX_M ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M)
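/* the tests start with a VM_MEM_SIZE memslot; VM_MEM_EXT_SIZE more is mapped on demand via KVM_S390_UCAS_MAP */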
/* declare capget directly so we can check capabilities without libcap */
int capget(cap_user_header_t header, cap_user_data_t data);
@@ -58,6 +62,50 @@ asm("test_gprs_asm:\n"
" j 0b\n"
);
/* Test program manipulating memory */
extern char test_mem_asm[];
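/* the test programs count completed steps in r0 and yield to the host with "diag 0,0,0x44", asserted via uc_assert_diag44() */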
asm("test_mem_asm:\n"
"xgr %r0, %r0\n"
"0:\n"
" ahi %r0,1\n"
" st %r1,0(%r5,%r6)\n"
" xgr %r1,%r1\n"
" l %r1,0(%r5,%r6)\n"
" ahi %r0,1\n"
" diag 0,0,0x44\n"
" j 0b\n"
);
/* Test program manipulating storage keys */
extern char test_skey_asm[];
asm("test_skey_asm:\n"
"xgr %r0, %r0\n"
"0:\n"
" ahi %r0,1\n"
" st %r1,0(%r5,%r6)\n"
" iske %r1,%r6\n"
" ahi %r0,1\n"
" diag 0,0,0x44\n"
" sske %r1,%r6\n"
" xgr %r1,%r1\n"
" iske %r1,%r6\n"
" ahi %r0,1\n"
" diag 0,0,0x44\n"
" rrbe %r1,%r6\n"
" iske %r1,%r6\n"
" ahi %r0,1\n"
" diag 0,0,0x44\n"
" j 0b\n"
);
FIXTURE(uc_kvm)
{
struct kvm_s390_sie_block *sie_block;
@@ -67,6 +115,7 @@ FIXTURE(uc_kvm)
uintptr_t base_hva;
uintptr_t code_hva;
int kvm_run_size;
vm_paddr_t pgd;
void *vm_mem;
int vcpu_fd;
int kvm_fd;
@@ -116,7 +165,7 @@ FIXTURE_SETUP(uc_kvm)
self->base_gpa = 0;
self->code_gpa = self->base_gpa + (3 * SZ_1M);
self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_SIZE);
self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_MAX_M * SZ_1M);
ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno);
self->base_hva = (uintptr_t)self->vm_mem;
self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
@@ -222,16 +271,112 @@ TEST(uc_cap_hpage)
close(kvm_fd);
}
/* verify SIEIC exit
/* calculate host virtual addr from guest physical addr */
static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa)
{
return (void *)(self->base_hva - self->base_gpa + gpa);
}
/* map / make additional memory available */
static int uc_map_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
{
struct kvm_s390_ucas_mapping map = {
.user_addr = (u64)gpa2hva(self, vcpu_addr),
.vcpu_addr = vcpu_addr,
.length = length,
};
pr_info("ucas map %p %p 0x%llx",
(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
}
/* unmap previously mapped memory */
static int uc_unmap_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
{
struct kvm_s390_ucas_mapping map = {
.user_addr = (u64)gpa2hva(self, vcpu_addr),
.vcpu_addr = vcpu_addr,
.length = length,
};
pr_info("ucas unmap %p %p 0x%llx",
(void *)map.user_addr, (void *)map.vcpu_addr, map.length);
return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map);
}
/* handle ucontrol exit by mapping the accessed segment */
static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) *self)
{
struct kvm_run *run = self->run;
u64 seg_addr;
int rc;
TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
switch (run->s390_ucontrol.pgm_code) {
case PGM_SEGMENT_TRANSLATION:
seg_addr = run->s390_ucontrol.trans_exc_code & ~(SZ_1M - 1);
pr_info("ucontrol pic segment translation 0x%llx, mapping segment 0x%lx\n",
run->s390_ucontrol.trans_exc_code, seg_addr);
/* map / make additional memory available */
rc = uc_map_ext(self, seg_addr, SZ_1M);
TEST_ASSERT_EQ(0, rc);
break;
default:
TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code);
}
}
/*
* Handle the SIEIC exit
* - fail on codes not expected in the test cases
* Returns true if the interception has been handled and execution can continue
*/
static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
static void uc_skey_enable(FIXTURE_DATA(uc_kvm) *self)
{
struct kvm_s390_sie_block *sie_block = self->sie_block;
/* disable KSS */
sie_block->cpuflags &= ~CPUSTAT_KSS;
/* disable skey inst interception */
sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}
/*
* Handle the instruction intercept
* Returns true if the interception has been handled and execution can continue
*/
static bool uc_handle_insn_ic(FIXTURE_DATA(uc_kvm) *self)
{
struct kvm_s390_sie_block *sie_block = self->sie_block;
int ilen = insn_length(sie_block->ipa >> 8);
struct kvm_run *run = self->run;
switch (run->s390_sieic.ipa) {
case 0xB229: /* ISKE */
case 0xB22b: /* SSKE */
case 0xB22a: /* RRBE */
uc_skey_enable(self);
/* rewind to reexecute intercepted instruction */
run->psw_addr = run->psw_addr - ilen;
pr_info("rewind guest addr to 0x%.16llx\n", run->psw_addr);
return true;
default:
return false;
}
}
/*
* Handle the SIEIC exit
* - fail on codes not expected in the test cases
* Returns true if the interception has been handled and execution can continue
*/
static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) *self)
{
struct kvm_s390_sie_block *sie_block = self->sie_block;
struct kvm_run *run = self->run;
/* check SIE interception code */
pr_info("sieic: 0x%.2x 0x%.4x 0x%.4x\n",
pr_info("sieic: 0x%.2x 0x%.4x 0x%.8x\n",
run->s390_sieic.icptcode,
run->s390_sieic.ipa,
run->s390_sieic.ipb);
@@ -239,7 +384,10 @@ static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
case ICPT_INST:
/* end execution in caller on intercepted instruction */
pr_info("sie instruction interception\n");
return false;
return uc_handle_insn_ic(self);
case ICPT_KSS:
uc_skey_enable(self);
return true;
case ICPT_OPEREXC:
/* operation exception */
TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
@@ -250,11 +398,17 @@ static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self)
}
/* verify VM state on exit */
static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self)
static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) *self)
{
struct kvm_run *run = self->run;
switch (run->exit_reason) {
case KVM_EXIT_S390_UCONTROL:
/*
 * check program interruption code
 * handle page fault --> ucas map
 */
uc_handle_exit_ucontrol(self);
break;
case KVM_EXIT_S390_SIEIC:
return uc_handle_sieic(self);
default:
@@ -264,7 +418,7 @@ static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self)
}
/* run the VM until interrupted */
static int uc_run_once(FIXTURE_DATA(uc_kvm) * self)
static int uc_run_once(FIXTURE_DATA(uc_kvm) *self)
{
int rc;
@@ -275,7 +429,7 @@ static int uc_run_once(FIXTURE_DATA(uc_kvm) * self)
return rc;
}
static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self)
static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) *self)
{
struct kvm_s390_sie_block *sie_block = self->sie_block;
@@ -286,6 +440,89 @@ static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self)
TEST_ASSERT_EQ(0x440000, sie_block->ipb);
}
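/* ucontrol VMs manage guest memory via KVM_S390_UCAS_MAP, so the regular memslot ioctls must be rejected */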
TEST_F(uc_kvm, uc_no_user_region)
{
struct kvm_userspace_memory_region region = {
.slot = 1,
.guest_phys_addr = self->code_gpa,
.memory_size = VM_MEM_EXT_SIZE,
.userspace_addr = (uintptr_t)self->code_hva,
};
struct kvm_userspace_memory_region2 region2 = {
.slot = 1,
.guest_phys_addr = self->code_gpa,
.memory_size = VM_MEM_EXT_SIZE,
.userspace_addr = (uintptr_t)self->code_hva,
};
ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region));
ASSERT_EQ(EINVAL, errno);
ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2));
ASSERT_EQ(EINVAL, errno);
}
TEST_F(uc_kvm, uc_map_unmap)
{
struct kvm_sync_regs *sync_regs = &self->run->s.regs;
struct kvm_run *run = self->run;
const u64 disp = 1;
int rc;
/* copy test_mem_asm to code_hva / code_gpa */
TH_LOG("copy code %p to vm mapped memory %p / %p",
&test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);
/* DAT disabled + 64 bit mode */
run->psw_mask = 0x0000000180000000ULL;
run->psw_addr = self->code_gpa;
/* set register content for test_mem_asm to access unmapped memory */
sync_regs->gprs[1] = 0x55;
sync_regs->gprs[5] = self->base_gpa;
sync_regs->gprs[6] = VM_MEM_SIZE + disp;
run->kvm_dirty_regs |= KVM_SYNC_GPRS;
/* run and expect to fail with ucontrol pic segment translation */
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(1, sync_regs->gprs[0]);
ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);
/* fail to map memory with not segment aligned address */
rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE);
ASSERT_GT(0, rc)
TH_LOG("ucas map for non segment address should fail but didn't; "
"result %d not expected, %s", rc, strerror(errno));
/* map / make additional memory available */
rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
ASSERT_EQ(0, rc)
TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(false, uc_handle_exit(self));
uc_assert_diag44(self);
/* assert registers and memory are in expected state */
ASSERT_EQ(2, sync_regs->gprs[0]);
ASSERT_EQ(0x55, sync_regs->gprs[1]);
ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp));
/* unmap and run loop again */
rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
ASSERT_EQ(0, rc)
TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(3, sync_regs->gprs[0]);
ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
/* handle ucontrol exit and remap memory after previous map and unmap */
ASSERT_EQ(true, uc_handle_exit(self));
}
TEST_F(uc_kvm, uc_gprs)
{
struct kvm_sync_regs *sync_regs = &self->run->s.regs;
@@ -329,4 +566,73 @@ TEST_F(uc_kvm, uc_gprs)
ASSERT_EQ(1, sync_regs->gprs[0]);
}
TEST_F(uc_kvm, uc_skey)
{
struct kvm_s390_sie_block *sie_block = self->sie_block;
struct kvm_sync_regs *sync_regs = &self->run->s.regs;
u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2);
struct kvm_run *run = self->run;
const u8 skeyvalue = 0x34;
/* copy test_skey_asm to code_hva / code_gpa */
TH_LOG("copy code %p to vm mapped memory %p / %p",
&test_skey_asm, (void *)self->code_hva, (void *)self->code_gpa);
memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE);
/* set register content for test_skey_asm to access the test memory */
sync_regs->gprs[1] = skeyvalue;
sync_regs->gprs[5] = self->base_gpa;
sync_regs->gprs[6] = test_vaddr;
run->kvm_dirty_regs |= KVM_SYNC_GPRS;
/* DAT disabled + 64 bit mode */
run->psw_mask = 0x0000000180000000ULL;
run->psw_addr = self->code_gpa;
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(true, uc_handle_exit(self));
ASSERT_EQ(1, sync_regs->gprs[0]);
/* ISKE */
ASSERT_EQ(0, uc_run_once(self));
/*
* Bail out and skip the test after uc_skey_enable was executed but iske
* is still intercepted. Instructions are not handled by the kernel.
* Thus there is no need to test this here.
*/
TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS);
TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE));
TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
TEST_REQUIRE(sie_block->ipa != 0xb229);
/* ISKE contd. */
ASSERT_EQ(false, uc_handle_exit(self));
ASSERT_EQ(2, sync_regs->gprs[0]);
/* assert initial skey (ACC = 0, R & C = 1) */
ASSERT_EQ(0x06, sync_regs->gprs[1]);
uc_assert_diag44(self);
/* SSKE + ISKE */
sync_regs->gprs[1] = skeyvalue;
run->kvm_dirty_regs |= KVM_SYNC_GPRS;
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(false, uc_handle_exit(self));
ASSERT_EQ(3, sync_regs->gprs[0]);
ASSERT_EQ(skeyvalue, sync_regs->gprs[1]);
uc_assert_diag44(self);
/* RRBE + ISKE */
sync_regs->gprs[1] = skeyvalue;
run->kvm_dirty_regs |= KVM_SYNC_GPRS;
ASSERT_EQ(0, uc_run_once(self));
ASSERT_EQ(false, uc_handle_exit(self));
ASSERT_EQ(4, sync_regs->gprs[0]);
/* assert R reset but rest of skey unchanged */
ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]);
ASSERT_EQ(0, sync_regs->gprs[1] & 0x04);
uc_assert_diag44(self);
}
TEST_HARNESS_MAIN