/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>
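
/*
 * Save/restore the guest's floating point and vector registers to/from
 * the kvm_run save area: the FPC is always transferred, plus either the
 * full vector registers (if the vector facility is available) or only
 * the 16 floating point registers.
 */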
static inline void kvm_s390_fpu_store(struct kvm_run *run)
{
	fpu_stfpc(&run->s.regs.fpc);
	if (cpu_has_vx())
		save_vx_regs((__vector128 *)&run->s.regs.vrs);
	else
		save_fp_regs((freg_t *)&run->s.regs.fprs);
}

static inline void kvm_s390_fpu_load(struct kvm_run *run)
{
	fpu_lfpc_safe(&run->s.regs.fpc);
	if (cpu_has_vx())
		load_vx_regs((__vector128 *)&run->s.regs.vrs);
	else
		load_fp_regs((freg_t *)&run->s.regs.fprs);
}

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu) \
	((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1))

extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;

#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
			    d_args); \
	debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
			    "%d: " d_string "\n", (d_kvm)->userspace_pid, \
			    d_args); \
} while (0)

#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
			    d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
			    d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
			    "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
			    d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
			    d_args); \
} while (0)
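
/*
 * Helpers to atomically set, clear and test CPUSTAT_* flags in the
 * vcpu's SIE control block.
 */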
static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
	return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
	return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}
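
/*
 * A VM is "user controlled" (CONFIG_KVM_S390_UCONTROL) if no kernel gmap
 * has been set up for it, i.e. userspace manages the guest address space.
 */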
static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}
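
/*
 * The guest prefix is kept in the SIE control block shifted right by
 * GUEST_PREFIX_SHIFT, as the 8k (two page) prefix area is 8k aligned.
 */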
#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
		   prefix);
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}
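
/*
 * The following helpers decode the base register and displacement of an
 * intercepted instruction's operands from the instruction parameter block
 * (ipb) that SIE saves in the control block.
 */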
static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base1 = vcpu->arch.sie_block->ipb >> 28;
	s64 disp1;

	/* The displacement is a 20bit _SIGNED_ value */
	disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			      ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);

	if (ar)
		*ar = base1;

	return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      u8 *ar_b1, u8 *ar_b2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	/* The displacement is a 20bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac_mask) &&
		__test_facility(nr, kvm->arch.model.fac_list);
}
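
/* Set facility bit @nr in a facility list; bits are numbered MSB first. */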
static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
	WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
	return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
{
	if (kvm->arch.user_cpu_state_ctrl)
		return;

	VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control");
	kvm->arch.user_cpu_state_ctrl = 1;
}

/* get the end gfn of the last (highest gfn) memslot */
static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
{
	struct rb_node *node;
	struct kvm_memory_slot *ms;

	if (WARN_ON(kvm_memslots_empty(slots)))
		return 0;

	node = rb_last(&slots->gfn_tree);
	ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
	return ms->base_gfn + ms->npages;
}
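
/*
 * Return the GISA designation for the SIE control block: the physical
 * address of the GISA origin, with the format-1 bit set if the SCLP
 * reports the GISA format facility. Returns 0 if the VM has no GISA.
 */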
static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
{
	u32 gd;

	if (!kvm->arch.gisa_int.origin)
		return 0;

	gd = virt_to_phys(kvm->arch.gisa_int.origin);

	if (gd && sclp.has_gisaf)
		gd |= GISA_FORMAT1;
	return gd;
}

/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc);

static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
	return kvm->arch.pv.handle;
}

static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv.handle;
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm = *pgm_info,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = code,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
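
/* Block SIE on all of the VM's vcpus; the caller must hold kvm->lock. */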
static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}
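
/*
 * Return the guest TOD: the host TOD clock plus the VM's epoch, read
 * without being preempted in between the two accesses.
 */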
static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
	u64 rc;

	preempt_disable();
	rc = get_tod_clock_fast() + kvm->arch.epoch;
	preempt_enable();
	return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
void kvm_s390_gisa_disable(struct kvm *kvm);
void kvm_s390_gisa_enable(struct kvm *kvm);
int __init kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

	return &sca->ipte_control;
}
static inline int kvm_s390_use_sca_entries(void)
{
	/*
	 * Without SIGP interpretation, only SRS interpretation (if available)
	 * might use the entries. By not setting the entries and keeping them
	 * invalid, hardware will not access them but intercept.
	 */
	return sclp.has_sigpif;
}
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
				     struct mcck_volatile_info *mcck_info);
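
/*
 * Check whether the last gmap fault on this thread was a write access:
 * either a protection exception, or (if the fetch/store indication of
 * facility 75 is available) a translation exception that flags a store.
 */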
static inline bool kvm_s390_cur_gmap_fault_is_write(void)
{
	if (current->thread.gmap_int_code == PGM_PROTECTION)
		return true;
	return test_facility(75) && (current->thread.gmap_teid.fsi == TEID_FSI_STORE);
}

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the vcpus
 * are running as each vcpu will be removed from SIE before resetting the crypt
 * attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * kvm_s390_vcpu_pci_enable_interp
 *
 * Set the associated PCI attributes for each vcpu to allow for zPCI Load/Store
 * interpretation as well as adapter interruption forwarding.
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Set the maximum number of diag9c forwarding per second
 */
extern unsigned int diag9c_forwarding_hz;

#endif