mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 10:45:49 +00:00
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM updates from Paolo Bonzini:
 "ARM:
   - HYP mode stub supports kexec/kdump on 32-bit
   - improved PMU support
   - virtual interrupt controller performance improvements
   - support for userspace virtual interrupt controller (slower, but
     necessary for KVM on the weird Broadcom SoCs used by the Raspberry
     Pi 3)

  MIPS:
   - basic support for hardware virtualization (ImgTec P5600/P6600/I6400
     and Cavium Octeon III)

  PPC:
   - in-kernel acceleration for VFIO

  s390:
   - support for guests without storage keys
   - adapter interruption suppression

  x86:
   - usual range of nVMX improvements, notably nested EPT support for
     accessed and dirty bits
   - emulation of CPL3 CPUID faulting

  generic:
   - first part of VCPU thread request API
   - kvm_stat improvements"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (227 commits)
  kvm: nVMX: Don't validate disabled secondary controls
  KVM: put back #ifndef CONFIG_S390 around kvm_vcpu_kick
  Revert "KVM: Support vCPU-based gfn->hva cache"
  tools/kvm: fix top level makefile
  KVM: x86: don't hold kvm->lock in KVM_SET_GSI_ROUTING
  KVM: Documentation: remove VM mmap documentation
  kvm: nVMX: Remove superfluous VMX instruction fault checks
  KVM: x86: fix emulation of RSM and IRET instructions
  KVM: mark requests that need synchronization
  KVM: return if kvm_vcpu_wake_up() did wake up the VCPU
  KVM: add explicit barrier to kvm_vcpu_kick
  KVM: perform a wake_up in kvm_make_all_cpus_request
  KVM: mark requests that do not need a wakeup
  KVM: remove #ifndef CONFIG_S390 around kvm_vcpu_wake_up
  KVM: x86: always use kvm_make_request instead of set_bit
  KVM: add kvm_{test,clear}_request to replace {test,clear}_bit
  s390: kvm: Cpu model support for msa6, msa7 and msa8
  KVM: x86: remove irq disablement around KVM_SET_CLOCK/KVM_GET_CLOCK
  kvm: better MWAIT emulation for guests
  KVM: x86: virtualize cpuid faulting
  ...
This commit is contained in:
commit 2d3e4866de
@@ -110,17 +110,18 @@ Type: system ioctl
 Parameters: machine type identifier (KVM_VM_*)
 Returns: a VM fd that can be used to control the new virtual machine.
 
-The new VM has no virtual cpus and no memory. An mmap() of a VM fd
-will access the virtual machine's physical address space; offset zero
-corresponds to guest physical address zero. Use of mmap() on a VM fd
-is discouraged if userspace memory allocation (KVM_CAP_USER_MEMORY) is
-available.
-You most certainly want to use 0 as machine type.
+The new VM has no virtual cpus and no memory.
+You probably want to use 0 as machine type.
 
 In order to create user controlled virtual machines on S390, check
 KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
 privileged user (CAP_SYS_ADMIN).
 
+To use hardware assisted virtualization on MIPS (VZ ASE) rather than
+the default trap & emulate implementation (which changes the virtual
+memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
+flag KVM_VM_MIPS_VZ.
+
 
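A minimal userspace sketch of the sequence this section describes: open the
system fd, then create a VM with machine type 0 (error handling trimmed; the
same KVM_CREATE_VM call takes KVM_VM_MIPS_VZ or KVM_VM_S390_UCONTROL for the
variants discussed above):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    /* 0 selects the default machine type on every architecture */
    int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, 0);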
 4.3 KVM_GET_MSR_INDEX_LIST
 
@@ -1321,130 +1322,6 @@ The flags bitmap is defined as:
  /* the host supports the ePAPR idle hcall
  #define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
 
-4.48 KVM_ASSIGN_PCI_DEVICE (deprecated)
-
-Capability: none
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_pci_dev (in)
-Returns: 0 on success, -1 on error
-
-Assigns a host PCI device to the VM.
-
-struct kvm_assigned_pci_dev {
-	__u32 assigned_dev_id;
-	__u32 busnr;
-	__u32 devfn;
-	__u32 flags;
-	__u32 segnr;
-	union {
-		__u32 reserved[11];
-	};
-};
-
-The PCI device is specified by the triple segnr, busnr, and devfn.
-Identification in succeeding service requests is done via assigned_dev_id. The
-following flags are specified:
-
-/* Depends on KVM_CAP_IOMMU */
-#define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
-/* The following two depend on KVM_CAP_PCI_2_3 */
-#define KVM_DEV_ASSIGN_PCI_2_3		(1 << 1)
-#define KVM_DEV_ASSIGN_MASK_INTX	(1 << 2)
-
-If KVM_DEV_ASSIGN_PCI_2_3 is set, the kernel will manage legacy INTx interrupts
-via the PCI-2.3-compliant device-level mask, thus enable IRQ sharing with other
-assigned devices or host devices. KVM_DEV_ASSIGN_MASK_INTX specifies the
-guest's view on the INTx mask, see KVM_ASSIGN_SET_INTX_MASK for details.
-
-The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
-isolation of the device. Usages not specifying this flag are deprecated.
-
-Only PCI header type 0 devices with PCI BAR resources are supported by
-device assignment. The user requesting this ioctl must have read/write
-access to the PCI sysfs resource files associated with the device.
-
-Errors:
-  ENOTTY: kernel does not support this ioctl
-
-Other error conditions may be defined by individual device types or
-have their standard meanings.
-
-
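Although this ioctl is deprecated (and deleted by this very merge), a call
following the description above would have looked roughly like the sketch
below; the handle and the segment/bus/devfn triple are made-up examples:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    #include <linux/pci.h>                       /* PCI_DEVFN() */

    struct kvm_assigned_pci_dev dev = {
            .assigned_dev_id = 1,                /* caller-chosen handle */
            .segnr = 0,                          /* PCI segment (domain) */
            .busnr = 1,                          /* bus number           */
            .devfn = PCI_DEVFN(0x10, 0),         /* slot 0x10, function 0 */
            .flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,/* mandatory, see above */
    };
    ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);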
-4.49 KVM_DEASSIGN_PCI_DEVICE (deprecated)
-
-Capability: none
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_pci_dev (in)
-Returns: 0 on success, -1 on error
-
-Ends PCI device assignment, releasing all associated resources.
-
-See KVM_ASSIGN_PCI_DEVICE for the data structure. Only assigned_dev_id is
-used in kvm_assigned_pci_dev to identify the device.
-
-Errors:
-  ENOTTY: kernel does not support this ioctl
-
-Other error conditions may be defined by individual device types or
-have their standard meanings.
-
-4.50 KVM_ASSIGN_DEV_IRQ (deprecated)
-
-Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_irq (in)
-Returns: 0 on success, -1 on error
-
-Assigns an IRQ to a passed-through device.
-
-struct kvm_assigned_irq {
-	__u32 assigned_dev_id;
-	__u32 host_irq; /* ignored (legacy field) */
-	__u32 guest_irq;
-	__u32 flags;
-	union {
-		__u32 reserved[12];
-	};
-};
-
-The following flags are defined:
-
-#define KVM_DEV_IRQ_HOST_INTX	(1 << 0)
-#define KVM_DEV_IRQ_HOST_MSI	(1 << 1)
-#define KVM_DEV_IRQ_HOST_MSIX	(1 << 2)
-
-#define KVM_DEV_IRQ_GUEST_INTX	(1 << 8)
-#define KVM_DEV_IRQ_GUEST_MSI	(1 << 9)
-#define KVM_DEV_IRQ_GUEST_MSIX	(1 << 10)
-
-It is not valid to specify multiple types per host or guest IRQ. However, the
-IRQ type of host and guest can differ or can even be null.
-
-Errors:
-  ENOTTY: kernel does not support this ioctl
-
-Other error conditions may be defined by individual device types or
-have their standard meanings.
-
-
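For completeness, one valid host/guest combination described above, a host MSI
delivered to the guest as MSI, would have been requested like this sketch (the
id and GSI number are made-up examples):

    struct kvm_assigned_irq irq = {
            .assigned_dev_id = 1,    /* handle from KVM_ASSIGN_PCI_DEVICE */
            .guest_irq       = 33,   /* example guest GSI */
            .flags = KVM_DEV_IRQ_HOST_MSI | KVM_DEV_IRQ_GUEST_MSI,
    };
    ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);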
-4.51 KVM_DEASSIGN_DEV_IRQ (deprecated)
-
-Capability: KVM_CAP_ASSIGN_DEV_IRQ
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_irq (in)
-Returns: 0 on success, -1 on error
-
-Ends an IRQ assignment to a passed-through device.
-
-See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
-by assigned_dev_id, flags must correspond to the IRQ type specified on
-KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
-
-
 4.52 KVM_SET_GSI_ROUTING
 
 Capability: KVM_CAP_IRQ_ROUTING
@@ -1531,52 +1408,6 @@ struct kvm_irq_routing_hv_sint {
 	__u32 sint;
 };
 
-4.53 KVM_ASSIGN_SET_MSIX_NR (deprecated)
-
-Capability: none
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_msix_nr (in)
-Returns: 0 on success, -1 on error
-
-Set the number of MSI-X interrupts for an assigned device. The number is
-reset again by terminating the MSI-X assignment of the device via
-KVM_DEASSIGN_DEV_IRQ. Calling this service more than once at any earlier
-point will fail.
-
-struct kvm_assigned_msix_nr {
-	__u32 assigned_dev_id;
-	__u16 entry_nr;
-	__u16 padding;
-};
-
-#define KVM_MAX_MSIX_PER_DEV		256
-
-
-4.54 KVM_ASSIGN_SET_MSIX_ENTRY (deprecated)
-
-Capability: none
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_msix_entry (in)
-Returns: 0 on success, -1 on error
-
-Specifies the routing of an MSI-X assigned device interrupt to a GSI. Setting
-the GSI vector to zero means disabling the interrupt.
-
-struct kvm_assigned_msix_entry {
-	__u32 assigned_dev_id;
-	__u32 gsi;
-	__u16 entry; /* The index of entry in the MSI-X table */
-	__u16 padding[3];
-};
-
-Errors:
-  ENOTTY: kernel does not support this ioctl
-
-Other error conditions may be defined by individual device types or
-have their standard meanings.
-
-
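Taken together, the two MSI-X ioctls above were used in sequence: first size
the table, then route each entry. A hypothetical sketch (entry count and GSI
numbers are examples):

    struct kvm_assigned_msix_nr nr = {
            .assigned_dev_id = 1,
            .entry_nr = 2,                   /* <= KVM_MAX_MSIX_PER_DEV */
    };
    ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_NR, &nr);

    for (int i = 0; i < 2; i++) {
            struct kvm_assigned_msix_entry e = {
                    .assigned_dev_id = 1,
                    .entry = i,              /* index in the MSI-X table */
                    .gsi = 40 + i,           /* 0 would disable the entry */
            };
            ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_ENTRY, &e);
    }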
 4.55 KVM_SET_TSC_KHZ
 
@@ -1728,40 +1559,6 @@ should skip processing the bitmap and just invalidate everything. It must
 be set to the number of set bits in the bitmap.
 
 
-4.61 KVM_ASSIGN_SET_INTX_MASK (deprecated)
-
-Capability: KVM_CAP_PCI_2_3
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_assigned_pci_dev (in)
-Returns: 0 on success, -1 on error
-
-Allows userspace to mask PCI INTx interrupts from the assigned device. The
-kernel will not deliver INTx interrupts to the guest between setting and
-clearing of KVM_ASSIGN_SET_INTX_MASK via this interface. This enables use of
-and emulation of PCI 2.3 INTx disable command register behavior.
-
-This may be used for both PCI 2.3 devices supporting INTx disable natively and
-older devices lacking this support. Userspace is responsible for emulating the
-read value of the INTx disable bit in the guest visible PCI command register.
-When modifying the INTx disable state, userspace should precede updating the
-physical device command register by calling this ioctl to inform the kernel of
-the new intended INTx mask state.
-
-Note that the kernel uses the device INTx disable bit to internally manage the
-device interrupt state for PCI 2.3 devices. Reads of this register may
-therefore not match the expected value. Writes should always use the guest
-intended INTx disable value rather than attempting to read-copy-update the
-current physical device state. Races between user and kernel updates to the
-INTx disable bit are handled lazily in the kernel. It's possible the device
-may generate unintended interrupts, but they will not be injected into the
-guest.
-
-See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
-by assigned_dev_id. In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
-evaluated.
-
-
 4.62 KVM_CREATE_SPAPR_TCE
 
 Capability: KVM_CAP_SPAPR_TCE
@@ -2068,11 +1865,23 @@ registers, find a list below:
   MIPS  | KVM_REG_MIPS_CP0_ENTRYLO0      | 64
   MIPS  | KVM_REG_MIPS_CP0_ENTRYLO1      | 64
   MIPS  | KVM_REG_MIPS_CP0_CONTEXT       | 64
+  MIPS  | KVM_REG_MIPS_CP0_CONTEXTCONFIG | 32
   MIPS  | KVM_REG_MIPS_CP0_USERLOCAL     | 64
+  MIPS  | KVM_REG_MIPS_CP0_XCONTEXTCONFIG| 64
   MIPS  | KVM_REG_MIPS_CP0_PAGEMASK      | 32
+  MIPS  | KVM_REG_MIPS_CP0_PAGEGRAIN     | 32
+  MIPS  | KVM_REG_MIPS_CP0_SEGCTL0       | 64
+  MIPS  | KVM_REG_MIPS_CP0_SEGCTL1       | 64
+  MIPS  | KVM_REG_MIPS_CP0_SEGCTL2       | 64
+  MIPS  | KVM_REG_MIPS_CP0_PWBASE        | 64
+  MIPS  | KVM_REG_MIPS_CP0_PWFIELD       | 64
+  MIPS  | KVM_REG_MIPS_CP0_PWSIZE        | 64
   MIPS  | KVM_REG_MIPS_CP0_WIRED         | 32
+  MIPS  | KVM_REG_MIPS_CP0_PWCTL         | 32
   MIPS  | KVM_REG_MIPS_CP0_HWRENA        | 32
   MIPS  | KVM_REG_MIPS_CP0_BADVADDR      | 64
+  MIPS  | KVM_REG_MIPS_CP0_BADINSTR      | 32
+  MIPS  | KVM_REG_MIPS_CP0_BADINSTRP     | 32
   MIPS  | KVM_REG_MIPS_CP0_COUNT         | 32
   MIPS  | KVM_REG_MIPS_CP0_ENTRYHI       | 64
   MIPS  | KVM_REG_MIPS_CP0_COMPARE       | 32
@@ -2089,6 +1898,7 @@ registers, find a list below:
   MIPS  | KVM_REG_MIPS_CP0_CONFIG4       | 32
   MIPS  | KVM_REG_MIPS_CP0_CONFIG5       | 32
   MIPS  | KVM_REG_MIPS_CP0_CONFIG7       | 32
+  MIPS  | KVM_REG_MIPS_CP0_XCONTEXT      | 64
   MIPS  | KVM_REG_MIPS_CP0_ERROREPC      | 64
   MIPS  | KVM_REG_MIPS_CP0_KSCRATCH1     | 64
   MIPS  | KVM_REG_MIPS_CP0_KSCRATCH2     | 64
@@ -2096,6 +1906,7 @@ registers, find a list below:
   MIPS  | KVM_REG_MIPS_CP0_KSCRATCH4     | 64
   MIPS  | KVM_REG_MIPS_CP0_KSCRATCH5     | 64
   MIPS  | KVM_REG_MIPS_CP0_KSCRATCH6     | 64
+  MIPS  | KVM_REG_MIPS_CP0_MAAR(0..63)   | 64
   MIPS  | KVM_REG_MIPS_COUNT_CTL         | 64
   MIPS  | KVM_REG_MIPS_COUNT_RESUME      | 64
   MIPS  | KVM_REG_MIPS_COUNT_HZ          | 64
@@ -2162,6 +1973,10 @@ hardware, host kernel, guest, and whether XPA is present in the guest, i.e.
 with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and
 the PFNX field starting at bit 30.
 
+MIPS MAARs (see KVM_REG_MIPS_CP0_MAAR(*) above) have the following id bit
+patterns:
+  0x7030 0000 0001 01 <reg:8>
+
 MIPS KVM control registers (see above) have the following id bit patterns:
   0x7030 0000 0002 <reg:16>
 
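Composing such an id in userspace and fetching the register is mechanical; a
sketch for MAAR 5, with a helper macro of our own making:

    /* 0x7030 0000 0001 01 <reg:8>  ->  id of MAAR n */
    #define MIPS_CP0_MAAR_ID(n)  (0x7030000000010100ULL | (__u64)(n))

    __u64 val;
    struct kvm_one_reg reg = {
            .id   = MIPS_CP0_MAAR_ID(5),
            .addr = (__u64)(unsigned long)&val,
    };
    ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);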
@@ -4164,6 +3979,23 @@ to take care of that.
 This capability can be enabled dynamically even if VCPUs were already
 created and are running.
 
+7.9 KVM_CAP_S390_GS
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success; -EINVAL if the machine does not support
+         guarded storage; -EBUSY if a VCPU has already been created.
+
+Allows use of guarded storage for the KVM guest.
+
+7.10 KVM_CAP_S390_AIS
+
+Architectures: s390
+Parameters: none
+
+Allow use of adapter-interruption suppression.
+Returns: 0 on success; -EBUSY if a VCPU has already been created.
+
 8. Other capabilities.
 ----------------------
 
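Both new capabilities are VM-wide and must be turned on before any VCPU
exists; a sketch for enabling AIS with one KVM_ENABLE_CAP call on the VM fd:

    struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_AIS };
    /* fails with -EBUSY once a VCPU has been created, as noted above */
    ioctl(vm_fd, KVM_ENABLE_CAP, &cap);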
@@ -4210,3 +4042,118 @@ This capability, if KVM_CHECK_EXTENSION indicates that it is
 available, means that the kernel can support guests using the
 hashed page table MMU defined in Power ISA V3.00 (as implemented in
 the POWER9 processor), including in-memory segment tables.
+
+8.5 KVM_CAP_MIPS_VZ
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that full hardware assisted virtualization capabilities
+of the hardware are available for use through KVM. An appropriate
+KVM_VM_MIPS_* type must be passed to KVM_CREATE_VM to create a VM which
+utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using full hardware assisted virtualization
+capabilities of the hardware. This is useful to check after creating a VM with
+KVM_VM_MIPS_DEFAULT.
+
+The value returned by KVM_CHECK_EXTENSION should be compared against known
+values (see below). All other values are reserved. This is to allow for the
+possibility of other hardware assisted virtualization implementations which
+may be incompatible with the MIPS VZ ASE.
+
+ 0: The trap & emulate implementation is in use to run guest code in user
+    mode. Guest virtual memory segments are rearranged to fit the guest in the
+    user mode address space.
+
+ 1: The MIPS VZ ASE is in use, providing full hardware assisted
+    virtualization, including standard guest virtual memory segments.
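In practice userspace checks the extension on the system fd and then asks for
a VZ VM explicitly, e.g.:

    if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ) == 1)
            /* 1 == MIPS VZ ASE, per the value list above */
            vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_MIPS_VZ);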
+
+8.6 KVM_CAP_MIPS_TE
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that the trap & emulate implementation is available to
+run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
+assisted virtualisation is also available. KVM_VM_MIPS_TE (0) must be passed
+to KVM_CREATE_VM to create a VM which utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using trap & emulate.
+
+8.7 KVM_CAP_MIPS_64BIT
+
+Architectures: mips
+
+This capability indicates the supported architecture type of the guest, i.e.
+the supported register and address width.
+
+The values returned when this capability is checked by KVM_CHECK_EXTENSION on a
+kvm VM handle correspond roughly to the CP0_Config.AT register field, and should
+be checked specifically against known values (see below). All other values are
+reserved.
+
+ 0: MIPS32 or microMIPS32.
+    Both registers and addresses are 32-bits wide.
+    It will only be possible to run 32-bit guest code.
+
+ 1: MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
+    Registers are 64-bits wide, but addresses are 32-bits wide.
+    64-bit guest code may run but cannot access MIPS64 memory segments.
+    It will also be possible to run 32-bit guest code.
+
+ 2: MIPS64 or microMIPS64 with access to all address segments.
+    Both registers and addresses are 64-bits wide.
+    It will be possible to run 64-bit or 32-bit guest code.
+
+8.8 KVM_CAP_X86_GUEST_MWAIT
+
+Architectures: x86
+
+This capability indicates that a guest using memory monitoring instructions
+(MWAIT/MWAITX) to stop the virtual CPU will not cause a VM exit. As such, time
+spent while the virtual CPU is halted in this way is accounted for as
+guest running time on the host (as opposed to e.g. HLT).
+
+8.9 KVM_CAP_ARM_USER_IRQ
+
+Architectures: arm, arm64
+This capability, if KVM_CHECK_EXTENSION indicates that it is available, means
+that if userspace creates a VM without an in-kernel interrupt controller, it
+will be notified of changes to the output level of in-kernel emulated devices,
+which can generate virtual interrupts, presented to the VM.
+For such VMs, on every return to userspace, the kernel
+updates the vcpu's run->s.regs.device_irq_level field to represent the actual
+output level of the device.
+
+Whenever kvm detects a change in the device output level, kvm guarantees at
+least one return to userspace before running the VM. This exit could either
+be a KVM_EXIT_INTR or any other exit event, like KVM_EXIT_MMIO. This way,
+userspace can always sample the device output level and re-compute the state of
+the userspace interrupt controller. Userspace should always check the state
+of run->s.regs.device_irq_level on every kvm exit.
+The value in run->s.regs.device_irq_level can represent both level and edge
+triggered interrupt signals, depending on the device. Edge triggered interrupt
+signals will exit to userspace with the bit in run->s.regs.device_irq_level
+set exactly once per edge signal.
+
+The field run->s.regs.device_irq_level is available independent of
+run->kvm_valid_regs or run->kvm_dirty_regs bits.
+
+If KVM_CAP_ARM_USER_IRQ is supported, the KVM_CHECK_EXTENSION ioctl returns a
+number larger than 0 indicating the version of this capability is implemented
+and thereby which bits in run->s.regs.device_irq_level can signal values.
+
+Currently the following bits are defined for the device_irq_level bitmap:
+
+  KVM_CAP_ARM_USER_IRQ >= 1:
+
+    KVM_ARM_DEV_EL1_VTIMER -  EL1 virtual timer
+    KVM_ARM_DEV_EL1_PTIMER -  EL1 physical timer
+    KVM_ARM_DEV_PMU        -  ARM PMU overflow interrupt signal
+
+Future versions of kvm may implement additional events. These will get
+indicated by returning a higher number from KVM_CHECK_EXTENSION and will be
+listed above.
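A userspace interrupt-controller model would sample the field after every
KVM_RUN, roughly as below (run is the mmap'ed struct kvm_run; the
gic_model_set_line() helper and line names are hypothetical):

    ioctl(vcpu_fd, KVM_RUN, 0);

    __u64 level = run->s.regs.device_irq_level;
    gic_model_set_line(VTIMER_LINE, !!(level & KVM_ARM_DEV_EL1_VTIMER));
    gic_model_set_line(PTIMER_LINE, !!(level & KVM_ARM_DEV_EL1_PTIMER));
    gic_model_set_line(PMU_LINE,    !!(level & KVM_ARM_DEV_PMU));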
Documentation/virtual/kvm/arm/hyp-abi.txt (new file, 53 lines)
@@ -0,0 +1,53 @@
+* Internal ABI between the kernel and HYP
+
+This file documents the interaction between the Linux kernel and the
+hypervisor layer when running Linux as a hypervisor (for example
+KVM). It doesn't cover the interaction of the kernel with the
+hypervisor when running as a guest (under Xen, KVM or any other
+hypervisor), or any hypervisor-specific interaction when the kernel is
+used as a host.
+
+On arm and arm64 (without VHE), the kernel doesn't run in hypervisor
+mode, but still needs to interact with it, allowing a built-in
+hypervisor to be either installed or torn down.
+
+In order to achieve this, the kernel must be booted at HYP (arm) or
+EL2 (arm64), allowing it to install a set of stubs before dropping to
+SVC/EL1. These stubs are accessible by using a 'hvc #0' instruction,
+and only act on individual CPUs.
+
+Unless specified otherwise, any built-in hypervisor must implement
+these functions (see arch/arm{,64}/include/asm/virt.h):
+
+* r0/x0 = HVC_SET_VECTORS
+  r1/x1 = vectors
+
+  Set HVBAR/VBAR_EL2 to 'vectors' to enable a hypervisor. 'vectors'
+  must be a physical address, and respect the alignment requirements
+  of the architecture. Only implemented by the initial stubs, not by
+  Linux hypervisors.
+
+* r0/x0 = HVC_RESET_VECTORS
+
+  Turn HYP/EL2 MMU off, and reset HVBAR/VBAR_EL2 to the initial
+  stubs' exception vector value. This effectively disables an existing
+  hypervisor.
+
+* r0/x0 = HVC_SOFT_RESTART
+  r1/x1 = restart address
+  x2 = x0's value when entering the next payload (arm64)
+  x3 = x1's value when entering the next payload (arm64)
+  x4 = x2's value when entering the next payload (arm64)
+
+  Mask all exceptions, disable the MMU, move the arguments into place
+  (arm64 only), and jump to the restart address while at HYP/EL2. This
+  hypercall is not expected to return to its caller.
+
+Any other value of r0/x0 triggers a hypervisor-specific handling,
+which is not documented here.
+
+The return value of a stub hypercall is held by r0/x0, and is 0 on
+success, and HVC_STUB_ERR on error. A stub hypercall is allowed to
+clobber any of the caller-saved registers (x0-x18 on arm64, r0-r3 and
+ip on arm). It is thus recommended to use a function call to perform
+the hypercall.
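To make the convention concrete, a caller at EL1 on arm64 could wrap, say,
HVC_RESET_VECTORS (value 2, see the asm/virt.h changes below) in a function,
as the last paragraph recommends. A sketch:

    static unsigned long hyp_reset_vectors(void)
    {
            register unsigned long x0 asm("x0") = 2;  /* HVC_RESET_VECTORS */

            /* the stub may clobber any caller-saved register */
            asm volatile("hvc #0"
                         : "+r" (x0)
                         :
                         : "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8",
                           "x9", "x10", "x11", "x12", "x13", "x14", "x15",
                           "x16", "x17", "x18", "memory");
            return x0;               /* 0 on success, HVC_STUB_ERR on error */
    }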
@@ -14,6 +14,8 @@ FLIC provides support to
 - purge one pending floating I/O interrupt (KVM_DEV_FLIC_CLEAR_IO_IRQ)
 - enable/disable for the guest transparent async page faults
 - register and modify adapter interrupt sources (KVM_DEV_FLIC_ADAPTER_*)
+- modify AIS (adapter-interruption-suppression) mode state (KVM_DEV_FLIC_AISM)
+- inject adapter interrupts on a specified adapter (KVM_DEV_FLIC_AIRQ_INJECT)
 
 Groups:
   KVM_DEV_FLIC_ENQUEUE
@@ -64,12 +66,18 @@ struct kvm_s390_io_adapter {
 	__u8 isc;
 	__u8 maskable;
 	__u8 swap;
-	__u8 pad;
+	__u8 flags;
 };
 
 id contains the unique id for the adapter, isc the I/O interruption subclass
-to use, maskable whether this adapter may be masked (interrupts turned off)
-and swap whether the indicators need to be byte swapped.
+to use, maskable whether this adapter may be masked (interrupts turned off),
+swap whether the indicators need to be byte swapped, and flags contains
+further characteristics of the adapter.
+Currently defined values for 'flags' are:
+- KVM_S390_ADAPTER_SUPPRESSIBLE: adapter is subject to AIS
+  (adapter-interrupt-suppression) facility. This flag only has an effect if
+  the AIS capability is enabled.
+Unknown flag values are ignored.
 
 
   KVM_DEV_FLIC_ADAPTER_MODIFY
@@ -101,6 +109,33 @@ struct kvm_s390_io_adapter_req {
     release a userspace page for the translated address specified in addr
     from the list of mappings
 
+  KVM_DEV_FLIC_AISM
+    modify the adapter-interruption-suppression mode for a given isc if the
+    AIS capability is enabled. Takes a kvm_s390_ais_req describing:
+
+struct kvm_s390_ais_req {
+	__u8 isc;
+	__u16 mode;
+};
+
+    isc contains the target I/O interruption subclass, mode the target
+    adapter-interruption-suppression mode. The following modes are
+    currently supported:
+    - KVM_S390_AIS_MODE_ALL: ALL-Interruptions Mode, i.e. airq injection
+      is always allowed;
+    - KVM_S390_AIS_MODE_SINGLE: SINGLE-Interruption Mode, i.e. airq
+      injection is only allowed once and the following adapter interrupts
+      will be suppressed until the mode is set again to ALL-Interruptions
+      or SINGLE-Interruption mode.
+
+  KVM_DEV_FLIC_AIRQ_INJECT
+    Inject adapter interrupts on a specified adapter.
+    attr->attr contains the unique id for the adapter, which allows for
+    adapter-specific checks and actions.
+    For adapters subject to AIS, handle the airq injection suppression for
+    an isc according to the adapter-interruption-suppression mode on condition
+    that the AIS capability is enabled.
+
 Note: The KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR device ioctls executed on
 FLIC with an unknown group or attribute gives the error code EINVAL (instead of
 ENXIO, as specified in the API documentation). It is not possible to conclude
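Driving the new KVM_DEV_FLIC_AISM attribute from userspace is a single
KVM_SET_DEVICE_ATTR call; a sketch, assuming flic_fd was obtained via
KVM_CREATE_DEVICE with KVM_DEV_TYPE_FLIC and using isc 3 as an example:

    struct kvm_s390_ais_req req = { .isc = 3, .mode = KVM_S390_AIS_MODE_ALL };
    struct kvm_device_attr attr = {
            .group = KVM_DEV_FLIC_AISM,
            .addr  = (__u64)(unsigned long)&req,
    };
    ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);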
@@ -16,7 +16,21 @@ Groups:
 
 KVM_DEV_VFIO_GROUP attributes:
   KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking
+	kvm_device_attr.addr points to an int32_t file descriptor
+	for the VFIO group.
   KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking
+	kvm_device_attr.addr points to an int32_t file descriptor
+	for the VFIO group.
+  KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: attaches a guest visible TCE table
+	allocated by sPAPR KVM.
+	kvm_device_attr.addr points to a struct:
 
-For each, kvm_device_attr.addr points to an int32_t file descriptor
-for the VFIO group.
+struct kvm_vfio_spapr_tce {
+	__s32	groupfd;
+	__s32	tablefd;
+};
+
+where
+@groupfd is a file descriptor for a VFIO group;
+@tablefd is a file descriptor for a TCE table allocated via
+KVM_CREATE_SPAPR_TCE.
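End to end, the VFIO-KVM device is created once per VM and then fed group fds;
a sketch (the group number in the path is an example):

    struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
    ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);       /* cd.fd is the device fd */

    int32_t group_fd = open("/dev/vfio/26", O_RDWR);
    struct kvm_device_attr attr = {
            .group = KVM_DEV_VFIO_GROUP,
            .attr  = KVM_DEV_VFIO_GROUP_ADD,
            .addr  = (__u64)(unsigned long)&group_fd,
    };
    ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);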
@@ -140,7 +140,8 @@ struct kvm_s390_vm_cpu_subfunc {
 	u8 kmo[16];           # valid with Message-Security-Assist-Extension 4
 	u8 pcc[16];           # valid with Message-Security-Assist-Extension 4
 	u8 ppno[16];          # valid with Message-Security-Assist-Extension 5
-	u8 reserved[1824];    # reserved for future instructions
+	u8 kma[16];           # valid with Message-Security-Assist-Extension 8
+	u8 reserved[1808];    # reserved for future instructions
 };
 
 Parameters: address of a buffer to load the subfunction blocks from.
@@ -28,6 +28,11 @@ S390:
   property inside the device tree's /hypervisor node.
   For more information refer to Documentation/virtual/kvm/ppc-pv.txt
 
+MIPS:
+  KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
+  number in $2 (v0). Up to four arguments may be placed in $4-$7 (a0-a3) and
+  the return value is placed in $2 (v0).
+
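Following that calling convention, a zero-argument hypercall wrapper could
look like this sketch (the .word is our assumed encoding of the VZ HYPCALL
instruction with code 0; treat it as an assumption):

    static inline long kvm_mips_hypercall0(unsigned long num)
    {
            register unsigned long v0 asm("$2") = num;  /* hypercall number */

            asm volatile(".word 0x42000028"             /* HYPCALL, code 0 */
                         : "+r" (v0)
                         :
                         : "memory");
            return v0;                                  /* result in $2 (v0) */
    }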
 KVM Hypercalls Documentation
 ===========================
 The template for each hypercall is:
@@ -422,7 +422,17 @@ dtb_check_done:
 		cmp	r0, #HYP_MODE
 		bne	1f
 
-		bl	__hyp_get_vectors
+		/*
+		 * Compute the address of the hyp vectors after relocation.
+		 * This requires some arithmetic since we cannot directly
+		 * reference __hyp_stub_vectors in a PC-relative way.
+		 * Call __hyp_set_vectors with the new address so that we
+		 * can HVC again after the copy.
+		 */
+0:		adr	r0, 0b
+		movw	r1, #:lower16:__hyp_stub_vectors - 0b
+		movt	r1, #:upper16:__hyp_stub_vectors - 0b
+		add	r0, r0, r1
 		sub	r0, r0, r5
 		add	r0, r0, r10
 		bl	__hyp_set_vectors
@@ -33,7 +33,7 @@
 #define ARM_EXCEPTION_IRQ	  5
 #define ARM_EXCEPTION_FIQ	  6
 #define ARM_EXCEPTION_HVC	  7
-
+#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
 /*
  * The rr_lo_hi macro swaps a pair of registers depending on
  * current endianness. It is used in conjunction with ldrd and strd
@@ -72,10 +72,11 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern void __init_stage2_translation(void);
 
-extern void __kvm_hyp_reset(unsigned long);
-
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_read_vmcr(void);
+extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
 
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
@@ -30,7 +30,6 @@
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
 
 #define KVM_USER_MEM_SLOTS 32
-#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HAVE_ONE_REG
 #define KVM_HALT_POLL_NS_DEFAULT 500000
 
@@ -45,7 +44,7 @@
 #define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
 #endif
 
-#define KVM_REQ_VCPU_EXIT	8
+#define KVM_REQ_VCPU_EXIT	(8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 
 u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
 int __attribute_const__ kvm_target_cpu(void);
@@ -270,12 +269,6 @@ static inline void __cpu_init_stage2(void)
 	kvm_call_hyp(__init_stage2_translation);
 }
 
-static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
-					phys_addr_t phys_idmap_start)
-{
-	kvm_call_hyp((void *)virt_to_idmap(__kvm_hyp_reset), vector_ptr);
-}
-
 static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
 {
 	return 0;
@@ -56,7 +56,6 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 
 phys_addr_t kvm_mmu_get_httbr(void);
 phys_addr_t kvm_get_idmap_vector(void);
-phys_addr_t kvm_get_idmap_start(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
@@ -43,7 +43,7 @@ extern struct processor {
 	/*
 	 * Special stuff for a reset
 	 */
-	void (*reset)(unsigned long addr) __attribute__((noreturn));
+	void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
 	/*
 	 * Idle the processor
 	 */
@@ -88,7 +88,7 @@ extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
 #else
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
 #endif
-extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 
 /* These three are private to arch/arm/kernel/suspend.c */
 extern void cpu_do_suspend(void *);
@@ -53,7 +53,7 @@ static inline void sync_boot_mode(void)
 }
 
 void __hyp_set_vectors(unsigned long phys_vector_base);
-unsigned long __hyp_get_vectors(void);
+void __hyp_reset_vectors(void);
 #else
 #define __boot_cpu_mode	(SVC_MODE)
 #define sync_boot_mode()
@@ -94,6 +94,18 @@ extern char __hyp_text_start[];
 extern char __hyp_text_end[];
 #endif
 
+#else
+
+/* Only assembly code should need those */
+
+#define HVC_SET_VECTORS 0
+#define HVC_SOFT_RESTART 1
+#define HVC_RESET_VECTORS 2
+
+#define HVC_STUB_HCALL_NR 3
+
 #endif /* __ASSEMBLY__ */
 
+#define HVC_STUB_ERR	0xbadca11
+
 #endif /* ! VIRT_H */
@@ -27,6 +27,8 @@
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
 
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+
 #define KVM_REG_SIZE(id)						\
 	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -114,6 +116,8 @@ struct kvm_debug_exit_arch {
 };
 
 struct kvm_sync_regs {
+	/* Used with KVM_CAP_ARM_USER_IRQ */
+	__u64 device_irq_level;
 };
 
 struct kvm_arch_memory_slot {
@@ -125,7 +125,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 * (see safe_svcmode_maskall).
 	 */
 	@ Now install the hypervisor stub:
-	adr	r7, __hyp_stub_vectors
+	W(adr)	r7, __hyp_stub_vectors
 	mcr	p15, 4, r7, c12, c0, 0	@ set hypervisor vector base (HVBAR)
 
 	@ Disable all traps, so we don't get any nasty surprise
@@ -202,9 +202,23 @@ ARM_BE8(orr	r7, r7, #(1 << 25))	@ HSCTLR.EE
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
-	cmp	r0, #-1
-	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
-	mcrne	p15, 4, r0, c12, c0, 0	@ set HVBAR
+	teq	r0, #HVC_SET_VECTORS
+	bne	1f
+	mcr	p15, 4, r1, c12, c0, 0	@ set HVBAR
+	b	__hyp_stub_exit
+
+1:	teq	r0, #HVC_SOFT_RESTART
+	bne	1f
+	bx	r1
+
+1:	teq	r0, #HVC_RESET_VECTORS
+	beq	__hyp_stub_exit
+
+	ldr	r0, =HVC_STUB_ERR
+	__ERET
+
+__hyp_stub_exit:
+	mov	r0, #0
 	__ERET
 ENDPROC(__hyp_stub_do_trap)
 
@@ -230,15 +244,26 @@ ENDPROC(__hyp_stub_do_trap)
 * so you will need to set that to something sensible at the new hypervisor's
 * initialisation entry point.
 */
-ENTRY(__hyp_get_vectors)
-	mov	r0, #-1
-ENDPROC(__hyp_get_vectors)
-	@ fall through
 ENTRY(__hyp_set_vectors)
+	mov	r1, r0
+	mov	r0, #HVC_SET_VECTORS
 	__HVC(0)
 	ret	lr
 ENDPROC(__hyp_set_vectors)
 
+ENTRY(__hyp_soft_restart)
+	mov	r1, r0
+	mov	r0, #HVC_SOFT_RESTART
+	__HVC(0)
+	ret	lr
+ENDPROC(__hyp_soft_restart)
+
+ENTRY(__hyp_reset_vectors)
+	mov	r0, #HVC_RESET_VECTORS
+	__HVC(0)
+	ret	lr
+ENDPROC(__hyp_reset_vectors)
+
 #ifndef ZIMAGE
 	.align 2
 .L__boot_cpu_mode_offset:
@@ -246,7 +271,7 @@ ENDPROC(__hyp_set_vectors)
 #endif
 
 	.align 5
-__hyp_stub_vectors:
+ENTRY(__hyp_stub_vectors)
 __hyp_stub_reset:	W(b)	.
 __hyp_stub_und:		W(b)	.
 __hyp_stub_svc:		W(b)	.
@@ -12,10 +12,11 @@
 
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
+#include <asm/virt.h>
 
 #include "reboot.h"
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef void (*phys_reset_t)(unsigned long, bool);
 
 /*
  * Function pointers to optional machine specific functions
@@ -51,7 +52,9 @@ static void __soft_restart(void *addr)
 
 	/* Switch to the identity mapping. */
 	phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
-	phys_reset((unsigned long)addr);
+
+	/* original stub should be restored by kvm */
+	phys_reset((unsigned long)addr, is_hyp_mode_available());
 
 	/* Should never get here. */
 	BUG();
@@ -53,7 +53,6 @@ __asm__(".arch_extension	virt");
 
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
-static unsigned long hyp_default_vectors;
 
 /* Per-CPU variable containing the currently running vcpu. */
 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
@@ -209,9 +208,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_IMMEDIATE_EXIT:
 		r = 1;
 		break;
-	case KVM_CAP_COALESCED_MMIO:
-		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
-		break;
 	case KVM_CAP_ARM_SET_DEVICE_ADDR:
 		r = 1;
 		break;
@@ -230,6 +226,13 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		else
 			r = kvm->arch.vgic.msis_require_devid;
 		break;
+	case KVM_CAP_ARM_USER_IRQ:
+		/*
+		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
+		 * (bump this number if adding more devices)
+		 */
+		r = 1;
+		break;
 	default:
 		r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
 		break;
@@ -351,15 +354,14 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
 	kvm_arm_set_running_vcpu(vcpu);
+
+	kvm_vgic_load(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * The arch-generic KVM code expects the cpu field of a vcpu to be -1
 	 * if the vcpu is no longer assigned to a cpu. This is used for the
	 * optimized make_all_cpus_request path.
 	 */
+	kvm_vgic_put(vcpu);
+
 	vcpu->cpu = -1;
 
 	kvm_arm_set_running_vcpu(NULL);
@@ -517,13 +519,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 			return ret;
 	}
 
-	/*
-	 * Enable the arch timers only if we have an in-kernel VGIC
-	 * and it has been properly initialized, since we cannot handle
-	 * interrupts from the virtual timer with a userspace gic.
-	 */
-	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
-		ret = kvm_timer_enable(vcpu);
+	ret = kvm_timer_enable(vcpu);
 
 	return ret;
 }
@@ -633,16 +629,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		 * non-preemptible context.
 		 */
 		preempt_disable();
+
 		kvm_pmu_flush_hwstate(vcpu);
+
 		kvm_timer_flush_hwstate(vcpu);
 		kvm_vgic_flush_hwstate(vcpu);
 
 		local_irq_disable();
 
 		/*
-		 * Re-check atomic conditions
+		 * If we have a signal pending, or need to notify a userspace
+		 * irqchip about timer or PMU level changes, then we exit (and
+		 * update the timer level state in kvm_timer_update_run
+		 * below).
 		 */
-		if (signal_pending(current)) {
+		if (signal_pending(current) ||
+		    kvm_timer_should_notify_user(vcpu) ||
+		    kvm_pmu_should_notify_user(vcpu)) {
 			ret = -EINTR;
 			run->exit_reason = KVM_EXIT_INTR;
 		}
@@ -714,6 +717,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		ret = handle_exit(vcpu, run, ret);
 	}
 
+	/* Tell userspace about in-kernel device output levels */
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
+		kvm_timer_update_run(vcpu);
+		kvm_pmu_update_run(vcpu);
+	}
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 	return ret;
@@ -1112,8 +1121,16 @@ static void cpu_init_hyp_mode(void *dummy)
 	kvm_arm_init_debug();
 }
 
+static void cpu_hyp_reset(void)
+{
+	if (!is_kernel_in_hyp_mode())
+		__hyp_reset_vectors();
+}
+
 static void cpu_hyp_reinit(void)
 {
+	cpu_hyp_reset();
+
 	if (is_kernel_in_hyp_mode()) {
 		/*
 		 * __cpu_init_stage2() is safe to call even if the PM
@@ -1121,21 +1138,13 @@ static void cpu_hyp_reinit(void)
 		 */
 		__cpu_init_stage2();
 	} else {
-		if (__hyp_get_vectors() == hyp_default_vectors)
-			cpu_init_hyp_mode(NULL);
+		cpu_init_hyp_mode(NULL);
 	}
 
 	if (vgic_present)
 		kvm_vgic_init_cpu_hardware();
 }
 
-static void cpu_hyp_reset(void)
-{
-	if (!is_kernel_in_hyp_mode())
-		__cpu_reset_hyp_mode(hyp_default_vectors,
-				     kvm_get_idmap_start());
-}
-
 static void _kvm_arch_hardware_enable(void *discard)
 {
 	if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
@@ -1318,12 +1327,6 @@ static int init_hyp_mode(void)
 		if (err)
 			goto out_err;
 
-	/*
-	 * It is probably enough to obtain the default on one
-	 * CPU. It's unlikely to be different on the others.
-	 */
-	hyp_default_vectors = __hyp_get_vectors();
-
 	/*
 	 * Allocate stack pages for Hypervisor-mode
 	 */
@@ -40,6 +40,24 @@
  * Co-processor emulation
  *****************************************************************************/
 
+static bool write_to_read_only(struct kvm_vcpu *vcpu,
+			       const struct coproc_params *params)
+{
+	WARN_ONCE(1, "CP15 write to read-only register\n");
+	print_cp_instr(params);
+	kvm_inject_undefined(vcpu);
+	return false;
+}
+
+static bool read_from_write_only(struct kvm_vcpu *vcpu,
+				 const struct coproc_params *params)
+{
+	WARN_ONCE(1, "CP15 read to write-only register\n");
+	print_cp_instr(params);
+	kvm_inject_undefined(vcpu);
+	return false;
+}
+
 /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
 static u32 cache_levels;
 
@@ -502,15 +520,15 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
 		if (likely(r->access(vcpu, params, r))) {
 			/* Skip instruction, since it was emulated */
 			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
-			return 1;
 		}
-		/* If access function fails, it should complain. */
 	} else {
+		/* If access function fails, it should complain. */
 		kvm_err("Unsupported guest CP15 access at: %08lx\n",
 			*vcpu_pc(vcpu));
 		print_cp_instr(params);
+		kvm_inject_undefined(vcpu);
 	}
-	kvm_inject_undefined(vcpu);
+
 	return 1;
 }
 
@@ -81,24 +81,6 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
 	return true;
 }
 
-static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
-				      const struct coproc_params *params)
-{
-	kvm_debug("CP15 write to read-only register at: %08lx\n",
-		  *vcpu_pc(vcpu));
-	print_cp_instr(params);
-	return false;
-}
-
-static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
-					const struct coproc_params *params)
-{
-	kvm_debug("CP15 read to write-only register at: %08lx\n",
-		  *vcpu_pc(vcpu));
-	print_cp_instr(params);
-	return false;
-}
-
 /* Reset functions */
 static inline void reset_unknown(struct kvm_vcpu *vcpu,
 				 const struct coproc_reg *r)
@@ -160,6 +160,14 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	case ARM_EXCEPTION_DATA_ABORT:
 		kvm_inject_vabt(vcpu);
 		return 1;
+	case ARM_EXCEPTION_HYP_GONE:
+		/*
+		 * HYP has been reset to the hyp-stub. This happens
+		 * when a guest is pre-empted by kvm_reboot()'s
+		 * shutdown call.
+		 */
+		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+		return 0;
 	default:
 		kvm_pr_unimpl("Unsupported exception type: %d",
 			      exception_index);
@@ -126,11 +126,29 @@ hyp_hvc:
 	 */
 	pop	{r0, r1, r2}
 
-	/* Check for __hyp_get_vectors */
-	cmp	r0, #-1
-	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
-	beq	1f
+	/*
+	 * Check if we have a kernel function, which is guaranteed to be
+	 * bigger than the maximum hyp stub hypercall
+	 */
+	cmp	r0, #HVC_STUB_HCALL_NR
+	bhs	1f
+
+	/*
+	 * Not a kernel function, treat it as a stub hypercall.
+	 * Compute the physical address for __kvm_handle_stub_hvc
+	 * (as the code lives in the idmaped page) and branch there.
+	 * We hijack ip (r12) as a tmp register.
+	 */
+	push	{r1}
+	ldr	r1, =kimage_voffset
+	ldr	r1, [r1]
+	ldr	ip, =__kvm_handle_stub_hvc
+	sub	ip, ip, r1
+	pop	{r1}
+
+	bx	ip
 
 1:
 	push	{lr}
 
 	mov	lr, r0
@@ -142,7 +160,7 @@ THUMB(	orr	lr, #1)
 	blx	lr			@ Call the HYP function
 
 	pop	{lr}
-1:	eret
+	eret
 
 guest_trap:
 	load_vcpu r0			@ Load VCPU pointer to r0
@@ -23,6 +23,7 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/virt.h>
 
 /********************************************************************
  * Hypervisor initialization
@@ -39,6 +40,10 @@
  *   - Setup the page tables
  *   - Enable the MMU
  *   - Profit! (or eret, if you only care about the code).
+ *
+ * Another possibility is to get a HYP stub hypercall.
+ * We discriminate between the two by checking if r0 contains a value
+ * that is less than HVC_STUB_HCALL_NR.
  */
 
 	.text
@@ -58,6 +63,10 @@ __kvm_hyp_init:
 	W(b)	.
 
 __do_hyp_init:
+	@ Check for a stub hypercall
+	cmp	r0, #HVC_STUB_HCALL_NR
+	blo	__kvm_handle_stub_hvc
+
 	@ Set stack pointer
 	mov	sp, r0
 
@@ -112,20 +121,46 @@ __do_hyp_init:
 
 	eret
 
-	@ r0 : stub vectors address
-ENTRY(__kvm_hyp_reset)
+ENTRY(__kvm_handle_stub_hvc)
+	cmp	r0, #HVC_SOFT_RESTART
+	bne	1f
+
+	/* The target is expected in r1 */
+	msr	ELR_hyp, r1
+	mrs	r0, cpsr
+	bic	r0, r0, #MODE_MASK
+	orr	r0, r0, #HYP_MODE
+THUMB(	orr	r0, r0, #PSR_T_BIT	)
+	msr	spsr_cxsf, r0
+	b	reset
+
+1:	cmp	r0, #HVC_RESET_VECTORS
+	bne	1f
+
+reset:
 	/* We're now in idmap, disable MMU */
 	mrc	p15, 4, r1, c1, c0, 0	@ HSCTLR
-	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
-	bic	r1, r1, r2
+	ldr	r0, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
+	bic	r1, r1, r0
 	mcr	p15, 4, r1, c1, c0, 0	@ HSCTLR
 
-	/* Install stub vectors */
-	mcr	p15, 4, r0, c12, c0, 0	@ HVBAR
-	isb
+	/*
+	 * Install stub vectors, using ardb's VA->PA trick.
+	 */
+0:	adr	r0, 0b					@ PA(0)
+	movw	r1, #:lower16:__hyp_stub_vectors - 0b	@ VA(stub) - VA(0)
+	movt	r1, #:upper16:__hyp_stub_vectors - 0b
+	add	r1, r1, r0				@ PA(stub)
+	mcr	p15, 4, r1, c12, c0, 0	@ HVBAR
+	b	exit
+
+1:	ldr	r0, =HVC_STUB_ERR
 	eret
-ENDPROC(__kvm_hyp_reset)
+
+exit:
+	mov	r0, #0
+	eret
+ENDPROC(__kvm_handle_stub_hvc)
 
 	.ltorg
 
@@ -37,10 +37,6 @@
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
 * passed in r0 (strictly 32bit).
 *
- * A function pointer with a value of 0xffffffff has a special meaning,
- * and is used to implement __hyp_get_vectors in the same way as in
- * arch/arm/kernel/hyp_stub.S.
- *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
@@ -1524,7 +1524,8 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 			     unsigned long start,
 			     unsigned long end,
 			     int (*handler)(struct kvm *kvm,
-					    gpa_t gpa, void *data),
+					    gpa_t gpa, u64 size,
+					    void *data),
 			     void *data)
 {
 	struct kvm_memslots *slots;
@@ -1536,7 +1537,7 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 	/* we only care about the pages that the guest sees */
 	kvm_for_each_memslot(memslot, slots) {
 		unsigned long hva_start, hva_end;
-		gfn_t gfn, gfn_end;
+		gfn_t gpa;
 
 		hva_start = max(start, memslot->userspace_addr);
 		hva_end = min(end, memslot->userspace_addr +
@@ -1544,25 +1545,16 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 		if (hva_start >= hva_end)
 			continue;
 
-		/*
-		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
-		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
-		 */
-		gfn = hva_to_gfn_memslot(hva_start, memslot);
-		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
-		for (; gfn < gfn_end; ++gfn) {
-			gpa_t gpa = gfn << PAGE_SHIFT;
-			ret |= handler(kvm, gpa, data);
-		}
+		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
+		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
 	}
 
 	return ret;
 }
 
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+	unmap_stage2_range(kvm, gpa, size);
 	return 0;
 }
 
@@ -1589,10 +1581,11 @@ int kvm_unmap_hva_range(struct kvm *kvm,
 	return 0;
 }
 
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pte_t *pte = (pte_t *)data;
 
+	WARN_ON(size != PAGE_SIZE);
 	/*
 	 * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
 	 * flag clear because MMU notifiers will have unmapped a huge PMD before
@@ -1618,11 +1611,12 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
 	pmd = stage2_get_pmd(kvm, NULL, gpa);
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
@@ -1637,11 +1631,12 @@ static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 	return stage2_ptep_test_and_clear_young(pte);
 }
 
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
 	pmd_t *pmd;
 	pte_t *pte;
 
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
 	pmd = stage2_get_pmd(kvm, NULL, gpa);
 	if (!pmd || pmd_none(*pmd))	/* Nothing there */
 		return 0;
@@ -1686,11 +1681,6 @@ phys_addr_t kvm_get_idmap_vector(void)
 	return hyp_idmap_vector;
 }
 
-phys_addr_t kvm_get_idmap_start(void)
-{
-	return hyp_idmap_start;
-}
-
 static int kvm_map_idmap_text(pgd_t *pgd)
 {
 	int err;
@@ -208,9 +208,10 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
-	int ret = 1;
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
+	int ret = 1;
 
 	switch (psci_fn) {
 	case PSCI_0_2_FN_PSCI_VERSION:
@@ -230,7 +231,9 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	case PSCI_0_2_FN_CPU_ON:
 	case PSCI_0_2_FN64_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	case PSCI_0_2_FN_AFFINITY_INFO:
 	case PSCI_0_2_FN64_AFFINITY_INFO:
@@ -279,6 +282,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
@@ -288,7 +292,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		val = PSCI_RET_SUCCESS;
 		break;
 	case KVM_PSCI_FN_CPU_ON:
+		mutex_lock(&kvm->lock);
 		val = kvm_psci_vcpu_on(vcpu);
+		mutex_unlock(&kvm->lock);
 		break;
 	default:
 		val = PSCI_RET_NOT_SUPPORTED;
@@ -87,6 +87,8 @@ struct cachepolicy {
 #define s2_policy(policy)	0
 #endif
 
+unsigned long kimage_voffset __ro_after_init;
+
 static struct cachepolicy cache_policies[] __initdata = {
 	{
 		.policy		= "uncached",
@@ -1639,6 +1641,9 @@ void __init paging_init(const struct machine_desc *mdesc)
 
 	empty_zero_page = virt_to_page(zero_page);
 	__flush_dcache_page(NULL, empty_zero_page);
+
+	/* Compute the virt/idmap offset, mostly for the sake of KVM */
+	kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
 }
 
 void __init early_mm_init(const struct machine_desc *mdesc)
@@ -39,13 +39,14 @@ ENTRY(cpu_v7_proc_fin)
 ENDPROC(cpu_v7_proc_fin)
 
 /*
- *	cpu_v7_reset(loc)
+ *	cpu_v7_reset(loc, hyp)
 *
 *	Perform a soft reset of the system. Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
+ *	- hyp   - indicate if restart occurs in HYP mode
 *
 *	This code must be executed using a flat identity mapping with
 *	caches disabled.
@@ -53,11 +54,15 @@ ENDPROC(cpu_v7_proc_fin)
 	.align	5
 	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_v7_reset)
-	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
-	bic	r1, r1, #0x1			@ ...............m
- THUMB(	bic	r1, r1, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)
-	mcr	p15, 0, r1, c1, c0, 0		@ disable MMU
+	mrc	p15, 0, r2, c1, c0, 0		@ ctrl register
+	bic	r2, r2, #0x1			@ ...............m
+ THUMB(	bic	r2, r2, #1 << 30 )		@ SCTLR.TE (Thumb exceptions)
+	mcr	p15, 0, r2, c1, c0, 0		@ disable MMU
 	isb
+#ifdef CONFIG_ARM_VIRT_EXT
+	teq	r1, #0
+	bne	__hyp_soft_restart
+#endif
 	bx	r0
 ENDPROC(cpu_v7_reset)
 	.popsection
@@ -28,7 +28,7 @@
 #define ARM_EXCEPTION_EL1_SERROR  1
 #define ARM_EXCEPTION_TRAP	  2
 /* The hyp-stub will return this for any kvm_call_hyp() call */
-#define ARM_EXCEPTION_HYP_GONE	  3
+#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
 
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT	0
 #define KVM_ARM64_DEBUG_DIRTY		(1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
@@ -47,7 +47,6 @@ struct kvm_vcpu;
 
 extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
-extern char __kvm_hyp_reset[];
 
 extern char __kvm_hyp_vector[];
 
@@ -59,6 +58,8 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u64 __vgic_v3_read_vmcr(void);
+extern void __vgic_v3_write_vmcr(u32 vmcr);
 extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
@@ -31,7 +31,6 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_USER_MEM_SLOTS 512
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
@@ -42,7 +41,7 @@

#define KVM_VCPU_MAX_FEATURES 4

#define KVM_REQ_VCPU_EXIT   8
#define KVM_REQ_VCPU_EXIT   (8 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -362,13 +361,6 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
    __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
}

void __kvm_hyp_teardown(void);
static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
                                        phys_addr_t phys_idmap_start)
{
    kvm_call_hyp(__kvm_hyp_teardown, phys_idmap_start);
}

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}

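The KVM_REQ_VCPU_EXIT change above ORs behaviour flags into the request word, following the new generic VCPU request API in this merge. A hedged sketch of how such flags can be tested (the helper names are illustrative, not kernel API; only KVM_REQUEST_WAIT and KVM_REQUEST_NO_WAKEUP come from the series):

    /* Sketch only: a request word carries a small index plus behaviour flags. */
    static inline bool req_wants_wakeup(unsigned int req)
    {
        return !(req & KVM_REQUEST_NO_WAKEUP);  /* wake sleeping VCPUs */
    }

    static inline bool req_wants_sync(unsigned int req)
    {
        return req & KVM_REQUEST_WAIT;          /* kick must wait for an ack */
    }
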
@@ -155,7 +155,6 @@ void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
phys_addr_t kvm_get_idmap_start(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

@@ -19,25 +19,38 @@
#define __ASM__VIRT_H

/*
 * The arm64 hcall implementation uses x0 to specify the hcall type. A value
 * less than 0xfff indicates a special hcall, such as get/set vector.
 * Any other value is used as a pointer to the function to call.
 * The arm64 hcall implementation uses x0 to specify the hcall
 * number. A value less than HVC_STUB_HCALL_NR indicates a special
 * hcall, such as set vector. Any other value is handled in a
 * hypervisor specific way.
 *
 * The hypercall is allowed to clobber any of the caller-saved
 * registers (x0-x18), so it is advisable to use it through the
 * indirection of a function call (as implemented in hyp-stub.S).
 */

/* HVC_GET_VECTORS - Return the value of the vbar_el2 register. */
#define HVC_GET_VECTORS 0

/*
 * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
 *
 * @x1: Physical address of the new vector table.
 */
#define HVC_SET_VECTORS 1
#define HVC_SET_VECTORS 0

/*
 * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
 */
#define HVC_SOFT_RESTART 2
#define HVC_SOFT_RESTART 1

/*
 * HVC_RESET_VECTORS - Restore the vectors to the original HYP stubs
 */
#define HVC_RESET_VECTORS 2

/* Max number of HYP stub hypercalls */
#define HVC_STUB_HCALL_NR 3

/* Error returned when an invalid stub number is passed into x0 */
#define HVC_STUB_ERR 0xbadca11

#define BOOT_CPU_MODE_EL1 (0xe11)
#define BOOT_CPU_MODE_EL2 (0xe12)
@@ -61,7 +74,7 @@
extern u32 __boot_cpu_mode[2];

void __hyp_set_vectors(phys_addr_t phys_vector_base);
phys_addr_t __hyp_get_vectors(void);
void __hyp_reset_vectors(void);

/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)

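Taken together, the constants above define a tiny ABI: x0 selects a stub operation when it is below HVC_STUB_HCALL_NR, and HVC_STUB_ERR flags an invalid call. A minimal caller-side sketch of that convention (hvc_stub_call() stands in for the real trampolines in hyp-stub.S and is not a kernel API):

    extern unsigned long hvc_stub_call(unsigned long fn, unsigned long arg);

    static int try_set_hyp_vectors(phys_addr_t vectors)
    {
        /* HVC_SET_VECTORS (0) is below HVC_STUB_HCALL_NR (3) */
        unsigned long ret = hvc_stub_call(HVC_SET_VECTORS, vectors);

        /* an unknown stub number is reported as HVC_STUB_ERR (0xbadca11) */
        return (ret == HVC_STUB_ERR) ? -EINVAL : 0;
    }
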
@@ -39,6 +39,8 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1

#define KVM_REG_SIZE(id) \
    (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch {
#define KVM_GUESTDBG_USE_HW (1 << 17)

struct kvm_sync_regs {
    /* Used with KVM_CAP_ARM_USER_IRQ */
    __u64 device_irq_level;
};

struct kvm_arch_memory_slot {

@@ -55,18 +55,7 @@ ENDPROC(__hyp_stub_vectors)
    .align 11

el1_sync:
    mrs x30, esr_el2
    lsr x30, x30, #ESR_ELx_EC_SHIFT

    cmp x30, #ESR_ELx_EC_HVC64
    b.ne 9f                     // Not an HVC trap

    cmp x0, #HVC_GET_VECTORS
    b.ne 1f
    mrs x0, vbar_el2
    b 9f

1:  cmp x0, #HVC_SET_VECTORS
    cmp x0, #HVC_SET_VECTORS
    b.ne 2f
    msr vbar_el2, x1
    b 9f
@@ -79,10 +68,15 @@ el1_sync:
    mov x1, x3
    br x4                       // no return

    /* Someone called kvm_call_hyp() against the hyp-stub... */
3:  mov x0, #ARM_EXCEPTION_HYP_GONE
3:  cmp x0, #HVC_RESET_VECTORS
    beq 9f                      // Nothing to reset!

9:  eret
    /* Someone called kvm_call_hyp() against the hyp-stub... */
    ldr x0, =HVC_STUB_ERR
    eret

9:  mov x0, xzr
    eret
ENDPROC(el1_sync)

    .macro invalid_vector label
@@ -121,19 +115,15 @@ ENDPROC(\label)
 * initialisation entry point.
 */

ENTRY(__hyp_get_vectors)
    str lr, [sp, #-16]!
    mov x0, #HVC_GET_VECTORS
    hvc #0
    ldr lr, [sp], #16
    ret
ENDPROC(__hyp_get_vectors)

ENTRY(__hyp_set_vectors)
    str lr, [sp, #-16]!
    mov x1, x0
    mov x0, #HVC_SET_VECTORS
    hvc #0
    ldr lr, [sp], #16
    ret
ENDPROC(__hyp_set_vectors)

ENTRY(__hyp_reset_vectors)
    mov x0, #HVC_RESET_VECTORS
    hvc #0
    ret
ENDPROC(__hyp_reset_vectors)

@@ -22,6 +22,7 @@
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

    .text
    .pushsection .hyp.idmap.text, "ax"
@@ -58,6 +59,9 @@ __invalid:
 * x2: HYP vectors
 */
__do_hyp_init:
    /* Check for a stub HVC call */
    cmp x0, #HVC_STUB_HCALL_NR
    b.lo __kvm_handle_stub_hvc

    msr ttbr0_el2, x0

@@ -119,23 +123,45 @@ __do_hyp_init:
    eret
ENDPROC(__kvm_hyp_init)

ENTRY(__kvm_handle_stub_hvc)
    cmp x0, #HVC_SOFT_RESTART
    b.ne 1f

    /* This is where we're about to jump, staying at EL2 */
    msr elr_el2, x1
    mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
    msr spsr_el2, x0

    /* Shuffle the arguments, and don't come back */
    mov x0, x2
    mov x1, x3
    mov x2, x4
    b reset

1:  cmp x0, #HVC_RESET_VECTORS
    b.ne 1f
reset:
    /*
     * Reset kvm back to the hyp stub.
     * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
     * case we are coming via HVC_SOFT_RESTART.
     */
ENTRY(__kvm_hyp_reset)
    /* We're now in idmap, disable MMU */
    mrs x0, sctlr_el2
    ldr x1, =SCTLR_ELx_FLAGS
    bic x0, x0, x1              // Clear SCTLR_ELx.M and other flags
    msr sctlr_el2, x0
    mrs x5, sctlr_el2
    ldr x6, =SCTLR_ELx_FLAGS
    bic x5, x5, x6              // Clear SCTLR_ELx.M and other flags
    msr sctlr_el2, x5
    isb

    /* Install stub vectors */
    adr_l x0, __hyp_stub_vectors
    msr vbar_el2, x0

    adr_l x5, __hyp_stub_vectors
    msr vbar_el2, x5
    mov x0, xzr
    eret
ENDPROC(__kvm_hyp_reset)

1:  /* Bad stub call */
    ldr x0, =HVC_STUB_ERR
    eret

ENDPROC(__kvm_handle_stub_hvc)

    .ltorg

@@ -36,15 +36,12 @@
 * passed in x0.
 *
 * A function pointer with a value less than 0xfff has a special meaning,
 * and is used to implement __hyp_get_vectors in the same way as in
 * and is used to implement hyp stubs in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 * HVC behaves as a 'bl' call and will clobber lr.
 */
ENTRY(__kvm_call_hyp)
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
    str lr, [sp, #-16]!
    hvc #0
    ldr lr, [sp], #16
    ret
alternative_else_nop_endif
    b __vhe_hyp_call

@@ -32,17 +32,17 @@
 * Shuffle the parameters before calling the function
 * pointed to in x0. Assumes parameters in x[1,2,3].
 */
    str lr, [sp, #-16]!
    mov lr, x0
    mov x0, x1
    mov x1, x2
    mov x2, x3
    blr lr
    ldr lr, [sp], #16
.endm

ENTRY(__vhe_hyp_call)
    str lr, [sp, #-16]!
    do_el2_call
    ldr lr, [sp], #16
    /*
     * We used to rely on having an exception return to get
     * an implicit isb. In the E2H case, we don't have it anymore.
@@ -53,21 +53,6 @@ ENTRY(__vhe_hyp_call)
    ret
ENDPROC(__vhe_hyp_call)

/*
 * Compute the idmap address of __kvm_hyp_reset based on the idmap
 * start passed as a parameter, and jump there.
 *
 * x0: HYP phys_idmap_start
 */
ENTRY(__kvm_hyp_teardown)
    mov x4, x0
    adr_l x3, __kvm_hyp_reset

    /* insert __kvm_hyp_reset()'s offset into phys_idmap_start */
    bfi x4, x3, #0, #PAGE_SHIFT
    br x4
ENDPROC(__kvm_hyp_teardown)

el1_sync:                       // Guest trapped into EL2
    stp x0, x1, [sp, #-16]!

@@ -87,10 +72,24 @@ alternative_endif
    /* Here, we're pretty sure the host called HVC. */
    ldp x0, x1, [sp], #16

    cmp x0, #HVC_GET_VECTORS
    b.ne 1f
    mrs x0, vbar_el2
    b 2f
    /* Check for a stub HVC call */
    cmp x0, #HVC_STUB_HCALL_NR
    b.hs 1f

    /*
     * Compute the idmap address of __kvm_handle_stub_hvc and
     * jump there. Since we use kimage_voffset, do not use the
     * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
     * (by loading it from the constant pool).
     *
     * Preserve x0-x4, which may contain stub parameters.
     */
    ldr x5, =__kvm_handle_stub_hvc
    ldr_l x6, kimage_voffset

    /* x5 = __pa(x5) */
    sub x5, x5, x6
    br x5

1:
    /*
@@ -99,7 +98,7 @@ alternative_endif
    kern_hyp_va x0
    do_el2_call

2:  eret
    eret

el1_trap:
    /*

@@ -55,6 +55,15 @@
 * 64bit interface.
 */

static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 const struct sys_reg_params *params)
{
    WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
    print_sys_reg_instr(params);
    kvm_inject_undefined(vcpu);
    return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

@@ -460,35 +469,35 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
    vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
    u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
    bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

    return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
    if (!enabled)
        kvm_inject_undefined(vcpu);

    return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
    return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
    u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

    return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
             || vcpu_mode_priv(vcpu));
    return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
    u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

    return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
             || vcpu_mode_priv(vcpu));
    return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
    u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);

    return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
             || vcpu_mode_priv(vcpu));
    return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

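The refactor above centralises both the PMUSERENR check and the UNDEF injection in check_pmu_access_disabled(), so each predicate reduces to a flag choice and callers simply bail out early. A sketch of the resulting caller pattern (the handler name is made up for illustration):

    static bool access_pmu_example(struct kvm_vcpu *vcpu,
                                   struct sys_reg_params *p,
                                   const struct sys_reg_desc *r)
    {
        /* the helper injects the UNDEF itself when access is denied */
        if (pmu_access_el0_disabled(vcpu))
            return false;

        /* ...actual PMU register emulation would go here... */
        return true;
    }
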
@@ -567,8 +576,10 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)

    pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
    val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
    if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
    if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
        kvm_inject_undefined(vcpu);
        return false;
    }

    return true;
}
@@ -707,8 +718,10 @@ static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
    if (!kvm_arm_pmu_v3_ready(vcpu))
        return trap_raz_wi(vcpu, p, r);

    if (!vcpu_mode_priv(vcpu))
    if (!vcpu_mode_priv(vcpu)) {
        kvm_inject_undefined(vcpu);
        return false;
    }

    if (p->is_write) {
        u64 val = p->regval & mask;
@@ -759,16 +772,15 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
    if (!kvm_arm_pmu_v3_ready(vcpu))
        return trap_raz_wi(vcpu, p, r);

    if (!p->is_write)
        return read_from_write_only(vcpu, p);

    if (pmu_write_swinc_el0_disabled(vcpu))
        return false;

    if (p->is_write) {
        mask = kvm_pmu_valid_counter_mask(vcpu);
        kvm_pmu_software_increment(vcpu, p->regval & mask);
        return true;
    }

    return false;
    mask = kvm_pmu_valid_counter_mask(vcpu);
    kvm_pmu_software_increment(vcpu, p->regval & mask);
    return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
@@ -778,8 +790,10 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        return trap_raz_wi(vcpu, p, r);

    if (p->is_write) {
        if (!vcpu_mode_priv(vcpu))
        if (!vcpu_mode_priv(vcpu)) {
            kvm_inject_undefined(vcpu);
            return false;
        }

        vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
                                            & ARMV8_PMU_USERENR_MASK;
@@ -793,31 +807,23 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
    /* DBGBVRn_EL1 */ \
    { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
    { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
      trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
    /* DBGBCRn_EL1 */ \
    { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
    { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
      trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
    /* DBGWVRn_EL1 */ \
    { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
    { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
      trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
    /* DBGWCRn_EL1 */ \
    { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
    { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
      trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
    /* PMEVCNTRn_EL0 */ \
    { Op0(0b11), Op1(0b011), CRn(0b1110), \
      CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
    { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \
      access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n) \
    /* PMEVTYPERn_EL0 */ \
    { Op0(0b11), Op1(0b011), CRn(0b1110), \
      CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
    { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \
      access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
@@ -887,24 +893,14 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
    /* DC ISW */
    { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
      access_dcsw },
    /* DC CSW */
    { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
      access_dcsw },
    /* DC CISW */
    { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
      access_dcsw },
    { SYS_DESC(SYS_DC_ISW), access_dcsw },
    { SYS_DESC(SYS_DC_CSW), access_dcsw },
    { SYS_DESC(SYS_DC_CISW), access_dcsw },

    DBG_BCR_BVR_WCR_WVR_EL1(0),
    DBG_BCR_BVR_WCR_WVR_EL1(1),
    /* MDCCINT_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
      trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
    /* MDSCR_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
      trap_debug_regs, reset_val, MDSCR_EL1, 0 },
    { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
    { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
    DBG_BCR_BVR_WCR_WVR_EL1(2),
    DBG_BCR_BVR_WCR_WVR_EL1(3),
    DBG_BCR_BVR_WCR_WVR_EL1(4),
@@ -920,179 +916,77 @@ static const struct sys_reg_desc sys_reg_descs[] = {
    DBG_BCR_BVR_WCR_WVR_EL1(14),
    DBG_BCR_BVR_WCR_WVR_EL1(15),

    /* MDRAR_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
      trap_raz_wi },
    /* OSLAR_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
      trap_raz_wi },
    /* OSLSR_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
      trap_oslsr_el1 },
    /* OSDLR_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
      trap_raz_wi },
    /* DBGPRCR_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
      trap_raz_wi },
    /* DBGCLAIMSET_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
      trap_raz_wi },
    /* DBGCLAIMCLR_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
      trap_raz_wi },
    /* DBGAUTHSTATUS_EL1 */
    { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
      trap_dbgauthstatus_el1 },
    { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
    { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
    { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
    { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
    { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
    { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
    { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
    { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

    /* MDCCSR_EL1 */
    { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
      trap_raz_wi },
    /* DBGDTR_EL0 */
    { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
      trap_raz_wi },
    /* DBGDTR[TR]X_EL0 */
    { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
      trap_raz_wi },
    { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
    { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
    // DBGDTR[TR]X_EL0 share the same encoding
    { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

    /* DBGVCR32_EL2 */
    { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
      NULL, reset_val, DBGVCR32_EL2, 0 },
    { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

    /* MPIDR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
      NULL, reset_mpidr, MPIDR_EL1 },
    /* SCTLR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
      access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
    /* CPACR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
      NULL, reset_val, CPACR_EL1, 0 },
    /* TTBR0_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
      access_vm_reg, reset_unknown, TTBR0_EL1 },
    /* TTBR1_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
      access_vm_reg, reset_unknown, TTBR1_EL1 },
    /* TCR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
      access_vm_reg, reset_val, TCR_EL1, 0 },
    { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
    { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
    { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
    { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
    { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
    { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

    /* AFSR0_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
      access_vm_reg, reset_unknown, AFSR0_EL1 },
    /* AFSR1_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
      access_vm_reg, reset_unknown, AFSR1_EL1 },
    /* ESR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
      access_vm_reg, reset_unknown, ESR_EL1 },
    /* FAR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
      access_vm_reg, reset_unknown, FAR_EL1 },
    /* PAR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
      NULL, reset_unknown, PAR_EL1 },
    { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
    { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
    { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
    { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
    { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

    /* PMINTENSET_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
      access_pminten, reset_unknown, PMINTENSET_EL1 },
    /* PMINTENCLR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
      access_pminten, NULL, PMINTENSET_EL1 },
    { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
    { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

    /* MAIR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
      access_vm_reg, reset_unknown, MAIR_EL1 },
    /* AMAIR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
      access_vm_reg, reset_amair_el1, AMAIR_EL1 },
    { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
    { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

    /* VBAR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
      NULL, reset_val, VBAR_EL1, 0 },
    { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },

    /* ICC_SGI1R_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101),
      access_gic_sgi },
    /* ICC_SRE_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
      access_gic_sre },
    { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
    { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

    /* CONTEXTIDR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
      access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
    /* TPIDR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
      NULL, reset_unknown, TPIDR_EL1 },
    { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
    { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

    /* CNTKCTL_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
      NULL, reset_val, CNTKCTL_EL1, 0},
    { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

    /* CSSELR_EL1 */
    { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
      NULL, reset_unknown, CSSELR_EL1 },
    { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },

    /* PMCR_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
      access_pmcr, reset_pmcr, },
    /* PMCNTENSET_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
      access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
    /* PMCNTENCLR_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
      access_pmcnten, NULL, PMCNTENSET_EL0 },
    /* PMOVSCLR_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
      access_pmovs, NULL, PMOVSSET_EL0 },
    /* PMSWINC_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
      access_pmswinc, reset_unknown, PMSWINC_EL0 },
    /* PMSELR_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
      access_pmselr, reset_unknown, PMSELR_EL0 },
    /* PMCEID0_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
      access_pmceid },
    /* PMCEID1_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
      access_pmceid },
    /* PMCCNTR_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
      access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
    /* PMXEVTYPER_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
      access_pmu_evtyper },
    /* PMXEVCNTR_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
      access_pmu_evcntr },
    /* PMUSERENR_EL0
     * This register resets as unknown in 64bit mode while it resets as zero
    { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
    { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
    { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
    { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
    { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
    { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
    { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
    { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
    { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
    { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
    { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
    /*
     * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
     * in 32bit mode. Here we choose to reset it as zero for consistency.
     */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
      access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
    /* PMOVSSET_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
      access_pmovs, reset_unknown, PMOVSSET_EL0 },
    { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
    { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

    /* TPIDR_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
      NULL, reset_unknown, TPIDR_EL0 },
    /* TPIDRRO_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
      NULL, reset_unknown, TPIDRRO_EL0 },
    { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
    { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

    /* CNTP_TVAL_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b000),
      access_cntp_tval },
    /* CNTP_CTL_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b001),
      access_cntp_ctl },
    /* CNTP_CVAL_EL0 */
    { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b0010), Op2(0b010),
      access_cntp_cval },
    { SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
    { SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
    { SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },

    /* PMEVCNTRn_EL0 */
    PMU_PMEVCNTR_EL0(0),
@@ -1158,22 +1052,15 @@ static const struct sys_reg_desc sys_reg_descs[] = {
    PMU_PMEVTYPER_EL0(28),
    PMU_PMEVTYPER_EL0(29),
    PMU_PMEVTYPER_EL0(30),
    /* PMCCFILTR_EL0
     * This register resets as unknown in 64bit mode while it resets as zero
    /*
     * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
     * in 32bit mode. Here we choose to reset it as zero for consistency.
     */
    { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
      access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
    { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

    /* DACR32_EL2 */
    { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
      NULL, reset_unknown, DACR32_EL2 },
    /* IFSR32_EL2 */
    { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
      NULL, reset_unknown, IFSR32_EL2 },
    /* FPEXC32_EL2 */
    { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
      NULL, reset_val, FPEXC32_EL2, 0x70 },
    { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
    { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
    { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
@@ -1557,6 +1444,22 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
    return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *params,
                           const struct sys_reg_desc *r)
{
    /*
     * Not having an accessor means that we have configured a trap
     * that we don't know how to handle. This certainly qualifies
     * as a gross bug that should be fixed right away.
     */
    BUG_ON(!r->access);

    /* Skip instruction if instructed so */
    if (likely(r->access(vcpu, params, r)))
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 * call the corresponding trap handler.
@@ -1580,20 +1483,8 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
    r = find_reg(params, table, num);

    if (r) {
        /*
         * Not having an accessor means that we have
         * configured a trap that we don't know how to
         * handle. This certainly qualifies as a gross bug
         * that should be fixed right away.
         */
        BUG_ON(!r->access);

        if (likely(r->access(vcpu, params, r))) {
            /* Skip instruction, since it was emulated */
            kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
            /* Handled */
            return 0;
        }
        perform_access(vcpu, params, r);
        return 0;
    }

    /* Not handled */
@@ -1660,20 +1551,25 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
        params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
    }

    if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
        goto out;
    if (!emulate_cp(vcpu, &params, global, nr_global))
        goto out;
    /*
     * Try to emulate the coprocessor access using the target
     * specific table first, and using the global table afterwards.
     * If either of the tables contains a handler, handle the
     * potential register operation in the case of a read and return
     * with success.
     */
    if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
        !emulate_cp(vcpu, &params, global, nr_global)) {
        /* Split up the value between registers for the read side */
        if (!params.is_write) {
            vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
            vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
        }

    unhandled_cp_access(vcpu, &params);

out:
    /* Split up the value between registers for the read side */
    if (!params.is_write) {
        vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
        vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
        return 1;
    }

    unhandled_cp_access(vcpu, &params);
    return 1;
}
@@ -1763,26 +1659,13 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
    r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

    if (likely(r)) {
        /*
         * Not having an accessor means that we have
         * configured a trap that we don't know how to
         * handle. This certainly qualifies as a gross bug
         * that should be fixed right away.
         */
        BUG_ON(!r->access);

        if (likely(r->access(vcpu, params, r))) {
            /* Skip instruction, since it was emulated */
            kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
            return 1;
        }
        /* If access function fails, it should complain. */
        perform_access(vcpu, params, r);
    } else {
        kvm_err("Unsupported guest sys_reg access at: %lx\n",
                *vcpu_pc(vcpu));
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
    }
    kvm_inject_undefined(vcpu);
    return 1;
}

@@ -1932,44 +1815,25 @@ FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
      NULL, get_midr_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
      NULL, get_revidr_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
      NULL, get_id_pfr0_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
      NULL, get_id_pfr1_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
      NULL, get_id_dfr0_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
      NULL, get_id_afr0_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
      NULL, get_id_mmfr0_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
      NULL, get_id_mmfr1_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
      NULL, get_id_mmfr2_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
      NULL, get_id_mmfr3_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
      NULL, get_id_isar0_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
      NULL, get_id_isar1_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
      NULL, get_id_isar2_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
      NULL, get_id_isar3_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
      NULL, get_id_isar4_el1 },
    { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
      NULL, get_id_isar5_el1 },
    { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
      NULL, get_clidr_el1 },
    { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
      NULL, get_aidr_el1 },
    { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
      NULL, get_ctr_el0 },
    { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
    { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
    { SYS_DESC(SYS_ID_PFR0_EL1), NULL, get_id_pfr0_el1 },
    { SYS_DESC(SYS_ID_PFR1_EL1), NULL, get_id_pfr1_el1 },
    { SYS_DESC(SYS_ID_DFR0_EL1), NULL, get_id_dfr0_el1 },
    { SYS_DESC(SYS_ID_AFR0_EL1), NULL, get_id_afr0_el1 },
    { SYS_DESC(SYS_ID_MMFR0_EL1), NULL, get_id_mmfr0_el1 },
    { SYS_DESC(SYS_ID_MMFR1_EL1), NULL, get_id_mmfr1_el1 },
    { SYS_DESC(SYS_ID_MMFR2_EL1), NULL, get_id_mmfr2_el1 },
    { SYS_DESC(SYS_ID_MMFR3_EL1), NULL, get_id_mmfr3_el1 },
    { SYS_DESC(SYS_ID_ISAR0_EL1), NULL, get_id_isar0_el1 },
    { SYS_DESC(SYS_ID_ISAR1_EL1), NULL, get_id_isar1_el1 },
    { SYS_DESC(SYS_ID_ISAR2_EL1), NULL, get_id_isar2_el1 },
    { SYS_DESC(SYS_ID_ISAR3_EL1), NULL, get_id_isar3_el1 },
    { SYS_DESC(SYS_ID_ISAR4_EL1), NULL, get_id_isar4_el1 },
    { SYS_DESC(SYS_ID_ISAR5_EL1), NULL, get_id_isar5_el1 },
    { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
    { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
    { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)

@@ -83,24 +83,6 @@ static inline bool read_zero(struct kvm_vcpu *vcpu,
    return true;
}

static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
                                      const struct sys_reg_params *params)
{
    kvm_debug("sys_reg write to read-only register at: %lx\n",
              *vcpu_pc(vcpu));
    print_sys_reg_instr(params);
    return false;
}

static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
                                        const struct sys_reg_params *params)
{
    kvm_debug("sys_reg read to write-only register at: %lx\n",
              *vcpu_pc(vcpu));
    print_sys_reg_instr(params);
    return false;
}

/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
                                 const struct sys_reg_desc *r)
@@ -147,4 +129,9 @@ const struct sys_reg_desc *find_reg_by_id(u64 id,
#define CRm(_x)     .CRm = _x
#define Op2(_x)     .Op2 = _x

#define SYS_DESC(reg)                                   \
    Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)),       \
    CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)),       \
    Op2(sys_reg_Op2(reg))

#endif /* __ARM64_KVM_SYS_REGS_LOCAL_H__ */

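SYS_DESC() keys a table entry on the architectural encoding instead of hand-written Op/CR fields. Expanding one entry by hand shows the equivalence; the encoding values below are the ones the old table spelled out for MDSCR_EL1:

    /* { SYS_DESC(SYS_MDSCR_EL1), ... } expands, via the macros above, to: */
    { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), ... }
    /* i.e. .Op0 = 2, .Op1 = 0, .CRn = 0, .CRm = 2, .Op2 = 2 */
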
@@ -52,9 +52,7 @@ static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 */
static const struct sys_reg_desc genericv8_sys_regs[] = {
    /* ACTLR_EL1 */
    { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b001),
      access_actlr, reset_actlr, ACTLR_EL1 },
    { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
};

static const struct sys_reg_desc genericv8_cp15_regs[] = {

@@ -1686,6 +1686,7 @@ config CPU_CAVIUM_OCTEON
    select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
    select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
    select MIPS_L1_CACHE_SHIFT_7
    select HAVE_KVM
    help
      The Cavium Octeon processor is a highly integrated chip containing
      many ethernet hardware widgets for networking tasks. The processor

@@ -444,6 +444,10 @@
# define cpu_has_msa        0
#endif

#ifndef cpu_has_ufr
# define cpu_has_ufr        (cpu_data[0].options & MIPS_CPU_UFR)
#endif

#ifndef cpu_has_fre
# define cpu_has_fre        (cpu_data[0].options & MIPS_CPU_FRE)
#endif
@@ -528,6 +532,9 @@
#ifndef cpu_guest_has_htw
#define cpu_guest_has_htw       (cpu_data[0].guest.options & MIPS_CPU_HTW)
#endif
#ifndef cpu_guest_has_mvh
#define cpu_guest_has_mvh       (cpu_data[0].guest.options & MIPS_CPU_MVH)
#endif
#ifndef cpu_guest_has_msa
#define cpu_guest_has_msa       (cpu_data[0].guest.ases & MIPS_ASE_MSA)
#endif
@@ -543,6 +550,9 @@
#ifndef cpu_guest_has_maar
#define cpu_guest_has_maar      (cpu_data[0].guest.options & MIPS_CPU_MAAR)
#endif
#ifndef cpu_guest_has_userlocal
#define cpu_guest_has_userlocal (cpu_data[0].guest.options & MIPS_CPU_ULRI)
#endif

/*
 * Guest dynamic capabilities

@@ -33,6 +33,7 @@ struct guest_info {
    unsigned long       ases_dyn;
    unsigned long long  options;
    unsigned long long  options_dyn;
    int                 tlbsize;
    u8                  conf;
    u8                  kscratch_mask;
};
@@ -109,6 +110,7 @@ struct cpuinfo_mips {
    struct guest_info   guest;
    unsigned int        gtoffset_mask;
    unsigned int        guestid_mask;
    unsigned int        guestid_cache;
} __attribute__((aligned(SMP_CACHE_BYTES)));

extern struct cpuinfo_mips cpu_data[];

@@ -415,6 +415,7 @@ enum cpu_type_enum {
#define MIPS_CPU_GUESTCTL2  MBIT_ULL(50) /* CPU has VZ GuestCtl2 register */
#define MIPS_CPU_GUESTID    MBIT_ULL(51) /* CPU uses VZ ASE GuestID feature */
#define MIPS_CPU_DRG        MBIT_ULL(52) /* CPU has VZ Direct Root to Guest (DRG) */
#define MIPS_CPU_UFR        MBIT_ULL(53) /* CPU supports User mode FR switching */

/*
 * CPU ASE encodings

@ -10,6 +10,7 @@
|
||||
#ifndef __MIPS_KVM_HOST_H__
|
||||
#define __MIPS_KVM_HOST_H__
|
||||
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/interrupt.h>
|
||||
@ -33,12 +34,23 @@
|
||||
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
|
||||
#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
|
||||
#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
|
||||
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG MIPS_CP0_32(4, 1)
|
||||
#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
|
||||
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG MIPS_CP0_64(4, 3)
|
||||
#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
|
||||
#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
|
||||
#define KVM_REG_MIPS_CP0_SEGCTL0 MIPS_CP0_64(5, 2)
|
||||
#define KVM_REG_MIPS_CP0_SEGCTL1 MIPS_CP0_64(5, 3)
|
||||
#define KVM_REG_MIPS_CP0_SEGCTL2 MIPS_CP0_64(5, 4)
|
||||
#define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
|
||||
#define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
|
||||
#define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
|
||||
#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
|
||||
#define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
|
||||
#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
|
||||
#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
|
||||
#define KVM_REG_MIPS_CP0_BADINSTR MIPS_CP0_32(8, 1)
|
||||
#define KVM_REG_MIPS_CP0_BADINSTRP MIPS_CP0_32(8, 2)
|
||||
#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
|
||||
#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
|
||||
#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
|
||||
@ -55,6 +67,7 @@
|
||||
#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
|
||||
#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
|
||||
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
|
||||
#define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
|
||||
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
|
||||
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
|
||||
#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
|
||||
@ -70,9 +83,13 @@
|
||||
/* memory slots that does not exposed to userspace */
|
||||
#define KVM_PRIVATE_MEM_SLOTS 0
|
||||
|
||||
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
|
||||
#define KVM_HALT_POLL_NS_DEFAULT 500000
|
||||
|
||||
#ifdef CONFIG_KVM_MIPS_VZ
|
||||
extern unsigned long GUESTID_MASK;
|
||||
extern unsigned long GUESTID_FIRST_VERSION;
|
||||
extern unsigned long GUESTID_VERSION_MASK;
|
||||
#endif
|
||||
|
||||
|
||||
/*
|
||||
@ -145,6 +162,16 @@ struct kvm_vcpu_stat {
|
||||
u64 fpe_exits;
|
||||
u64 msa_disabled_exits;
|
||||
u64 flush_dcache_exits;
|
||||
#ifdef CONFIG_KVM_MIPS_VZ
|
||||
u64 vz_gpsi_exits;
|
||||
u64 vz_gsfc_exits;
|
||||
u64 vz_hc_exits;
|
||||
u64 vz_grr_exits;
|
||||
u64 vz_gva_exits;
|
||||
u64 vz_ghfc_exits;
|
||||
u64 vz_gpa_exits;
|
||||
u64 vz_resvd_exits;
|
||||
#endif
|
||||
u64 halt_successful_poll;
|
||||
u64 halt_attempted_poll;
|
||||
u64 halt_poll_invalid;
|
||||
@ -157,6 +184,8 @@ struct kvm_arch_memory_slot {
|
||||
struct kvm_arch {
|
||||
/* Guest physical mm */
|
||||
struct mm_struct gpa_mm;
|
||||
/* Mask of CPUs needing GPA ASID flush */
|
||||
cpumask_t asid_flush_mask;
|
||||
};
|
||||
|
||||
#define N_MIPS_COPROC_REGS 32
|
||||
@ -214,6 +243,11 @@ struct mips_coproc {
|
||||
#define MIPS_CP0_CONFIG4_SEL 4
|
||||
#define MIPS_CP0_CONFIG5_SEL 5
|
||||
|
||||
#define MIPS_CP0_GUESTCTL2 10
|
||||
#define MIPS_CP0_GUESTCTL2_SEL 5
|
||||
#define MIPS_CP0_GTOFFSET 12
|
||||
#define MIPS_CP0_GTOFFSET_SEL 7
|
||||
|
||||
/* Resume Flags */
|
||||
#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
|
||||
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
|
||||
@ -229,6 +263,7 @@ enum emulation_result {
|
||||
EMULATE_WAIT, /* WAIT instruction */
|
||||
EMULATE_PRIV_FAIL,
|
||||
EMULATE_EXCEPT, /* A guest exception has been generated */
|
||||
EMULATE_HYPERCALL, /* HYPCALL instruction */
|
||||
};
|
||||
|
||||
#define mips3_paddr_to_tlbpfn(x) \
|
||||
@ -276,13 +311,18 @@ struct kvm_mmu_memory_cache {
|
||||
struct kvm_vcpu_arch {
|
||||
void *guest_ebase;
|
||||
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
|
||||
|
||||
/* Host registers preserved across guest mode execution */
|
||||
unsigned long host_stack;
|
||||
unsigned long host_gp;
|
||||
unsigned long host_pgd;
|
||||
unsigned long host_entryhi;
|
||||
|
||||
/* Host CP0 registers used when handling exits from guest */
|
||||
unsigned long host_cp0_badvaddr;
|
||||
unsigned long host_cp0_epc;
|
||||
u32 host_cp0_cause;
|
||||
u32 host_cp0_guestctl0;
|
||||
u32 host_cp0_badinstr;
|
||||
u32 host_cp0_badinstrp;
|
||||
|
||||
@ -340,7 +380,23 @@ struct kvm_vcpu_arch {
|
||||
/* Cache some mmu pages needed inside spinlock regions */
|
||||
struct kvm_mmu_memory_cache mmu_page_cache;
|
||||
|
||||
#ifdef CONFIG_KVM_MIPS_VZ
|
||||
/* vcpu's vzguestid is different on each host cpu in an smp system */
|
||||
u32 vzguestid[NR_CPUS];
|
||||
|
||||
/* wired guest TLB entries */
|
||||
struct kvm_mips_tlb *wired_tlb;
|
||||
unsigned int wired_tlb_limit;
|
||||
unsigned int wired_tlb_used;
|
||||
|
||||
/* emulated guest MAAR registers */
|
||||
unsigned long maar[6];
|
||||
#endif
|
||||
|
||||
/* Last CPU the VCPU state was loaded on */
|
||||
int last_sched_cpu;
|
||||
/* Last CPU the VCPU actually executed guest code on */
|
||||
int last_exec_cpu;
|
||||
|
||||
/* WAIT executed */
|
||||
int wait;
|
||||
@ -349,78 +405,6 @@ struct kvm_vcpu_arch {
|
||||
u8 msa_enabled;
|
||||
};
|
||||
|
||||
|
||||
#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
|
||||
#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
|
||||
#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
|
||||
#define kvm_write_c0_guest_entrylo0(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO0][0] = (val))
|
||||
#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
|
||||
#define kvm_write_c0_guest_entrylo1(cop0, val) (cop0->reg[MIPS_CP0_TLB_LO1][0] = (val))
|
||||
#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
|
||||
#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
|
||||
#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
|
||||
#define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
|
||||
#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
|
||||
#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
|
||||
#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
|
||||
#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
|
||||
#define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
|
||||
#define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
|
||||
#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
|
||||
#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
|
||||
#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
|
||||
#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
|
||||
#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
|
||||
#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
|
||||
#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
|
||||
#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
|
||||
#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
|
||||
#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
|
||||
#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
|
||||
#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
|
||||
#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
|
||||
#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
|
||||
#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
|
||||
#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
|
||||
#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
|
||||
#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
|
||||
#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
|
||||
#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
|
||||
#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
|
||||
#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
|
||||
#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
|
||||
#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
|
||||
#define kvm_read_c0_guest_config4(cop0) (cop0->reg[MIPS_CP0_CONFIG][4])
|
||||
#define kvm_read_c0_guest_config5(cop0) (cop0->reg[MIPS_CP0_CONFIG][5])
|
||||
#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
|
||||
#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
|
||||
#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
|
||||
#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
|
||||
#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
|
||||
#define kvm_write_c0_guest_config4(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][4] = (val))
|
||||
#define kvm_write_c0_guest_config5(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][5] = (val))
|
||||
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
|
||||
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
|
||||
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
|
||||
#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
|
||||
#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
|
||||
#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
|
||||
#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
|
||||
#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
|
||||
#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
|
||||
#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
|
||||
#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
|
||||
#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
|
||||
#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
|
||||
#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
|
||||
#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
|
||||
|
||||
/*
|
||||
* Some of the guest registers may be modified asynchronously (e.g. from a
|
||||
* hrtimer callback in hard irq context) and therefore need stronger atomicity
|
||||
* guarantees than other registers.
|
||||
*/
|
||||
|
||||
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
|
||||
unsigned long val)
|
||||
{
|
||||
@ -471,26 +455,286 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
|
||||
} while (unlikely(!temp));
|
||||
}
|
||||
|
||||
#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
|
||||
#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
|
||||
/* Guest register types, used in accessor build below */
|
||||
#define __KVMT32 u32
|
||||
#define __KVMTl unsigned long
|
||||
|
||||
/* Cause can be modified asynchronously from hardirq hrtimer callback */
|
||||
#define kvm_set_c0_guest_cause(cop0, val) \
|
||||
_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
|
||||
#define kvm_clear_c0_guest_cause(cop0, val) \
|
||||
_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
|
||||
#define kvm_change_c0_guest_cause(cop0, change, val) \
|
||||
_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
|
||||
change, val)
|
||||
/*
|
||||
* __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
|
||||
* These operate on the saved guest C0 state in RAM.
|
||||
*/
|
||||
|
||||
#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
|
||||
#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
|
||||
#define kvm_change_c0_guest_ebase(cop0, change, val) \
|
||||
/* Generate saved context simple accessors */
|
||||
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
|
||||
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
|
||||
{ \
|
||||
kvm_clear_c0_guest_ebase(cop0, change); \
|
||||
kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
|
||||
return cop0->reg[(_reg)][(sel)]; \
|
||||
} \
|
||||
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
|
||||
__KVMT##type val) \
|
||||
{ \
|
||||
cop0->reg[(_reg)][(sel)] = val; \
|
||||
}

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
                     __KVMT##type val) \
{ \
    cop0->reg[(_reg)][(sel)] |= val; \
} \
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
                       __KVMT##type val) \
{ \
    cop0->reg[(_reg)][(sel)] &= ~val; \
} \
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
                        __KVMT##type mask, \
                        __KVMT##type val) \
{ \
    unsigned long _mask = mask; \
    cop0->reg[(_reg)][(sel)] &= ~_mask; \
    cop0->reg[(_reg)][(sel)] |= val & _mask; \
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
                     __KVMT##type val) \
{ \
    _kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
} \
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
                       __KVMT##type val) \
{ \
    _kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
} \
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
                        __KVMT##type mask, \
                        __KVMT##type val) \
{ \
    _kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
                    val); \
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{ \
    return read_gc0_##name(); \
} \
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
                       __KVMT##type val) \
{ \
    write_gc0_##name(val); \
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
                     __KVMT##type val) \
{ \
    set_gc0_##name(val); \
} \
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
                       __KVMT##type val) \
{ \
    clear_gc0_##name(val); \
} \
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
                        __KVMT##type mask, \
                        __KVMT##type val) \
{ \
    change_gc0_##name(mask, val); \
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
{ \
    write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
} \
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
{ \
    cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
}
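
For orientation (a sketch, not from the patch): once __BUILD_KVM_RW_HW(entryhi, l, ...) below has been expanded, a VZ implementation can spill the live hardware guest register to the RAM copy around a vcpu reschedule, and refill it afterwards:

    kvm_save_gc0_entryhi(cop0);     /* RAM copy <- hardware guest CP0 */
    /* ... vcpu scheduled out and back in ... */
    kvm_restore_gc0_entryhi(cop0);  /* hardware guest CP0 <- RAM copy */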

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
{ \
    return kvm_read_##name2(cop0); \
} \
static inline void kvm_write_##name1(struct mips_coproc *cop0, \
                     __KVMT##type val) \
{ \
    kvm_write_##name2(cop0, val); \
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
static inline void kvm_set_##name1(struct mips_coproc *cop0, \
                   __KVMT##type val) \
{ \
    kvm_set_##name2(cop0, val); \
} \
static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
                     __KVMT##type val) \
{ \
    kvm_clear_##name2(cop0, val); \
} \
static inline void kvm_change_##name1(struct mips_coproc *cop0, \
                      __KVMT##type mask, \
                      __KVMT##type val) \
{ \
    kvm_change_##name2(cop0, mask, val); \
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
    __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
    __BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
    __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
    __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
    __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
    __BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
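
For orientation (a sketch): __BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0), emitted in the register table below, first generates kvm_read_sw_gc0_prid()/kvm_write_sw_gc0_prid() via the SAVED builder, then wraps them so common emulation code can use the implementation-neutral names:

    u32 prid = kvm_read_c0_guest_prid(cop0);  /* -> kvm_read_sw_gc0_prid() */
    kvm_write_c0_guest_prid(cop0, prid);      /* -> kvm_write_sw_gc0_prid() */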

#ifndef CONFIG_KVM_MIPS_VZ

/*
 * T&E (trap & emulate software based virtualisation)
 * We generate the common accessors operating exclusively on the saved context
 * in RAM.
 */

#define __BUILD_KVM_RW_HW	__BUILD_KVM_RW_SW
#define __BUILD_KVM_SET_HW	__BUILD_KVM_SET_SW
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_ATOMIC_SW

#else

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers),
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
    __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
    __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
    __BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
    __BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
    __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
    __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
    __BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

#endif

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw         name            type  reg num              select
 */
__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
__BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2)
__BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3)
__BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4)
__BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5)
__BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6)
__BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7)
__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
__BUILD_KVM_RW_HW(pwctl, 32, MIPS_CP0_TLB_WIRED, 6)
__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
__BUILD_KVM_RW_HW(badinstr, 32, MIPS_CP0_BAD_VADDR, 1)
__BUILD_KVM_RW_HW(badinstrp, 32, MIPS_CP0_BAD_VADDR, 2)
__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
__BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2)
__BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_SET_SAVED(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_SET_SAVED(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_SET_SAVED(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_SET_SAVED(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_SET_SAVED(config5, 32, MIPS_CP0_CONFIG, 5)
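
For orientation (a sketch): common emulation code uses the kvm_*_c0_guest_*() names from this table and stays agnostic to the backend. Under T&E the same call reads the saved context in RAM; under VZ it reads the live guest CP0 register through read_gc0_cause(). Note the atomicity caveat above: under VZ the clear is not atomic, since __BUILD_KVM_ATOMIC_HW aliases __BUILD_KVM_SET_HW.

    u32 cause = kvm_read_c0_guest_cause(cop0);
    if (cause & CAUSEF_TI)
        kvm_clear_c0_guest_cause(cop0, CAUSEF_TI); /* atomic on T&E only */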

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)

@@ -531,6 +775,10 @@ struct kvm_mips_callbacks {
    int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
    int (*handle_fpe)(struct kvm_vcpu *vcpu);
    int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
    int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
    int (*hardware_enable)(void);
    void (*hardware_disable)(void);
    int (*check_extension)(struct kvm *kvm, long ext);
    int (*vcpu_init)(struct kvm_vcpu *vcpu);
    void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
    int (*vcpu_setup)(struct kvm_vcpu *vcpu);

@@ -599,6 +847,10 @@ u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
                      struct kvm_vcpu *vcpu, bool write_fault);
#endif
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
                       struct kvm_vcpu *vcpu,
                       bool write_fault);

@@ -625,6 +877,18 @@ extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
                     unsigned long entryhi);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
                unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
              unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
              unsigned int count);
#endif

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

@@ -795,7 +1059,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);

@@ -803,6 +1067,20 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
                 u32 count, int min_drift);

#ifdef CONFIG_KVM_MIPS_VZ
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
#else
static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
#endif

enum emulation_result kvm_mips_check_privilege(u32 cause,
                           u32 *opc,
                           struct kvm_run *run,

@@ -827,11 +1105,20 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                        struct kvm_run *run,
                        struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
                        union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
                      u32 *opc, struct kvm_vcpu *vcpu);

@@ -846,7 +1133,6 @@ extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);

static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,

@@ -36,7 +36,7 @@ unsigned platform_maar_init(unsigned num_pairs);
 * @upper:	The highest address that the MAAR pair will affect. Must be
 *		aligned to one byte before a 2^16 byte boundary.
 * @attrs:	The accessibility attributes to program, eg. MIPS_MAAR_S. The
 *		MIPS_MAAR_V attribute will automatically be set.
 *		MIPS_MAAR_VL attribute will automatically be set.
 *
 * Program the pair of MAAR registers specified by idx to apply the attributes
 * specified by attrs to the range of addresses from lower to higher.

@@ -49,10 +49,10 @@ static inline void write_maar_pair(unsigned idx, phys_addr_t lower,
    BUG_ON(((upper & 0xffff) != 0xffff)
           || ((upper & ~0xffffull) & ~(MIPS_MAAR_ADDR << 4)));

    /* Automatically set MIPS_MAAR_V */
    attrs |= MIPS_MAAR_V;
    /* Automatically set MIPS_MAAR_VL */
    attrs |= MIPS_MAAR_VL;

    /* Write the upper address & attributes (only MIPS_MAAR_V matters) */
    /* Write the upper address & attributes (only MIPS_MAAR_VL matters) */
    write_c0_maari(idx << 1);
    back_to_back_c0_hazard();
    write_c0_maar(((upper >> 4) & MIPS_MAAR_ADDR) | attrs);

@@ -81,7 +81,7 @@ extern void maar_init(void);
 * @upper:	The highest address that the MAAR pair will affect. Must be
 *		aligned to one byte before a 2^16 byte boundary.
 * @attrs:	The accessibility attributes to program, eg. MIPS_MAAR_S. The
 *		MIPS_MAAR_V attribute will automatically be set.
 *		MIPS_MAAR_VL attribute will automatically be set.
 *
 * Describes the configuration of a pair of Memory Accessibility Attribute
 * Registers - applying attributes from attrs to the range of physical
@@ -34,8 +34,10 @@
 */
#ifdef __ASSEMBLY__
#define _ULCAST_
#define _U64CAST_
#else
#define _ULCAST_ (unsigned long)
#define _U64CAST_ (u64)
#endif

/*

@@ -217,8 +219,10 @@
/*
 * Wired register bits
 */
#define MIPSR6_WIRED_LIMIT	(_ULCAST_(0xffff) << 16)
#define MIPSR6_WIRED_WIRED	(_ULCAST_(0xffff) << 0)
#define MIPSR6_WIRED_LIMIT_SHIFT	16
#define MIPSR6_WIRED_LIMIT	(_ULCAST_(0xffff) << MIPSR6_WIRED_LIMIT_SHIFT)
#define MIPSR6_WIRED_WIRED_SHIFT	0
#define MIPSR6_WIRED_WIRED	(_ULCAST_(0xffff) << MIPSR6_WIRED_WIRED_SHIFT)

/*
 * Values used for computation of new tlb entries

@@ -645,6 +649,7 @@
#define MIPS_CONF5_LLB		(_ULCAST_(1) << 4)
#define MIPS_CONF5_MVH		(_ULCAST_(1) << 5)
#define MIPS_CONF5_VP		(_ULCAST_(1) << 7)
#define MIPS_CONF5_SBRI		(_ULCAST_(1) << 6)
#define MIPS_CONF5_FRE		(_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE		(_ULCAST_(1) << 9)
#define MIPS_CONF5_MSAEN	(_ULCAST_(1) << 27)

@@ -719,10 +724,14 @@
#define XLR_PERFCTRL_ALLTHREADS	(_ULCAST_(1) << 13)

/* MAAR bit definitions */
#define MIPS_MAAR_VH		(_U64CAST_(1) << 63)
#define MIPS_MAAR_ADDR		((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
#define MIPS_MAAR_ADDR_SHIFT	12
#define MIPS_MAAR_S		(_ULCAST_(1) << 1)
#define MIPS_MAAR_V		(_ULCAST_(1) << 0)
#define MIPS_MAAR_VL		(_ULCAST_(1) << 0)

/* MAARI bit definitions */
#define MIPS_MAARI_INDEX	(_ULCAST_(0x3f) << 0)

/* EBase bit definitions */
#define MIPS_EBASE_CPUNUM_SHIFT	0

@@ -736,6 +745,10 @@
#define MIPS_CMGCRB_BASE	11
#define MIPS_CMGCRF_BASE	(~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))

/* LLAddr bit definitions */
#define MIPS_LLADDR_LLB_SHIFT	0
#define MIPS_LLADDR_LLB		(_ULCAST_(1) << MIPS_LLADDR_LLB_SHIFT)

/*
 * Bits in the MIPS32 Memory Segmentation registers.
 */

@@ -961,6 +974,22 @@
/* Flush FTLB */
#define LOONGSON_DIAG_FTLB	(_ULCAST_(1) << 13)

/* CvmCtl register field definitions */
#define CVMCTL_IPPCI_SHIFT	7
#define CVMCTL_IPPCI		(_U64CAST_(0x7) << CVMCTL_IPPCI_SHIFT)
#define CVMCTL_IPTI_SHIFT	4
#define CVMCTL_IPTI		(_U64CAST_(0x7) << CVMCTL_IPTI_SHIFT)

/* CvmMemCtl2 register field definitions */
#define CVMMEMCTL2_INHIBITTS	(_U64CAST_(1) << 17)

/* CvmVMConfig register field definitions */
#define CVMVMCONF_DGHT		(_U64CAST_(1) << 60)
#define CVMVMCONF_MMUSIZEM1_S	12
#define CVMVMCONF_MMUSIZEM1	(_U64CAST_(0xff) << CVMVMCONF_MMUSIZEM1_S)
#define CVMVMCONF_RMMUSIZEM1_S	0
#define CVMVMCONF_RMMUSIZEM1	(_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)
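
For orientation (a sketch): the CvmVMConfig size fields are stored minus one, as the M1 suffix suggests, so deriving the guest and root MMU sizes looks roughly like this, using the read_c0_cvmvmconfig() accessor added further down:

    u64 conf = read_c0_cvmvmconfig();
    unsigned int guest_mmu_size =
        ((conf & CVMVMCONF_MMUSIZEM1) >> CVMVMCONF_MMUSIZEM1_S) + 1;
    unsigned int root_mmu_size =
        ((conf & CVMVMCONF_RMMUSIZEM1) >> CVMVMCONF_RMMUSIZEM1_S) + 1;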

/*
 * Coprocessor 1 (FPU) register names
 */

@@ -1720,6 +1749,13 @@ do { \

#define read_c0_cvmmemctl()	__read_64bit_c0_register($11, 7)
#define write_c0_cvmmemctl(val)	__write_64bit_c0_register($11, 7, val)

#define read_c0_cvmmemctl2()	__read_64bit_c0_register($16, 6)
#define write_c0_cvmmemctl2(val) __write_64bit_c0_register($16, 6, val)

#define read_c0_cvmvmconfig()	__read_64bit_c0_register($16, 7)
#define write_c0_cvmvmconfig(val) __write_64bit_c0_register($16, 7, val)

/*
 * The cacheerr registers are not standardized. On OCTEON, they are
 * 64 bits wide.

@@ -1989,6 +2025,8 @@ do { \
#define read_gc0_epc()		__read_ulong_gc0_register(14, 0)
#define write_gc0_epc(val)	__write_ulong_gc0_register(14, 0, val)

#define read_gc0_prid()		__read_32bit_gc0_register(15, 0)

#define read_gc0_ebase()	__read_32bit_gc0_register(15, 1)
#define write_gc0_ebase(val)	__write_32bit_gc0_register(15, 1, val)

@@ -2012,6 +2050,9 @@ do { \
#define write_gc0_config6(val)	__write_32bit_gc0_register(16, 6, val)
#define write_gc0_config7(val)	__write_32bit_gc0_register(16, 7, val)

#define read_gc0_lladdr()	__read_ulong_gc0_register(17, 0)
#define write_gc0_lladdr(val)	__write_ulong_gc0_register(17, 0, val)

#define read_gc0_watchlo0()	__read_ulong_gc0_register(18, 0)
#define read_gc0_watchlo1()	__read_ulong_gc0_register(18, 1)
#define read_gc0_watchlo2()	__read_ulong_gc0_register(18, 2)

@@ -2090,6 +2131,19 @@ do { \
#define write_gc0_kscratch5(val)	__write_ulong_gc0_register(31, 6, val)
#define write_gc0_kscratch6(val)	__write_ulong_gc0_register(31, 7, val)

/* Cavium OCTEON (cnMIPS) */
#define read_gc0_cvmcount()	__read_ulong_gc0_register(9, 6)
#define write_gc0_cvmcount(val)	__write_ulong_gc0_register(9, 6, val)

#define read_gc0_cvmctl()	__read_64bit_gc0_register(9, 7)
#define write_gc0_cvmctl(val)	__write_64bit_gc0_register(9, 7, val)

#define read_gc0_cvmmemctl()	__read_64bit_gc0_register(11, 7)
#define write_gc0_cvmmemctl(val) __write_64bit_gc0_register(11, 7, val)

#define read_gc0_cvmmemctl2()	__read_64bit_gc0_register(16, 6)
#define write_gc0_cvmmemctl2(val) __write_64bit_gc0_register(16, 6, val)

/*
 * Macros to access the floating point coprocessor control registers
 */

@@ -2696,9 +2750,11 @@ __BUILD_SET_C0(brcm_mode)
 */
#define __BUILD_SET_GC0(name)	__BUILD_SET_COMMON(gc0_##name)

__BUILD_SET_GC0(wired)
__BUILD_SET_GC0(status)
__BUILD_SET_GC0(cause)
__BUILD_SET_GC0(ebase)
__BUILD_SET_GC0(config1)

/*
 * Return low 10 bits of ebase.

@@ -21,9 +21,11 @@
 */
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

#define UNIQUE_ENTRYHI(idx) \
    ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \
#define _UNIQUE_ENTRYHI(base, idx) \
    (((base) + ((idx) << (PAGE_SHIFT + 1))) | \
    (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0))
#define UNIQUE_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG0, idx)
#define UNIQUE_GUEST_ENTRYHI(idx) _UNIQUE_ENTRYHI(CKSEG1, idx)
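
For orientation (a sketch, assuming a write_gc0_entryhi() accessor analogous to the gc0 accessors above): UNIQUE_GUEST_ENTRYHI produces a per-index EntryHi in an unmapped segment (EHINV where supported), so a guest TLB entry can be invalidated the same way UNIQUE_ENTRYHI is used for the root TLB:

    write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(idx)); /* then tlbwi via guest ops */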

static inline unsigned int num_wired_entries(void)
{

@@ -179,7 +179,7 @@ enum cop0_coi_func {
    tlbr_op = 0x01, tlbwi_op = 0x02,
    tlbwr_op = 0x06, tlbp_op = 0x08,
    rfe_op = 0x10, eret_op = 0x18,
    wait_op = 0x20,
    wait_op = 0x20, hypcall_op = 0x28
};

/*

@@ -21,6 +21,8 @@

#define __KVM_HAVE_READONLY_MEM

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1

/*
 * for KVM_GET_REGS and KVM_SET_REGS
 *

@@ -54,9 +56,14 @@ struct kvm_fpu {
 * Register set = 0: GP registers from kvm_regs (see definitions below).
 *
 * Register set = 1: CP0 registers.
 *  bits[15..8]  - Must be zero.
 *  bits[7..3]   - Register 'rd' index.
 *  bits[2..0]   - Register 'sel' index.
 *  bits[15..8]  - COP0 register set.
 *
 *  COP0 register set = 0: Main CP0 registers.
 *   bits[7..3]   - Register 'rd' index.
 *   bits[2..0]   - Register 'sel' index.
 *
 *  COP0 register set = 1: MAARs.
 *   bits[7..0]   - MAAR index.
 *
 * Register set = 2: KVM specific registers (see definitions below).
 *

@@ -114,6 +121,15 @@ struct kvm_fpu {
#define KVM_REG_MIPS_PC		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)


/*
 * KVM_REG_MIPS_CP0 - Coprocessor 0 registers.
 */

#define KVM_REG_MIPS_MAAR	(KVM_REG_MIPS_CP0 | (1 << 8))
#define KVM_REG_MIPS_CP0_MAAR(n)	(KVM_REG_MIPS_MAAR | \
					 KVM_REG_SIZE_U64 | (n))
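
For orientation (a sketch; vcpu_fd and maar_val are assumed to exist in the caller): the encoded id is what userspace passes to the one-reg ioctls to read or write guest MAAR n:

    u64 maar_val = 0;                          /* hypothetical buffer */
    struct kvm_one_reg reg = {
        .id   = KVM_REG_MIPS_CP0_MAAR(0),      /* COP0 set 1, MAAR index 0 */
        .addr = (unsigned long)&maar_val,
    };
    ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);     /* vcpu_fd from KVM_CREATE_VCPU */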

/*
 * KVM_REG_MIPS_KVM - KVM specific control registers.
 */

@@ -289,6 +289,8 @@ static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
             MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
        if (c->fpu_id & MIPS_FPIR_3D)
            c->ases |= MIPS_ASE_MIPS3D;
        if (c->fpu_id & MIPS_FPIR_UFRP)
            c->options |= MIPS_CPU_UFR;
        if (c->fpu_id & MIPS_FPIR_FREP)
            c->options |= MIPS_CPU_FRE;
    }

@@ -1003,7 +1005,8 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
    unsigned int config3, config3_dyn;

    probe_gc0_config_dyn(config3, config3, config3_dyn,
                 MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_CTXTC);
                 MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI |
                 MIPS_CONF3_CTXTC);

    if (config3 & MIPS_CONF3_CTXTC)
        c->guest.options |= MIPS_CPU_CTXTC;

@@ -1013,6 +1016,9 @@ static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
    if (config3 & MIPS_CONF3_PW)
        c->guest.options |= MIPS_CPU_HTW;

    if (config3 & MIPS_CONF3_ULRI)
        c->guest.options |= MIPS_CPU_ULRI;

    if (config3 & MIPS_CONF3_SC)
        c->guest.options |= MIPS_CPU_SEGMENTS;

@@ -1051,7 +1057,7 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
    unsigned int config5, config5_dyn;

    probe_gc0_config_dyn(config5, config5, config5_dyn,
                 MIPS_CONF_M | MIPS_CONF5_MRP);
                 MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP);

    if (config5 & MIPS_CONF5_MRP)
        c->guest.options |= MIPS_CPU_MAAR;

@@ -1061,6 +1067,9 @@ static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
    if (config5 & MIPS_CONF5_LLB)
        c->guest.options |= MIPS_CPU_RW_LLB;

    if (config5 & MIPS_CONF5_MVH)
        c->guest.options |= MIPS_CPU_MVH;

    if (config5 & MIPS_CONF_M)
        c->guest.conf |= BIT(6);
    return config5 & MIPS_CONF_M;

@@ -70,6 +70,7 @@ EXPORT_SYMBOL(perf_irq);
 */

unsigned int mips_hpt_frequency;
EXPORT_SYMBOL_GPL(mips_hpt_frequency);

/*
 * This function exists in order to cause an error due to a duplicate

@@ -26,11 +26,34 @@ config KVM
	select SRCU
	---help---
	  Support for hosting Guest kernels.
	  Currently supported on MIPS32 processors.

choice
	prompt "Virtualization mode"
	depends on KVM
	default KVM_MIPS_TE

config KVM_MIPS_TE
	bool "Trap & Emulate"
	---help---
	  Use trap and emulate to virtualize 32-bit guests in user mode. This
	  does not require any special hardware Virtualization support beyond
	  standard MIPS32/64 r2 or later, but it does require the guest kernel
	  to be configured with CONFIG_KVM_GUEST=y so that it resides in the
	  user address segment.

config KVM_MIPS_VZ
	bool "MIPS Virtualization (VZ) ASE"
	---help---
	  Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
	  supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
	  but requires hardware support.

endchoice

config KVM_MIPS_DYN_TRANS
	bool "KVM/MIPS: Dynamic binary translation to reduce traps"
	depends on KVM
	depends on KVM_MIPS_TE
	default y
	---help---
	  When running in Trap & Emulate mode patch privileged
	  instructions to reduce the number of traps.

@@ -9,8 +9,15 @@ common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o

kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
	    interrupt.o stats.o commpage.o \
	    dyntrans.o trap_emul.o fpu.o
	    fpu.o
kvm-objs += hypcall.o
kvm-objs += mmu.o

ifdef CONFIG_KVM_MIPS_VZ
kvm-objs += vz.o
else
kvm-objs += dyntrans.o
kvm-objs += trap_emul.o
endif
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o

@@ -308,7 +308,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
 *          CP0_Cause.DC bit or the count_ctl.DC bit.
 *          0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;

@@ -467,7 +467,7 @@ u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
    ktime_t now;

@@ -516,6 +516,82 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
    hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

/**
 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
 * @vcpu:	Virtual CPU.
 * @before:	Time before Count was saved, lower bound of drift calculation.
 * @count:	CP0_Count at point of restore.
 * @min_drift:	Minimum amount of drift permitted before correction.
 *		Must be <= 0.
 *
 * Restores the timer from a particular @count, accounting for drift. This can
 * be used in conjunction with kvm_mips_freeze_timer() when a hardware timer is
 * to be used for a period of time, but the exact ktime corresponding to the
 * final Count that must be restored is not known.
 *
 * It is guaranteed that a timer interrupt immediately after restore will be
 * handled, but not if CP0_Compare is exactly at @count. That case should
 * already be handled when the hardware timer state is saved.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
 * stopped).
 *
 * Returns:	Amount of correction to count_bias due to drift.
 */
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
                 u32 count, int min_drift)
{
    ktime_t now, count_time;
    u32 now_count, before_count;
    u64 delta;
    int drift, ret = 0;

    /* Calculate expected count at before */
    before_count = vcpu->arch.count_bias +
            kvm_mips_ktime_to_count(vcpu, before);

    /*
     * Detect significantly negative drift, where count is lower than
     * expected. Some negative drift is expected when hardware counter is
     * set after kvm_mips_freeze_timer(), and it is harmless to allow the
     * time to jump forwards a little, within reason. If the drift is too
     * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
     */
    drift = count - before_count;
    if (drift < min_drift) {
        count_time = before;
        vcpu->arch.count_bias += drift;
        ret = drift;
        goto resume;
    }

    /* Calculate expected count right now */
    now = ktime_get();
    now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);

    /*
     * Detect positive drift, where count is higher than expected, and
     * adjust the bias to avoid guest time going backwards.
     */
    drift = count - now_count;
    if (drift > 0) {
        count_time = now;
        vcpu->arch.count_bias += drift;
        ret = drift;
        goto resume;
    }

    /* Subtract nanosecond delta to find ktime when count was read */
    delta = (u64)(u32)(now_count - count);
    delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
    count_time = ktime_sub_ns(now, delta);

resume:
    /* Resume using the calculated ktime */
    kvm_mips_resume_hrtimer(vcpu, count_time, count);
    return ret;
}
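
As a worked instance of the final branch above, take count_hz = 100 MHz (the rate the old kvm_mips_init_count() hardcoded): if now_count - count = 5000 ticks, then delta = 5000 * NSEC_PER_SEC / 10^8 = 50,000 ns, so the hrtimer is resumed as though Count had been read 50 us before now, keeping Compare's expiry consistent with the restored Count value.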

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.

@@ -543,16 +619,15 @@ void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of timer.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
 * it going if it's enabled.
 * Initialise the timer to the specified frequency, zero it, and set it going if
 * it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
{
    /* 100 MHz */
    vcpu->arch.count_hz = 100*1000*1000;
    vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
                      vcpu->arch.count_hz);
    vcpu->arch.count_hz = count_hz;
    vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
    vcpu->arch.count_dyn_bias = 0;

    /* Starting at 0 */

@@ -622,7 +697,9 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    int dc;
    u32 old_compare = kvm_read_c0_guest_compare(cop0);
    ktime_t now;
    s32 delta = compare - old_compare;
    u32 cause;
    ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
    u32 count;

    /* if unchanged, must just be an ack */

@@ -634,6 +711,21 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
        return;
    }

    /*
     * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
     * too to prevent guest CP0_Count hitting guest CP0_Compare.
     *
     * The new GTOffset corresponds to the new value of CP0_Compare, and is
     * set prior to it being written into the guest context. We disable
     * preemption until the new value is written to prevent restore of a
     * GTOffset corresponding to the old CP0_Compare value.
     */
    if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
        preempt_disable();
        write_c0_gtoffset(compare - read_c0_count());
        back_to_back_c0_hazard();
    }

    /* freeze_hrtimer() takes care of timer interrupts <= count */
    dc = kvm_mips_count_disabled(vcpu);
    if (!dc)

@@ -641,12 +733,36 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)

    if (ack)
        kvm_mips_callbacks->dequeue_timer_int(vcpu);
    else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
        /*
         * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
         * preserve guest CP0_Cause.TI if we don't want to ack it.
         */
        cause = kvm_read_c0_guest_cause(cop0);

    kvm_write_c0_guest_compare(cop0, compare);

    if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
        if (delta > 0)
            preempt_enable();

        back_to_back_c0_hazard();

        if (!ack && cause & CAUSEF_TI)
            kvm_write_c0_guest_cause(cop0, cause);
    }

    /* resume_hrtimer() takes care of timer interrupts > count */
    if (!dc)
        kvm_mips_resume_hrtimer(vcpu, now, count);

    /*
     * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
     * until after the new CP0_Compare is written, otherwise new guest
     * CP0_Count could hit new guest CP0_Compare.
     */
    if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
        write_c0_gtoffset(compare - read_c0_count());
}

/**

@@ -857,6 +973,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
    ++vcpu->stat.wait_exits;
    trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
    if (!vcpu->arch.pending_exceptions) {
        kvm_vz_lose_htimer(vcpu);
        vcpu->arch.wait = 1;
        kvm_vcpu_block(vcpu);

@@ -865,7 +982,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
         * check if any I/O interrupts are pending.
         */
        if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
            clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
            kvm_clear_request(KVM_REQ_UNHALT, vcpu);
            vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
        }
    }

@@ -873,17 +990,62 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
    return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
                    unsigned long entryhi)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
    int cpu, i;
    u32 nasid = entryhi & KVM_ENTRYHI_ASID;

    if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
        trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
                    KVM_ENTRYHI_ASID, nasid);

        /*
         * Flush entries from the GVA page tables.
         * Guest user page table will get flushed lazily on re-entry to
         * guest user if the guest ASID actually changes.
         */
        kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);

        /*
         * Regenerate/invalidate kernel MMU context.
         * The user MMU context will be regenerated lazily on re-entry
         * to guest user if the guest ASID actually changes.
         */
        preempt_disable();
        cpu = smp_processor_id();
        get_new_mmu_context(kern_mm, cpu);
        for_each_possible_cpu(i)
            if (i != cpu)
                cpu_context(i, kern_mm) = 0;
        preempt_enable();
    }
    kvm_write_c0_guest_entryhi(cop0, entryhi);
}

enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_mips_tlb *tlb;
    unsigned long pc = vcpu->arch.pc;
    int index;

    kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
    return EMULATE_FAIL;
    index = kvm_read_c0_guest_index(cop0);
    if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
        /* UNDEFINED */
        kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
        index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
    }

    tlb = &vcpu->arch.guest_tlb[index];
    kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
    kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
    kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
    kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);

    return EMULATE_DONE;
}

/**

@@ -1105,11 +1267,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                       struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
    enum emulation_result er = EMULATE_DONE;
    u32 rt, rd, sel;
    unsigned long curr_pc;
    int cpu, i;

    /*
     * Update PC and hold onto current PC in case there is

@@ -1143,6 +1303,9 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
        case wait_op:
            er = kvm_mips_emul_wait(vcpu);
            break;
        case hypcall_op:
            er = kvm_mips_emul_hypcall(vcpu, inst);
            break;
        }
    } else {
        rt = inst.c0r_format.rt;

@@ -1208,44 +1371,8 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
                        vcpu->arch.gprs[rt]);
            } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                u32 nasid =
                    vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
                if (((kvm_read_c0_guest_entryhi(cop0) &
                      KVM_ENTRYHI_ASID) != nasid)) {
                    trace_kvm_asid_change(vcpu,
                        kvm_read_c0_guest_entryhi(cop0)
                            & KVM_ENTRYHI_ASID,
                        nasid);

                    /*
                     * Flush entries from the GVA page
                     * tables.
                     * Guest user page table will get
                     * flushed lazily on re-entry to guest
                     * user if the guest ASID actually
                     * changes.
                     */
                    kvm_mips_flush_gva_pt(kern_mm->pgd,
                                  KMF_KERN);

                    /*
                     * Regenerate/invalidate kernel MMU
                     * context.
                     * The user MMU context will be
                     * regenerated lazily on re-entry to
                     * guest user if the guest ASID actually
                     * changes.
                     */
                    preempt_disable();
                    cpu = smp_processor_id();
                    get_new_mmu_context(kern_mm, cpu);
                    for_each_possible_cpu(i)
                        if (i != cpu)
                            cpu_context(i, kern_mm) = 0;
                    preempt_enable();
                }
                kvm_write_c0_guest_entryhi(cop0,
                               vcpu->arch.gprs[rt]);
                kvm_mips_change_entryhi(vcpu,
                            vcpu->arch.gprs[rt]);
            }
            /* Are we writing to COUNT */
            else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {

@@ -1474,9 +1601,8 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                         struct kvm_run *run,
                         struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DO_MMIO;
    enum emulation_result er;
    u32 rt;
    u32 bytes;
    void *data = run->mmio.data;
    unsigned long curr_pc;

@@ -1491,103 +1617,74 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,

    rt = inst.i_format.rt;

    switch (inst.i_format.opcode) {
    case sb_op:
        bytes = 1;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                run->mmio.len);
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                               host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;
        *(u8 *) data = vcpu->arch.gprs[rt];
        kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
              vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
              *(u8 *) data);
    run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
                        vcpu->arch.host_cp0_badvaddr);
    if (run->mmio.phys_addr == KVM_INVALID_ADDR)
        goto out_fail;

    switch (inst.i_format.opcode) {
#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
    case sd_op:
        run->mmio.len = 8;
        *(u64 *)data = vcpu->arch.gprs[rt];

        kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n",
              vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
              vcpu->arch.gprs[rt], *(u64 *)data);
        break;
#endif

    case sw_op:
        bytes = 4;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                run->mmio.len);
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                               host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }

        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;
        *(u32 *) data = vcpu->arch.gprs[rt];
        run->mmio.len = 4;
        *(u32 *)data = vcpu->arch.gprs[rt];

        kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
              vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
              vcpu->arch.gprs[rt], *(u32 *) data);
              vcpu->arch.gprs[rt], *(u32 *)data);
        break;

    case sh_op:
        bytes = 2;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                run->mmio.len);
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                               host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }

        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;
        *(u16 *) data = vcpu->arch.gprs[rt];
        run->mmio.len = 2;
        *(u16 *)data = vcpu->arch.gprs[rt];

        kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
              vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
              vcpu->arch.gprs[rt], *(u32 *) data);
              vcpu->arch.gprs[rt], *(u16 *)data);
        break;

    case sb_op:
        run->mmio.len = 1;
        *(u8 *)data = vcpu->arch.gprs[rt];

        kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
              vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
              vcpu->arch.gprs[rt], *(u8 *)data);
        break;

    default:
        kvm_err("Store not yet supported (inst=0x%08x)\n",
            inst.word);
        er = EMULATE_FAIL;
        break;
        goto out_fail;
    }

    /* Rollback PC if emulation was unsuccessful */
    if (er == EMULATE_FAIL)
        vcpu->arch.pc = curr_pc;
    run->mmio.is_write = 1;
    vcpu->mmio_needed = 1;
    vcpu->mmio_is_write = 1;
    return EMULATE_DO_MMIO;

    return er;
out_fail:
    /* Rollback PC if emulation was unsuccessful */
    vcpu->arch.pc = curr_pc;
    return EMULATE_FAIL;
}

enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                        u32 cause, struct kvm_run *run,
                        struct kvm_vcpu *vcpu)
{
    enum emulation_result er = EMULATE_DO_MMIO;
    enum emulation_result er;
    unsigned long curr_pc;
    u32 op, rt;
    u32 bytes;

    rt = inst.i_format.rt;
    op = inst.i_format.opcode;

@@ -1606,96 +1703,53 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,

    vcpu->arch.io_gpr = rt;

    switch (op) {
    case lw_op:
        bytes = 4;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                run->mmio.len);
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                               host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }
    run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
                        vcpu->arch.host_cp0_badvaddr);
    if (run->mmio.phys_addr == KVM_INVALID_ADDR)
        return EMULATE_FAIL;

        run->mmio.len = bytes;
        run->mmio.is_write = 0;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->mmio_needed = 2;	/* signed */
    switch (op) {
#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
    case ld_op:
        run->mmio.len = 8;
        break;

    case lwu_op:
        vcpu->mmio_needed = 1;	/* unsigned */
        /* fall through */
#endif
    case lw_op:
        run->mmio.len = 4;
        break;

    case lh_op:
    case lhu_op:
        bytes = 2;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                run->mmio.len);
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                               host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }

        run->mmio.len = bytes;
        run->mmio.is_write = 0;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;

        if (op == lh_op)
            vcpu->mmio_needed = 2;
        else
            vcpu->mmio_needed = 1;

        vcpu->mmio_needed = 1;	/* unsigned */
        /* fall through */
    case lh_op:
        run->mmio.len = 2;
        break;

    case lbu_op:
        vcpu->mmio_needed = 1;	/* unsigned */
        /* fall through */
    case lb_op:
        bytes = 1;
        if (bytes > sizeof(run->mmio.data)) {
            kvm_err("%s: bad MMIO length: %d\n", __func__,
                run->mmio.len);
            er = EMULATE_FAIL;
            break;
        }
        run->mmio.phys_addr =
            kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                               host_cp0_badvaddr);
        if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
            er = EMULATE_FAIL;
            break;
        }

        run->mmio.len = bytes;
        run->mmio.is_write = 0;
        vcpu->mmio_is_write = 0;

        if (op == lb_op)
            vcpu->mmio_needed = 2;
        else
            vcpu->mmio_needed = 1;

        run->mmio.len = 1;
        break;

    default:
        kvm_err("Load not yet supported (inst=0x%08x)\n",
            inst.word);
        er = EMULATE_FAIL;
        break;
        vcpu->mmio_needed = 0;
        return EMULATE_FAIL;
    }

    return er;
    run->mmio.is_write = 0;
    vcpu->mmio_is_write = 0;
    return EMULATE_DO_MMIO;
}

#ifndef CONFIG_KVM_MIPS_VZ
static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
                             unsigned long curr_pc,
                             unsigned long addr,

@@ -1786,11 +1840,35 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
          vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
          arch->gprs[base], offset);

    if (cache == Cache_D)
    if (cache == Cache_D) {
#ifdef CONFIG_CPU_R4K_CACHE_TLB
        r4k_blast_dcache();
    else if (cache == Cache_I)
#else
        switch (boot_cpu_type()) {
        case CPU_CAVIUM_OCTEON3:
            /* locally flush icache */
            local_flush_icache_range(0, 0);
            break;
        default:
            __flush_cache_all();
            break;
        }
#endif
    } else if (cache == Cache_I) {
#ifdef CONFIG_CPU_R4K_CACHE_TLB
        r4k_blast_icache();
    else {
#else
        switch (boot_cpu_type()) {
        case CPU_CAVIUM_OCTEON3:
            /* locally flush icache */
            local_flush_icache_range(0, 0);
            break;
        default:
            flush_icache_all();
            break;
        }
#endif
    } else {
        kvm_err("%s: unsupported CACHE INDEX operation\n",
            __func__);
        return EMULATE_FAIL;

@@ -1870,18 +1948,6 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
    case cop0_op:
        er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
        break;
    case sb_op:
    case sh_op:
    case sw_op:
        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
        break;
    case lb_op:
    case lbu_op:
    case lhu_op:
    case lh_op:
    case lw_op:
        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
        break;

#ifndef CONFIG_CPU_MIPSR6
    case cache_op:

@@ -1915,6 +1981,7 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,

    return er;
}
#endif /* CONFIG_KVM_MIPS_VZ */

/**
 * kvm_mips_guest_exception_base() - Find guest exception vector base address.

@@ -2524,8 +2591,15 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
    vcpu->arch.pc = vcpu->arch.io_pc;

    switch (run->mmio.len) {
    case 8:
        *gpr = *(s64 *)run->mmio.data;
        break;

    case 4:
        *gpr = *(s32 *) run->mmio.data;
        if (vcpu->mmio_needed == 2)
            *gpr = *(s32 *)run->mmio.data;
        else
            *gpr = *(u32 *)run->mmio.data;
        break;

    case 2:

@@ -51,12 +51,15 @@
#define RA		31

/* Some CP0 registers */
#define C0_PWBASE	5, 5
#define C0_HWRENA	7, 0
#define C0_BADVADDR	8, 0
#define C0_BADINSTR	8, 1
#define C0_BADINSTRP	8, 2
#define C0_ENTRYHI	10, 0
#define C0_GUESTCTL1	10, 4
#define C0_STATUS	12, 0
#define C0_GUESTCTL0	12, 6
#define C0_CAUSE	13, 0
#define C0_EPC		14, 0
#define C0_EBASE	15, 1

@@ -292,8 +295,8 @@ static void *kvm_mips_build_enter_guest(void *addr)
    unsigned int i;
    struct uasm_label labels[2];
    struct uasm_reloc relocs[2];
    struct uasm_label *l = labels;
    struct uasm_reloc *r = relocs;
    struct uasm_label __maybe_unused *l = labels;
    struct uasm_reloc __maybe_unused *r = relocs;

    memset(labels, 0, sizeof(labels));
    memset(relocs, 0, sizeof(relocs));

@@ -302,7 +305,67 @@ static void *kvm_mips_build_enter_guest(void *addr)
    UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
    UASM_i_MTC0(&p, T0, C0_EPC);

    /* Set the ASID for the Guest Kernel */
#ifdef CONFIG_KVM_MIPS_VZ
    /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
    UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg);
    UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1);

    /*
     * Set up KVM GPA pgd.
     * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
     * - call tlbmiss_handler_setup_pgd(mm->pgd)
     * - write mm->pgd into CP0_PWBase
     *
     * We keep S0 pointing at struct kvm so we can load the ASID below.
     */
    UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) -
              (int)offsetof(struct kvm_vcpu, arch), K1);
    UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0);
    UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
    uasm_i_jalr(&p, RA, T9);
    /* delay slot */
    if (cpu_has_htw)
        UASM_i_MTC0(&p, A0, C0_PWBASE);
    else
        uasm_i_nop(&p);

    /* Set GM bit to setup eret to VZ guest context */
    uasm_i_addiu(&p, V1, ZERO, 1);
    uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
    uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1);
    uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

    if (cpu_has_guestid) {
        /*
         * Set root mode GuestID, so that root TLB refill handler can
         * use the correct GuestID in the root TLB.
         */

        /* Get current GuestID */
        uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
        /* Set GuestCtl1.RID = GuestCtl1.ID */
        uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT,
               MIPS_GCTL1_ID_WIDTH);
        uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT,
               MIPS_GCTL1_RID_WIDTH);
        uasm_i_mtc0(&p, T0, C0_GUESTCTL1);

        /* GuestID handles dealiasing so we don't need to touch ASID */
        goto skip_asid_restore;
    }

    /* Root ASID Dealias (RAD) */

    /* Save host ASID */
    UASM_i_MFC0(&p, K0, C0_ENTRYHI);
    UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
          K1);

    /* Set the root ASID for the Guest */
    UASM_i_ADDIU(&p, T1, S0,
             offsetof(struct kvm, arch.gpa_mm.context.asid));
#else
    /* Set the ASID for the Guest Kernel or User */
    UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
    UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
          T0);

@@ -315,6 +378,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
    UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
                      guest_user_mm.context.asid));
    uasm_l_kernel_asid(&l, p);
#endif

    /* t1: contains the base of the ASID array, need to get the cpu id */
    /* smp_processor_id */

@@ -339,6 +403,7 @@ static void *kvm_mips_build_enter_guest(void *addr)
    uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif

#ifndef CONFIG_KVM_MIPS_VZ
    /*
     * Set up KVM T&E GVA pgd.
     * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():

@@ -351,7 +416,11 @@ static void *kvm_mips_build_enter_guest(void *addr)
    UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
    uasm_i_jalr(&p, RA, T9);
    uasm_i_mtc0(&p, K0, C0_ENTRYHI);

#else
    /* Set up KVM VZ root ASID (!guestid) */
    uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
#endif
    uasm_i_ehb(&p);

    /* Disable RDHWR access */

@@ -559,13 +628,10 @@ void *kvm_mips_build_exit(void *addr)
    /* Now that context has been saved, we can use other registers */

    /* Restore vcpu */
    UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
    uasm_i_move(&p, S1, A1);
    UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

    /* Restore run (vcpu->run) */
    UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
    /* Save pointer to run in s0, will be saved by the compiler */
    uasm_i_move(&p, S0, A0);
    UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);

    /*
     * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process

@@ -641,6 +707,52 @@ void *kvm_mips_build_exit(void *addr)
        uasm_l_msa_1(&l, p);
    }

#ifdef CONFIG_KVM_MIPS_VZ
    /* Restore host ASID */
    if (!cpu_has_guestid) {
        UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
              K1);
        UASM_i_MTC0(&p, K0, C0_ENTRYHI);
    }

    /*
     * Set up normal Linux process pgd.
     * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
     * - call tlbmiss_handler_setup_pgd(mm->pgd)
     * - write mm->pgd into CP0_PWBase
     */
    UASM_i_LW(&p, A0,
          offsetof(struct kvm_vcpu_arch, host_pgd), K1);
    UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
    uasm_i_jalr(&p, RA, T9);
    /* delay slot */
    if (cpu_has_htw)
        UASM_i_MTC0(&p, A0, C0_PWBASE);
    else
        uasm_i_nop(&p);

    /* Clear GM bit so we don't enter guest mode when EXL is cleared */
    uasm_i_mfc0(&p, K0, C0_GUESTCTL0);
    uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1);
    uasm_i_mtc0(&p, K0, C0_GUESTCTL0);

    /* Save GuestCtl0 so we can access GExcCode after CPU migration */
    uasm_i_sw(&p, K0,
          offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1);

    if (cpu_has_guestid) {
        /*
         * Clear root mode GuestID, so that root TLB operations use the
         * root GuestID in the root TLB.
         */
        uasm_i_mfc0(&p, T0, C0_GUESTCTL1);
        /* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */
        uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT,
               MIPS_GCTL1_RID_WIDTH);
        uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
    }
#endif

    /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
    uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
    uasm_i_and(&p, V0, V0, AT);

@@ -680,6 +792,8 @@ void *kvm_mips_build_exit(void *addr)
     * Now jump to the kvm_mips_handle_exit() to see if we can deal
     * with this in the kernel
     */
    uasm_i_move(&p, A0, S0);
    uasm_i_move(&p, A1, S1);
    UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
    uasm_i_jalr(&p, RA, T9);
    UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
arch/mips/kvm/hypcall.c (new file, 53 lines)
@@ -0,0 +1,53 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Hypercall handling.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm_para.h>

#define MAX_HYPCALL_ARGS	4

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst)
{
	unsigned int code = (inst.co_format.code >> 5) & 0x3ff;

	kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code);

	switch (code) {
	case 0:
		return EMULATE_HYPERCALL;
	default:
		return EMULATE_FAIL;
	};
}

static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num,
			      const unsigned long *args, unsigned long *hret)
{
	/* Report unimplemented hypercall to guest */
	*hret = -KVM_ENOSYS;
	return RESUME_GUEST;
}

int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu)
{
	unsigned long num, args[MAX_HYPCALL_ARGS];

	/* read hypcall number and arguments */
	num = vcpu->arch.gprs[2];	/* v0 */
	args[0] = vcpu->arch.gprs[4];	/* a0 */
	args[1] = vcpu->arch.gprs[5];	/* a1 */
	args[2] = vcpu->arch.gprs[6];	/* a2 */
	args[3] = vcpu->arch.gprs[7];	/* a3 */

	return kvm_mips_hypercall(vcpu, num,
				  args, &vcpu->arch.gprs[2] /* v0 */);
}
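For illustration, a guest could invoke this ABI roughly as follows. This is a sketch only, not part of the patch: the HYPCALL instruction word and the helper name are assumptions, while the register convention (v0 = number, a0-a3 = arguments, result in v0) is the one read by kvm_mips_handle_hypcall() above.

	/* Hypothetical guest-side helper; 0x42000028 is assumed to encode
	 * the VZ HYPCALL instruction with code field 0, the only code that
	 * kvm_mips_emul_hypcall() accepts above. */
	static inline long mips_kvm_hypcall1(long num, long arg0)
	{
		register long v0 asm("v0") = num;
		register long a0 asm("a0") = arg0;

		asm volatile(".word 0x42000028"	/* hypcall 0 */
			     : "+r"(v0)
			     : "r"(a0)
			     : "memory");
		return v0;	/* -KVM_ENOSYS until a hypercall is implemented */
	}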
arch/mips/kvm/interrupt.h
@@ -30,8 +30,13 @@

#define C_TI        (_ULCAST_(1) << 30)

#ifdef CONFIG_KVM_MIPS_VZ
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (1)
#else
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
#endif

void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
arch/mips/kvm/mips.c
@@ -59,6 +59,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
	{ "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
	{ "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
	{ "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
	{ "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
	{ "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
	{ "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
	{ "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
	{ "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
#endif
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
@@ -66,6 +76,19 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
	{NULL}
};

bool kvm_trace_guest_mode_change;

int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = 1;
	return 0;
}

void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = 0;
}

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
@@ -82,7 +105,12 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)

int kvm_arch_hardware_enable(void)
{
	return 0;
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void)
@@ -97,6 +125,18 @@ void kvm_arch_check_processor_compat(void *rtn)

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
#ifdef CONFIG_KVM_MIPS_VZ
	case KVM_VM_MIPS_VZ:
#else
	case KVM_VM_MIPS_TE:
#endif
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	};

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
@@ -301,8 +341,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill */
	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
@@ -353,9 +395,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);
	vcpu->arch.last_exec_cpu = -1;

	return vcpu;
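As a usage note (a sketch, not from this diff): userspace selects between the two MIPS implementations through the machine type argument to KVM_CREATE_VM, which lands in the switch in kvm_arch_init_vm() above. The fallback flow below is an assumption about how a VMM might probe:

	/* Illustrative userspace snippet: prefer the VZ VM type when the
	 * kernel supports it, otherwise fall back to trap & emulate. */
	int vmfd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_MIPS_VZ);

	if (vmfd < 0 && errno == EINVAL)
		vmfd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_MIPS_TE);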
@@ -1030,9 +1070,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
@@ -1059,7 +1096,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = 0;
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
@@ -1067,7 +1104,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@@ -1092,7 +1130,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

@@ -1208,7 +1246,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	htw_start();
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -1226,17 +1265,20 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
			cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
		/*
		 * Do a privilege check, if in UM most of these exit conditions
		 * end up causing an exception to be delivered to the Guest
		 * Kernel
		 */
		er = kvm_mips_check_privilege(cause, opc, run, vcpu);
		if (er == EMULATE_PRIV_FAIL) {
			goto skip_emul;
		} else if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
			goto skip_emul;
		}
	}

	switch (exccode) {
@@ -1267,7 +1309,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

@@ -1328,12 +1370,17 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
@@ -1346,6 +1393,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
skip_emul:
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

@@ -1391,7 +1441,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();
	if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
		htw_stop();

	return ret;
}
@@ -1527,16 +1578,18 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu)
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * FPU & MSA get disabled in root context (hardware) when it is disabled
	 * in guest context (software), but the register state in the hardware
	 * may still be in use. This is why we explicitly re-enable the hardware
	 * before saving.
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();
		}

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
@@ -1549,8 +1602,10 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
	}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();
		}

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
arch/mips/kvm/mmu.c
@@ -992,6 +992,22 @@ static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
	return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu,
				      bool write_fault)
{
	int ret;

	ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
	if (ret)
		return ret;

	/* Invalidate this entry in the TLB */
	return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu,
@@ -1225,6 +1241,10 @@ int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	int err;

	if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
		 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
		return -EINVAL;

retry:
	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, opc);
arch/mips/kvm/tlb.c
@@ -33,6 +33,25 @@
#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1

#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION);
unsigned long GUESTID_VERSION_MASK;
EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK);

static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm;

	if (cpu_has_guestid)
		return 0;
	else
		return cpu_asid(smp_processor_id(), gpa_mm);
}
#endif

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
@@ -166,6 +185,13 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,

	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (user && idx_user >= 0)
		kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
@@ -179,6 +205,421 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

#ifdef CONFIG_KVM_MIPS_VZ

/* GuestID management */

/**
 * clear_root_gid() - Set GuestCtl1.RID for normal root operation.
 */
static inline void clear_root_gid(void)
{
	if (cpu_has_guestid) {
		clear_c0_guestctl1(MIPS_GCTL1_RID);
		mtc0_tlbw_hazard();
	}
}

/**
 * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID.
 *
 * Sets the root GuestID to match the current guest GuestID, for TLB operation
 * on the GPA->RPA mappings in the root TLB.
 *
 * The caller must be sure to disable HTW while the root GID is set, and
 * possibly longer if TLB registers are modified.
 */
static inline void set_root_gid_to_guest_gid(void)
{
	unsigned int guestctl1;

	if (cpu_has_guestid) {
		back_to_back_c0_hazard();
		guestctl1 = read_c0_guestctl1();
		guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) |
			((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT)
				<< MIPS_GCTL1_RID_SHIFT;
		write_c0_guestctl1(guestctl1);
		mtc0_tlbw_hazard();
	}
}

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
{
	int idx;
	unsigned long flags, old_entryhi;

	local_irq_save(flags);
	htw_stop();

	/* Set root GuestID for root probe and write of guest TLB entry */
	set_root_gid_to_guest_gid();

	old_entryhi = read_c0_entryhi();

	idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
				     kvm_mips_get_root_asid(vcpu));

	write_c0_entryhi(old_entryhi);
	clear_root_gid();
	mtc0_tlbw_hazard();

	htw_start();
	local_irq_restore(flags);

	/*
	 * We don't want to get reserved instruction exceptions for missing tlb
	 * entries.
	 */
	if (cpu_has_vtag_icache)
		flush_icache_all();

	if (idx > 0)
		kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n",
			  __func__, (va & VPN2_MASK) |
				    kvm_mips_get_root_asid(vcpu), idx);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv);
/**
 * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping.
 * @vcpu:	KVM VCPU pointer.
 * @gva:	Guest virtual address in a TLB mapped guest segment.
 * @gpa:	Pointer to output guest physical address it maps to.
 *
 * Converts a guest virtual address in a guest TLB mapped segment to a guest
 * physical address, by probing the guest TLB.
 *
 * Returns:	0 if guest TLB mapping exists for @gva. *@gpa will have been
 *		written.
 *		-EFAULT if no guest TLB mapping exists for @gva. *@gpa may not
 *		have been written.
 */
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa)
{
	unsigned long o_entryhi, o_entrylo[2], o_pagemask;
	unsigned int o_index;
	unsigned long entrylo[2], pagemask, pagemaskbit, pa;
	unsigned long flags;
	int index;

	/* Probe the guest TLB for a mapping */
	local_irq_save(flags);
	/* Set root GuestID for root probe of guest TLB entry */
	htw_stop();
	set_root_gid_to_guest_gid();

	o_entryhi = read_gc0_entryhi();
	o_index = read_gc0_index();

	write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();

	index = read_gc0_index();
	if (index < 0) {
		/* No match, fail */
		write_gc0_entryhi(o_entryhi);
		write_gc0_index(o_index);

		clear_root_gid();
		htw_start();
		local_irq_restore(flags);
		return -EFAULT;
	}

	/* Match! read the TLB entry */
	o_entrylo[0] = read_gc0_entrylo0();
	o_entrylo[1] = read_gc0_entrylo1();
	o_pagemask = read_gc0_pagemask();

	mtc0_tlbr_hazard();
	guest_tlb_read();
	tlb_read_hazard();

	entrylo[0] = read_gc0_entrylo0();
	entrylo[1] = read_gc0_entrylo1();
	pagemask = ~read_gc0_pagemask() & ~0x1fffl;

	write_gc0_entryhi(o_entryhi);
	write_gc0_index(o_index);
	write_gc0_entrylo0(o_entrylo[0]);
	write_gc0_entrylo1(o_entrylo[1]);
	write_gc0_pagemask(o_pagemask);

	clear_root_gid();
	htw_start();
	local_irq_restore(flags);

	/* Select one of the EntryLo values and interpret the GPA */
	pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1;
	pa = entrylo[!!(gva & pagemaskbit)];

	/*
	 * TLB entry may have become invalid since TLB probe if physical FTLB
	 * entries are shared between threads (e.g. I6400).
	 */
	if (!(pa & ENTRYLO_V))
		return -EFAULT;

	/*
	 * Note, this doesn't take guest MIPS32 XPA into account, where PFN is
	 * split with XI/RI in the middle.
	 */
	pa = (pa << 6) & ~0xfffl;
	pa |= gva & ~(pagemask | pagemaskbit);

	*gpa = pa;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup);
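A worked example of the EntryLo selection above, assuming a 16KB guest page pair (GC0_PageMask = 0x6000); the numbers are illustrative, not from the patch:

	/*
	 * pagemask    = ~0x6000 & ~0x1fffUL                      -> ...ffff8000
	 * lowest bit  = pagemask ^ (pagemask & (pagemask - 1))   =  0x8000
	 * pagemaskbit = 0x8000 >> 1                              =  0x4000
	 *
	 * Bit 14 of the GVA then selects EntryLo0 vs EntryLo1 (the even or
	 * odd 16KB page of the 32KB pair), and gva & ~(pagemask | pagemaskbit)
	 * (= gva & 0x3fff) is the byte offset merged into the returned GPA.
	 */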
/**
 * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for
 * guests.
 *
 * Invalidate all entries in root tlb which are GPA mappings.
 */
void kvm_vz_local_flush_roottlb_all_guests(void)
{
	unsigned long flags;
	unsigned long old_entryhi, old_pagemask, old_guestctl1;
	int entry;

	if (WARN_ON(!cpu_has_guestid))
		return;

	local_irq_save(flags);
	htw_stop();

	/* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	old_guestctl1 = read_c0_guestctl1();

	/*
	 * Invalidate guest entries in root TLB while leaving root entries
	 * intact when possible.
	 */
	for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_read();
		tlb_read_hazard();

		/* Don't invalidate non-guest (RVA) mappings in the root TLB */
		if (!(read_c0_guestctl1() & MIPS_GCTL1_RID))
			continue;

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		write_c0_guestctl1(0);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
	}

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	write_c0_guestctl1(old_guestctl1);
	tlbw_use_hazard();

	htw_start();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests);

/**
 * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries.
 *
 * Invalidate all entries in guest tlb irrespective of guestid.
 */
void kvm_vz_local_flush_guesttlb_all(void)
{
	unsigned long flags;
	unsigned long old_index;
	unsigned long old_entryhi;
	unsigned long old_entrylo[2];
	unsigned long old_pagemask;
	int entry;
	u64 cvmmemctl2 = 0;

	local_irq_save(flags);

	/* Preserve all clobbered guest registers */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo[0] = read_gc0_entrylo0();
	old_entrylo[1] = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Inhibit machine check due to multiple matching TLB entries */
		cvmmemctl2 = read_c0_cvmmemctl2();
		cvmmemctl2 |= CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
		break;
	};

	/* Invalidate guest entries in guest TLB */
	write_gc0_entrylo0(0);
	write_gc0_entrylo1(0);
	write_gc0_pagemask(0);
	for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) {
		/* Make sure all entries differ. */
		write_gc0_index(entry);
		write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	if (cvmmemctl2) {
		cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS;
		write_c0_cvmmemctl2(cvmmemctl2);
	};

	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo[0]);
	write_gc0_entrylo1(old_entrylo[1]);
	write_gc0_pagemask(old_pagemask);
	tlbw_use_hazard();

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all);

/**
 * kvm_vz_save_guesttlb() - Save a range of guest TLB entries.
 * @buf:	Buffer to write TLB entries into.
 * @index:	Start index.
 * @count:	Number of entries to save.
 *
 * Save a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	unsigned int guestctl1 = 0;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();
	if (cpu_has_guestid)
		guestctl1 = read_c0_guestctl1();

	/* Read each entry from guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);

		mtc0_tlbr_hazard();
		guest_tlb_read();
		tlb_read_hazard();

		if (cpu_has_guestid &&
		    (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) {
			/* Entry invalid or belongs to another guest */
			buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
			buf->tlb_lo[0] = 0;
			buf->tlb_lo[1] = 0;
			buf->tlb_mask = 0;
		} else {
			/* Entry belongs to the right guest */
			buf->tlb_hi = read_gc0_entryhi();
			buf->tlb_lo[0] = read_gc0_entrylo0();
			buf->tlb_lo[1] = read_gc0_entrylo1();
			buf->tlb_mask = read_gc0_pagemask();
		}
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb);
/**
 * kvm_vz_load_guesttlb() - Load a range of guest TLB entries.
 * @buf:	Buffer to read TLB entries from.
 * @index:	Start index.
 * @count:	Number of entries to load.
 *
 * Load a range of guest TLB entries. The caller must ensure interrupts are
 * disabled.
 */
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count)
{
	unsigned int end = index + count;
	unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask;
	int old_index, i;

	/* Save registers we're about to clobber */
	old_index = read_gc0_index();
	old_entryhi = read_gc0_entryhi();
	old_entrylo0 = read_gc0_entrylo0();
	old_entrylo1 = read_gc0_entrylo1();
	old_pagemask = read_gc0_pagemask();

	/* Set root GuestID for root probe */
	htw_stop();
	set_root_gid_to_guest_gid();

	/* Write each entry to guest TLB */
	for (i = index; i < end; ++i, ++buf) {
		write_gc0_index(i);
		write_gc0_entryhi(buf->tlb_hi);
		write_gc0_entrylo0(buf->tlb_lo[0]);
		write_gc0_entrylo1(buf->tlb_lo[1]);
		write_gc0_pagemask(buf->tlb_mask);

		mtc0_tlbw_hazard();
		guest_tlb_write_indexed();
	}

	/* Clear root GuestID again */
	clear_root_gid();
	htw_start();

	/* Restore clobbered registers */
	write_gc0_index(old_index);
	write_gc0_entryhi(old_entryhi);
	write_gc0_entrylo0(old_entrylo0);
	write_gc0_entrylo1(old_entrylo1);
	write_gc0_pagemask(old_pagemask);

	tlbw_use_hazard();
}
EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb);

#endif
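A usage sketch for the save/load pair above, e.g. around a vcpu moving between physical CPUs; the buffer size and call site are assumptions, and the irq handling follows the kernel-doc requirement that interrupts be disabled:

	/* Illustrative caller: snapshot the guest TLB, then restore it. */
	struct kvm_mips_tlb buf[64];	/* size is illustrative; the real
					 * limit is the guest TLB size */
	unsigned long flags;

	local_irq_save(flags);
	kvm_vz_save_guesttlb(buf, 0, ARRAY_SIZE(buf));
	/* ... vcpu migrates to another physical CPU ... */
	kvm_vz_load_guesttlb(buf, 0, ARRAY_SIZE(buf));
	local_irq_restore(flags);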

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
arch/mips/kvm/trace.h
@@ -17,6 +17,13 @@
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace

/*
 * arch/mips/kvm/mips.c
 */
extern bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void);
void kvm_guest_mode_change_trace_unreg(void);

/*
 * Tracepoints for VM enters
 */
@@ -62,10 +69,20 @@ DEFINE_EVENT(kvm_transition, kvm_out,
#define KVM_TRACE_EXIT_MSA_FPE		14
#define KVM_TRACE_EXIT_FPE		15
#define KVM_TRACE_EXIT_MSA_DISABLED	21
#define KVM_TRACE_EXIT_GUEST_EXIT	27
/* Further exit reasons */
#define KVM_TRACE_EXIT_WAIT		32
#define KVM_TRACE_EXIT_CACHE		33
#define KVM_TRACE_EXIT_SIGNAL		34
/* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */
#define KVM_TRACE_EXIT_GEXCCODE_BASE	64
#define KVM_TRACE_EXIT_GPSI		64	/*  0 */
#define KVM_TRACE_EXIT_GSFC		65	/*  1 */
#define KVM_TRACE_EXIT_HC		66	/*  2 */
#define KVM_TRACE_EXIT_GRR		67	/*  3 */
#define KVM_TRACE_EXIT_GVA		72	/*  8 */
#define KVM_TRACE_EXIT_GHFC		73	/*  9 */
#define KVM_TRACE_EXIT_GPA		74	/* 10 */

/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \
@@ -83,9 +100,17 @@ DEFINE_EVENT(kvm_transition, kvm_out,
	{ KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
	{ KVM_TRACE_EXIT_FPE, "FPE" }, \
	{ KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
	{ KVM_TRACE_EXIT_GUEST_EXIT, "Guest Exit" }, \
	{ KVM_TRACE_EXIT_WAIT, "WAIT" }, \
	{ KVM_TRACE_EXIT_CACHE, "CACHE" }, \
	{ KVM_TRACE_EXIT_SIGNAL, "Signal" }
	{ KVM_TRACE_EXIT_SIGNAL, "Signal" }, \
	{ KVM_TRACE_EXIT_GPSI, "GPSI" }, \
	{ KVM_TRACE_EXIT_GSFC, "GSFC" }, \
	{ KVM_TRACE_EXIT_HC, "HC" }, \
	{ KVM_TRACE_EXIT_GRR, "GRR" }, \
	{ KVM_TRACE_EXIT_GVA, "GVA" }, \
	{ KVM_TRACE_EXIT_GHFC, "GHFC" }, \
	{ KVM_TRACE_EXIT_GPA, "GPA" }

TRACE_EVENT(kvm_exit,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -158,6 +183,8 @@ TRACE_EVENT(kvm_exit,
	{ KVM_TRACE_COP0(16, 4), "Config4" }, \
	{ KVM_TRACE_COP0(16, 5), "Config5" }, \
	{ KVM_TRACE_COP0(16, 7), "Config7" }, \
	{ KVM_TRACE_COP0(17, 1), "MAAR" }, \
	{ KVM_TRACE_COP0(17, 2), "MAARI" }, \
	{ KVM_TRACE_COP0(26, 0), "ECC" }, \
	{ KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \
	{ KVM_TRACE_COP0(31, 2), "KScratch1" }, \
@@ -268,6 +295,51 @@ TRACE_EVENT(kvm_asid_change,
		      __entry->new_asid)
);

TRACE_EVENT(kvm_guestid_change,
	    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid),
	    TP_ARGS(vcpu, guestid),
	    TP_STRUCT__entry(
			__field(unsigned int, guestid)
	    ),

	    TP_fast_assign(
			__entry->guestid = guestid;
	    ),

	    TP_printk("GuestID: 0x%02x",
		      __entry->guestid)
);

TRACE_EVENT_FN(kvm_guest_mode_change,
	    TP_PROTO(struct kvm_vcpu *vcpu),
	    TP_ARGS(vcpu),
	    TP_STRUCT__entry(
			__field(unsigned long, epc)
			__field(unsigned long, pc)
			__field(unsigned long, badvaddr)
			__field(unsigned int, status)
			__field(unsigned int, cause)
	    ),

	    TP_fast_assign(
			__entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
			__entry->pc = vcpu->arch.pc;
			__entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
			__entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
			__entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
	    ),

	    TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
		      __entry->epc,
		      __entry->pc,
		      __entry->status,
		      __entry->cause,
		      __entry->badvaddr),

	    kvm_guest_mode_change_trace_reg,
	    kvm_guest_mode_change_trace_unreg
);

#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
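A sketch of how a VZ guest exit maps onto the trace codes above: the GExcCode field is read from the GuestCtl0 value saved at exit time, then offset by the base. The helper name is hypothetical and the MIPS_GCTL0_GEXC field macros are an assumption about the VZ register definitions this series adds:

	/* Hypothetical helper, illustrating the numbering scheme only. */
	static unsigned int vz_trace_exit_reason(u32 host_cp0_guestctl0)
	{
		u32 gexccode = (host_cp0_guestctl0 & MIPS_GCTL0_GEXC) >>
			       MIPS_GCTL0_GEXC_SHIFT;

		/* e.g. GExcCode 2 (hypercall) -> KVM_TRACE_EXIT_HC (66) */
		return KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode;
	}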
arch/mips/kvm/trap_emul.c
@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
@@ -40,6 +41,29 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
	return gpa;
}

static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		kvm_read_c0_guest_status(vcpu->arch.cop0));
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -82,6 +106,10 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
		ret = RESUME_HOST;
		break;

	case EMULATE_HYPERCALL:
		ret = kvm_mips_handle_hypcall(vcpu);
		break;

	default:
		BUG();
	}
@@ -484,6 +512,31 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
	return ret;
}

static int kvm_trap_emul_hardware_enable(void)
{
	return 0;
}

static void kvm_trap_emul_hardware_disable(void)
{
}

static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_TE:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
@@ -561,6 +614,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Start off the timer at 100 MHz */
	kvm_mips_init_count(vcpu, 100*1000*1000);

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
@@ -589,6 +645,13 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* DCache line size not correctly reported in Config1 on Octeon CPUs */
	if (cpu_dcache_line_size()) {
		config1 &= ~MIPS_CONF1_DL;
		config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
			    MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
	}

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
@@ -892,10 +955,12 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
		if (v & CAUSEF_DC) {
			/* disable timer first */
			kvm_mips_count_disable_cause(vcpu);
			kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
						  v);
		} else {
			/* enable timer last */
			kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
						  v);
			kvm_mips_count_enable_cause(vcpu);
		}
	} else {
@@ -1230,7 +1295,11 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_emul_no_handler,

	.hardware_enable = kvm_trap_emul_hardware_enable,
	.hardware_disable = kvm_trap_emul_hardware_disable,
	.check_extension = kvm_trap_emul_check_extension,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
arch/mips/kvm/vz.c (new file, 3223 lines)
File diff suppressed because it is too large
arch/mips/mm/cache.c
@@ -24,6 +24,7 @@
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
arch/mips/mm/init.c
@@ -348,7 +348,7 @@ void maar_init(void)
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if (!(attr & MIPS_MAAR_V)) {
		if (!(attr & MIPS_MAAR_VL)) {
			pr_cont("disabled\n");
			continue;
		}
arch/powerpc/include/asm/disassemble.h
@@ -87,6 +87,11 @@ static inline unsigned int get_oc(u32 inst)
	return (inst >> 11) & 0x7fff;
}

static inline unsigned int get_tx_or_sx(u32 inst)
{
	return (inst) & 0x1;
}

#define IS_XFORM(inst)	(get_op(inst)  == 31)
#define IS_DSFORM(inst)	(get_op(inst) >= 56)
arch/powerpc/include/asm/iommu.h
@@ -296,11 +296,21 @@ static inline void iommu_restore(void)
#endif

/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages);
extern int iommu_tce_put_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce);
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

extern void iommu_flush_tce(struct iommu_table *tbl);
extern int iommu_take_ownership(struct iommu_table *tbl);
arch/powerpc/include/asm/kvm_host.h
@@ -45,9 +45,6 @@

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#ifdef CONFIG_KVM_MMIO
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#endif
#define KVM_HALT_POLL_NS_DEFAULT 10000	/* 10 us */

/* These values are internal and can be increased later */
@@ -191,6 +188,13 @@ struct kvmppc_pginfo {
	atomic_t refcnt;
};

struct kvmppc_spapr_tce_iommu_table {
	struct rcu_head rcu;
	struct list_head next;
	struct iommu_table *tbl;
	struct kref kref;
};

struct kvmppc_spapr_tce_table {
	struct list_head list;
	struct kvm *kvm;
@@ -199,6 +203,7 @@ struct kvmppc_spapr_tce_table {
	u32 page_shift;
	u64 offset;		/* in pages */
	u64 size;		/* window size in pages */
	struct list_head iommu_tables;
	struct page *pages[0];
};

@@ -345,6 +350,7 @@ struct kvmppc_pte {
	bool may_read		: 1;
	bool may_write		: 1;
	bool may_execute	: 1;
	unsigned long wimg;
	u8 page_size;		/* MMU_PAGE_xxx */
};

@@ -441,6 +447,11 @@ struct mmio_hpte_cache {
	unsigned int index;
};

#define KVMPPC_VSX_COPY_NONE		0
#define KVMPPC_VSX_COPY_WORD		1
#define KVMPPC_VSX_COPY_DWORD		2
#define KVMPPC_VSX_COPY_DWORD_LOAD_DUMP	3

struct openpic;

struct kvm_vcpu_arch {
@@ -644,6 +655,21 @@ struct kvm_vcpu_arch {
	u8 io_gpr; /* GPR used as IO source/target */
	u8 mmio_host_swabbed;
	u8 mmio_sign_extend;
	/* conversion between single and double precision */
	u8 mmio_sp64_extend;
	/*
	 * Number of simulations for vsx.
	 * If we use 2*8bytes to simulate 1*16bytes,
	 * then the number should be 2 and
	 * mmio_vsx_copy_type=KVMPPC_VSX_COPY_DWORD.
	 * If we use 4*4bytes to simulate 1*16bytes,
	 * the number should be 4 and
	 * mmio_vsx_copy_type=KVMPPC_VSX_COPY_WORD.
	 */
	u8 mmio_vsx_copy_nums;
	u8 mmio_vsx_offset;
	u8 mmio_vsx_copy_type;
	u8 mmio_vsx_tx_sx_enabled;
	u8 osi_needed;
	u8 osi_enabled;
	u8 papr_enabled;
@@ -732,6 +758,8 @@ struct kvm_vcpu_arch {
};

#define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]
#define VCPU_VSX_FPR(vcpu, i, j)	((vcpu)->arch.fp.fpr[i][j])
#define VCPU_VSX_VR(vcpu, i)		((vcpu)->arch.vr.vr[i])

/* Values for vcpu->arch.state */
#define KVMPPC_VCPU_NOTREADY	0
@@ -745,6 +773,7 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_FPR	0x0020
#define KVM_MMIO_REG_QPR	0x0040
#define KVM_MMIO_REG_FQPR	0x0060
#define KVM_MMIO_REG_VSX	0x0080

#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
arch/powerpc/include/asm/kvm_ppc.h
@@ -78,9 +78,15 @@ extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				int rs, unsigned int bytes,
			int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);
@@ -132,6 +138,9 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
@@ -164,13 +173,19 @@ extern long kvmppc_prepare_vrma(struct kvm *kvm,
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm_vcpu *vcpu, unsigned long liobn);
extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages);
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
		unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
@@ -240,6 +255,7 @@ union kvmppc_one_reg {
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	struct {
		u64	addr;
		u64	length;
arch/powerpc/include/asm/ppc-opcode.h
@@ -86,32 +86,79 @@
#define OP_TRAP_64 2

#define OP_31_XOP_TRAP      4
#define OP_31_XOP_LDX       21
#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LDUX      53
#define OP_31_XOP_DCBST     54
#define OP_31_XOP_LWZUX     55
#define OP_31_XOP_TRAP_64   68
#define OP_31_XOP_DCBF      86
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STDX      149
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STDUX     181
#define OP_31_XOP_STWUX     183
#define OP_31_XOP_STBX      215
#define OP_31_XOP_LBZUX     119
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_LWAX      341
#define OP_31_XOP_LHAX      343
#define OP_31_XOP_LWAUX     373
#define OP_31_XOP_LHAUX     375
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LDBRX     532
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STDBRX    660
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_STFSX     663
#define OP_31_XOP_STFSUX    695
#define OP_31_XOP_STFDX     727
#define OP_31_XOP_STFDUX    759
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_LFIWAX    855
#define OP_31_XOP_LFIWZX    887
#define OP_31_XOP_STHBRX    918
#define OP_31_XOP_STFIWX    983

/* VSX Scalar Load Instructions */
#define OP_31_XOP_LXSDX         588
#define OP_31_XOP_LXSSPX        524
#define OP_31_XOP_LXSIWAX       76
#define OP_31_XOP_LXSIWZX       12

/* VSX Scalar Store Instructions */
#define OP_31_XOP_STXSDX        716
#define OP_31_XOP_STXSSPX       652
#define OP_31_XOP_STXSIWX       140

/* VSX Vector Load Instructions */
#define OP_31_XOP_LXVD2X        844
#define OP_31_XOP_LXVW4X        780

/* VSX Vector Load and Splat Instruction */
#define OP_31_XOP_LXVDSX        332

/* VSX Vector Store Instructions */
#define OP_31_XOP_STXVD2X       972
#define OP_31_XOP_STXVW4X       908

#define OP_31_XOP_LFSX          535
#define OP_31_XOP_LFSUX         567
#define OP_31_XOP_LFDX          599
#define OP_31_XOP_LFDUX         631

#define OP_LWZ  32
#define OP_STFS 52
#define OP_STFSU 53
#define OP_STFD 54
#define OP_STFDU 55
#define OP_LD   58
#define OP_LWZU 33
#define OP_LBZ  34
@@ -127,6 +174,17 @@
#define OP_LHAU 43
#define OP_STH  44
#define OP_STHU 45
#define OP_LMW  46
#define OP_STMW 47
#define OP_LFS  48
#define OP_LFSU 49
#define OP_LFD  50
#define OP_LFDU 51
#define OP_STFS 52
#define OP_STFSU 53
#define OP_STFD 54
#define OP_STFDU 55
#define OP_LQ   56

/* sorted alphabetically */
#define PPC_INST_BHRBE			0x7c00025c
arch/powerpc/include/uapi/asm/kvm.h
@@ -29,6 +29,9 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_GUEST_DEBUG

/* Not always available, but if it is, this is the correct offset. */
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1

struct kvm_regs {
	__u64 pc;
	__u64 cr;
arch/powerpc/kernel/iommu.c
@@ -963,47 +963,36 @@ void iommu_flush_tce(struct iommu_table *tbl)
}
EXPORT_SYMBOL_GPL(iommu_flush_tce);

int iommu_tce_clear_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce_value,
		unsigned long npages)
int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages)
{
	/* tbl->it_ops->clear() does not support any value but 0 */
	if (tce_value)
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & mask)
		return -EINVAL;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
	ioba >>= page_shift;
	if (ioba < offset)
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
	if ((ioba + 1) > (offset + size))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);

int iommu_tce_put_param_check(struct iommu_table *tbl,
		unsigned long ioba, unsigned long tce)
int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
{
	if (tce & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;
	unsigned long mask = (1UL << page_shift) - 1;

	if (ioba & ~IOMMU_PAGE_MASK(tbl))
		return -EINVAL;

	ioba >>= tbl->it_page_shift;
	if (ioba < tbl->it_offset)
		return -EINVAL;

	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
	if (gpa & mask)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);

long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
		unsigned long *hpa, enum dma_data_direction *direction)
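A worked example of the new boundary check (illustrative values, not from the patch): for a 4K-page DMA window starting at IO page 0 with 0x10000 pages, an ioba must be page aligned and its first IO page must fall inside [offset, offset + size); note that, as written, only the first page of the npages range is range-checked:

	/* Sketch of a caller, mirroring the kvmppc_ioba_validate() macro. */
	if (iommu_tce_check_ioba(12 /* page_shift */, 0 /* offset */,
				 0x10000 /* size */, ioba, npages))
		return H_PARAMETER;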
arch/powerpc/kvm/Kconfig
@@ -67,6 +67,7 @@ config KVM_BOOK3S_64
	select KVM_BOOK3S_64_HANDLER
	select KVM
	select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
	select SPAPR_TCE_IOMMU if IOMMU_SUPPORT
	---help---
	  Support running unmodified book3s_64 and book3s_32 guest kernels
	  in virtual machines on book3s_64 host processors.
arch/powerpc/kvm/book3s.c
@@ -197,6 +197,24 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, 0);
}

void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_ALTIVEC, 0);
}

void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_VSX, 0);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
arch/powerpc/kvm/book3s_64_mmu.c
@@ -319,6 +319,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
	gpte->may_execute = true;
	gpte->may_read = false;
	gpte->may_write = false;
	gpte->wimg = r & HPTE_R_WIMG;

	switch (pp) {
	case 0:
arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -145,6 +145,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
	else
		kvmppc_mmu_flush_icache(pfn);

	rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
@@ -177,12 +179,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
	ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				       hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret < 0) {
	if (ret == -1) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else if (ret < 0) {
		r = -EIO;
		goto out_unlock;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);
arch/powerpc/kvm/book3s_64_vio.c
@@ -28,6 +28,8 @@
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
@@ -40,6 +42,7 @@
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
@@ -91,6 +94,137 @@ static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
	return ret;
}
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
				return;
			}
		}
	}
}

extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/*
		 * Make sure hardware table parameters are exactly the same;
		 * this is used in the TCE handlers where boundary checks
		 * use only the first attached table.
		 */
		if ((tbltmp->it_page_shift == stt->page_shift) &&
				(tbltmp->it_offset == stt->offset) &&
				(tbltmp->it_size == stt->size)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		return 0;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}
static void release_spapr_tce_table(struct rcu_head *head)
|
||||
{
|
||||
struct kvmppc_spapr_tce_table *stt = container_of(head,
|
||||
@ -130,9 +264,18 @@ static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
|
||||
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct kvmppc_spapr_tce_table *stt = filp->private_data;
|
||||
struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
|
||||
|
||||
list_del_rcu(&stt->list);
|
||||
|
||||
list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
|
||||
WARN_ON(!kref_read(&stit->kref));
|
||||
while (1) {
|
||||
if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
kvm_put_kvm(stt->kvm);
|
||||
|
||||
kvmppc_account_memlimit(
|
||||
@ -164,7 +307,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
size = args->size;
|
||||
size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
|
||||
npages = kvmppc_tce_pages(size);
|
||||
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
|
||||
if (ret) {
|
||||
@ -183,6 +326,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
||||
stt->offset = args->offset;
|
||||
stt->size = size;
|
||||
stt->kvm = kvm;
|
||||
INIT_LIST_HEAD_RCU(&stt->iommu_tables);
|
||||
|
||||
for (i = 0; i < npages; i++) {
|
||||
stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
|
||||
@ -211,15 +355,106 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
|
||||
{
|
||||
unsigned long hpa = 0;
|
||||
enum dma_data_direction dir = DMA_NONE;
|
||||
|
||||
iommu_tce_xchg(tbl, entry, &hpa, &dir);
|
||||
}
|
||||
|
||||
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
|
||||
struct iommu_table *tbl, unsigned long entry)
|
||||
{
|
||||
struct mm_iommu_table_group_mem_t *mem = NULL;
|
||||
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
|
||||
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
|
||||
|
||||
if (!pua)
|
||||
/* it_userspace allocation might be delayed */
|
||||
return H_TOO_HARD;
|
||||
|
||||
mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
|
||||
if (!mem)
|
||||
return H_TOO_HARD;
|
||||
|
||||
mm_iommu_mapped_dec(mem);
|
||||
|
||||
*pua = 0;
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
|
||||
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
|
||||
struct iommu_table *tbl, unsigned long entry)
|
||||
{
|
||||
enum dma_data_direction dir = DMA_NONE;
|
||||
unsigned long hpa = 0;
|
||||
long ret;
|
||||
|
||||
if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
|
||||
return H_HARDWARE;
|
||||
|
||||
if (dir == DMA_NONE)
|
||||
return H_SUCCESS;
|
||||
|
||||
ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
|
||||
if (ret != H_SUCCESS)
|
||||
iommu_tce_xchg(tbl, entry, &hpa, &dir);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
|
||||
unsigned long entry, unsigned long ua,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
long ret;
|
||||
unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
|
||||
struct mm_iommu_table_group_mem_t *mem;
|
||||
|
||||
if (!pua)
|
||||
/* it_userspace allocation might be delayed */
|
||||
return H_TOO_HARD;
|
||||
|
||||
mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
|
||||
if (!mem)
|
||||
/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
|
||||
return H_TOO_HARD;
|
||||
|
||||
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
|
||||
return H_HARDWARE;
|
||||
|
||||
if (mm_iommu_mapped_inc(mem))
|
||||
return H_CLOSED;
|
||||
|
||||
ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
|
||||
if (WARN_ON_ONCE(ret)) {
|
||||
mm_iommu_mapped_dec(mem);
|
||||
return H_HARDWARE;
|
||||
}
|
||||
|
||||
if (dir != DMA_NONE)
|
||||
kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
|
||||
|
||||
*pua = ua;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||
unsigned long ioba, unsigned long tce)
|
||||
{
|
||||
struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
|
||||
long ret;
|
||||
struct kvmppc_spapr_tce_table *stt;
|
||||
long ret, idx;
|
||||
struct kvmppc_spapr_tce_iommu_table *stit;
|
||||
unsigned long entry, ua = 0;
|
||||
enum dma_data_direction dir;
|
||||
|
||||
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
|
||||
/* liobn, ioba, tce); */
|
||||
|
||||
stt = kvmppc_find_table(vcpu->kvm, liobn);
|
||||
if (!stt)
|
||||
return H_TOO_HARD;
|
||||
|
||||
@ -231,7 +466,35 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
|
||||
if (ret != H_SUCCESS)
|
||||
return ret;
|
||||
|
||||
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
|
||||
dir = iommu_tce_direction(tce);
|
||||
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
|
||||
tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
|
||||
return H_PARAMETER;
|
||||
|
||||
entry = ioba >> stt->page_shift;
|
||||
|
||||
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
|
||||
if (dir == DMA_NONE) {
|
||||
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
|
||||
stit->tbl, entry);
|
||||
} else {
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
|
||||
entry, ua, dir);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
}
|
||||
|
||||
if (ret == H_SUCCESS)
|
||||
continue;
|
||||
|
||||
if (ret == H_TOO_HARD)
|
||||
return ret;
|
||||
|
||||
WARN_ON_ONCE(1);
|
||||
kvmppc_clear_tce(stit->tbl, entry);
|
||||
}
|
||||
|
||||
kvmppc_tce_put(stt, entry, tce);
|
||||
|
||||
return H_SUCCESS;
|
||||
}
|
||||
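A note on what iommu_tce_direction() provides above: a TCE is a 64-bit entry whose low flag bits encode device access rights and whose upper bits carry the page address. The sketch below is illustrative only — TCE_PCI_READ/TCE_PCI_WRITE are the real flag names from asm/tce.h, but the body is a conventional reconstruction, not a quote of the kernel helper:

/* Illustrative: how TCE permission bits map to a DMA direction. */
static enum dma_data_direction tce_direction_sketch(unsigned long tce)
{
	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
		return DMA_BIDIRECTIONAL;	/* device may read and write */
	if (tce & TCE_PCI_READ)
		return DMA_TO_DEVICE;		/* device reads guest memory */
	if (tce & TCE_PCI_WRITE)
		return DMA_FROM_DEVICE;		/* device writes guest memory */
	return DMA_NONE;			/* clearing the entry */
}

This is why kvmppc_h_put_tce() treats DMA_NONE as an unmap request and anything else as a map request.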
@@ -246,8 +509,9 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu, liobn);
	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

@@ -284,6 +548,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		if (ret != H_SUCCESS)
			goto unlock_exit;

		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL))
			return H_PARAMETER;

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(stit->tbl, entry);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

@@ -300,8 +584,9 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu, liobn);
	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

@@ -313,6 +598,24 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stit->tbl->it_page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

@@ -40,6 +40,31 @@
#include <asm/iommu.h>
#include <asm/tce.h>

#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition)	({			\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif
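WARN_ON_ONCE_RM() exists because the hypervisor TCE handlers in this file can run in real mode, where the trap-based machinery behind the generic WARN_ON_ONCE() is not safe to invoke; this variant degrades to pr_err() plus dump_stack(). Like its generic counterpart, the macro evaluates to the condition, so call sites can test and warn in one step, as the real-mode handlers below do:

	/* warn once, then fall back gracefully */
	if (WARN_ON_ONCE_RM(!rmap))
		return H_HARDWARE;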
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
@@ -48,10 +73,9 @@
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
@@ -62,27 +86,6 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates IO address.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages)
{
	unsigned long mask = (1ULL << stt->page_shift) - 1;
	unsigned long idx = ioba >> stt->page_shift;

	if ((ioba & mask) || (idx < stt->offset) ||
			(idx - stt->offset + npages > stt->size) ||
			(idx + npages < idx))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
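To make kvmppc_ioba_validate() concrete, here is the same arithmetic worked through with hypothetical numbers (4K IOMMU pages, a window starting at offset 0 with 0x1000 entries):

/*
 * stt->page_shift = 12, stt->offset = 0, stt->size = 0x1000:
 *
 *   ioba = 0x123000, npages = 2
 *     mask = 0xfff, (ioba & mask) == 0        -> page aligned
 *     idx  = 0x123, idx >= offset             -> inside the window
 *     idx - offset + npages = 0x125 <= size   -> fits
 *     idx + npages does not wrap              -> H_SUCCESS
 *
 *   ioba = 0x123800, npages = 1
 *     (ioba & mask) == 0x800                  -> H_PARAMETER
 */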
/*
 * Validates TCE address.
 * At the moment flags and page mask are validated.
@@ -96,10 +99,14 @@ EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
	unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	if (tce & mask)
	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
@@ -179,15 +186,122 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = 0;

	return H_SUCCESS;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
		return H_HARDWARE;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = ua;

	return 0;
}

long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

@@ -199,7 +313,32 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
	if (ret != H_SUCCESS)
		return ret;

	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}
@@ -239,8 +378,10 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu, liobn);
	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

@@ -259,23 +400,49 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
	if (ret != H_SUCCESS)
		return ret;

	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
		return H_TOO_HARD;
	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered which
		 * is normally VFIO case and gpa->hpa translation does not
		 * depend on hpt.
		 */
		struct mm_iommu_table_group_mem_t *mem;

	rmap = (void *) vmalloc_to_phys(rmap);
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

	/*
	 * Synchronize with the MMU notifier callbacks in
	 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
	 * While we have the rmap lock, code running on other CPUs
	 * cannot finish unmapping the host real page that backs
	 * this guest real page, so we are OK to access the host
	 * real page.
	 */
	lock_rmap(rmap);
	if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually a case of a guest with emulated devices only
		 * when TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case
		 * so lock rmap and do __find_linux_pte_or_hugepte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
@@ -285,11 +452,33 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL))
			return H_PARAMETER;

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	unlock_rmap(rmap);
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}
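The real-mode H_PUT_TCE_INDIRECT path above now translates the guest address of the TCE list in one of two ways. A simplified outline of the control flow (names follow the diff, but this is a heavily condensed sketch, not the real code):

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/* VFIO case: the ua->hpa translation comes from the
		 * preregistration records and does not depend on the HPT. */
		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		prereg = mem && !mm_iommu_ua_to_hpa_rm(mem, ua, &tces);
	}
	if (!prereg) {
		/* emulated-devices case: take the rmap lock so the backing
		 * host page cannot disappear, then walk the page tables */
		lock_rmap(rmap);
		/* ... kvmppc_rm_ua_to_hpa() ... */
	}

This split is also why unlock_exit now drops the rmap lock only when the slow path actually took it.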
@@ -300,8 +489,9 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu, liobn);
	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

@@ -313,6 +503,24 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stit->tbl->it_page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

@@ -322,12 +530,13 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;
@@ -503,10 +503,18 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
		break;
unprivileged:
	default:
		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

@@ -648,10 +656,20 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
		break;
	default:
unprivileged:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}
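A note on the sprn & 0x10 test in both hunks: it appears to encode the Power ISA convention that SPR numbers with that bit set are supervisor-only. An access from guest userspace (MSR_PR set) is then reflected as a privileged-instruction program check (SRR1_PROGPRIV), while touching a nonexistent unprivileged SPR raises an illegal-instruction one (SRR1_PROGILL); EMULATE_AGAIN makes the emulator deliver the queued interrupt to the guest instead of failing the emulation outright.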
@@ -3624,11 +3624,9 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
		return -EIO;

	mutex_lock(&kvm->lock);
	if (!kvm->arch.pimap)
		goto unlock;

	if (kvm->arch.pimap == NULL) {
		mutex_unlock(&kvm->lock);
		return 0;
	}
	pimap = kvm->arch.pimap;

	for (i = 0; i < pimap->n_mapped; i++) {
@@ -3650,7 +3648,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
	 * We don't free this structure even when the count goes to
	 * zero. The structure is freed when we destroy the VM.
	 */

 unlock:
	mutex_unlock(&kvm->lock);
	return 0;
}
@@ -349,7 +349,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
@@ -537,8 +537,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;
@@ -616,8 +615,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gpa(vcpu, pte.raddr)) {
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
@@ -627,7 +625,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -344,7 +344,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
	case H_CEDE:
		kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		vcpu->stat.halt_wakeup++;
		return EMULATE_DONE;
	case H_LOGICAL_CI_LOAD:
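The clear_bit() to kvm_clear_request() substitutions in this and the surrounding files come from the VCPU request API work mentioned in the cover letter. A minimal sketch of what the accessor wraps (the authoritative definition lives in include/linux/kvm_host.h; the KVM_REQUEST_MASK detail is an assumption here):

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}

The point of the wrapper is that request constants may now carry flag bits, so callers must stop treating them as raw bit numbers.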
@@ -300,6 +300,11 @@ void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
@@ -579,7 +584,7 @@ static void arm_next_watchdog(struct kvm_vcpu *vcpu)
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
@@ -690,7 +695,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
@@ -797,9 +797,8 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
	vcpu_e500->h2g_tlb1_rmap = kcalloc(host_tlb_params[1].entries,
					   sizeof(*vcpu_e500->h2g_tlb1_rmap),
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -EINVAL;
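The kzalloc() to kcalloc() conversion above is the standard hardening pattern for array allocations: kcalloc() checks the element-count multiplication for overflow and returns NULL instead of allocating a short buffer, and sizing by sizeof(*ptr) keeps the element size tied to the pointer's type. Schematically:

	/* before: n * size computed by hand, can overflow silently */
	p = kzalloc(sizeof(unsigned int) * n, GFP_KERNEL);

	/* after: overflow in n * sizeof(*p) makes kcalloc() fail cleanly */
	p = kcalloc(n, sizeof(*p), GFP_KERNEL);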
@@ -259,10 +259,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_TLBSYNC:
@@ -34,18 +34,38 @@
#include "timing.h"
#include "trace.h"

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */
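Both helpers follow the same pattern: if the guest currently has the facility disabled in its MSR, queue the corresponding facility-unavailable interrupt and report true so the caller bails out with EMULATE_DONE. The guest then takes the interrupt, typically enables the facility, and re-executes the instruction, at which point emulation proceeds normally. Every FP/VSX case below therefore opens with:

		if (kvmppc_check_fp_disabled(vcpu))	/* or _vsx_ */
			return EMULATE_DONE;	/* interrupt queued, guest retries */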
/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
@@ -66,6 +86,19 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
@@ -73,6 +106,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LWZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;
@@ -82,22 +120,36 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STDX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STDUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs),
					4, 1);
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STWUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs),
					1, 1);
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs),
					1, 1);
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

@@ -105,6 +157,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHAUX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;
@@ -116,14 +173,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs),
					2, 1);
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs),
					2, 1);
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

@@ -143,8 +198,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs),
					4, 0);
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
@@ -153,10 +207,258 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs),
					2, 0);
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		case OP_31_XOP_LDBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 0);
			break;

		case OP_31_XOP_STDBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 0);
			break;

		case OP_31_XOP_LDX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;

		case OP_31_XOP_LDUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LWAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LWAUX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

#ifdef CONFIG_PPC_FPU
		case OP_31_XOP_LFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 8, 1);
			break;

		case OP_31_XOP_LFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFIWAX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFIWZX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_STFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFIWX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			break;
#endif

#ifdef CONFIG_VSX
		case OP_31_XOP_LXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXSIWAX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 1);
			break;

		case OP_31_XOP_LXSIWZX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVD2X:
			/*
			 * The emulated load/store sequence works like this:
			 * Step 1: exit the VM via the page-fault ISR, at
			 * which point KVM saves the VSRs; see
			 * guest_exit_cont->store_fp_state->SAVE_32VSRS
			 * for reference.
			 *
			 * Step 2: copy the data between memory and the VCPU.
			 * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X, two
			 * 8-byte copies or four 4-byte copies simulate one
			 * 16-byte copy. There is also an endianness issue
			 * here; note the layout of memory, and see the
			 * LXVD2X_ROT/STXVD2X_ROT macros for more detail.
			 * If the host is little-endian, KVM calls XXSWAPD
			 * for LXVD2X_ROT/STXVD2X_ROT, so the position in
			 * memory must be swapped.
			 *
			 * Step 3: return to the guest, at which point KVM
			 * restores the registers; see
			 * kvmppc_hv_entry->load_fp_state->REST_32VSRS
			 * for reference.
			 */
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVDSX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type =
					KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_STXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 8, 1);
			break;

		case OP_31_XOP_STXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;

		case OP_31_XOP_STXSIWX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_offset = 1;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;

		case OP_31_XOP_STXVD2X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 8, 1);
			break;

		case OP_31_XOP_STXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;
#endif /* CONFIG_VSX */
		default:
			emulated = EMULATE_FAIL;
			break;
@@ -167,10 +469,60 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
#ifdef CONFIG_PPC_FPU
	case OP_STFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs),
				4, 1);
		break;

	case OP_STFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs),
				4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs),
				8, 1);
		break;

	case OP_STFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs),
				8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		switch (inst & 3) {
		case 0:	/* ld */
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;
		case 1: /* ldu */
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;
		case 2:	/* lwa */
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZU:
@@ -193,31 +545,37 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
				4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs),
				8, 1);
		switch (inst & 3) {
		case 0:	/* std */
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			break;
		case 1: /* stdu */
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;
		default:
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs),
				4, 1);
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs),
				1, 1);
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs),
				1, 1);
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

@@ -241,17 +599,49 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs),
				2, 1);
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs),
				2, 1);
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

#ifdef CONFIG_PPC_FPU
	case OP_LFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
		break;

	case OP_LFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
		break;

	case OP_LFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

	default:
		emulated = EMULATE_FAIL;
		break;
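The reworked OP_LD/OP_STD cases above exploit the DS instruction form, in which several 64-bit memory ops share one major opcode and the two low bits of the displacement word select the variant. Assuming the standard Power ISA encodings (major opcode 58 for the loads, 62 for the stores):

/*
 * DS-form: the extended opcode is (inst & 3).
 *   opcode 58: 0 -> ld,  1 -> ldu,  2 -> lwa
 *   opcode 62: 0 -> std, 1 -> stdu
 * The update forms (ldu/stdu) additionally write the effective
 * address back to rA, hence the extra kvmppc_set_gpr() calls.
 */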
@@ -37,6 +37,7 @@
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -232,7 +233,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
@@ -524,11 +525,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
@@ -538,6 +534,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		/* fallthrough */
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
@@ -806,6 +804,129 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}
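These two offset helpers hide the fact that element numbering inside a 128-bit VSR depends on host endianness. For example:

/*
 * Guest-visible doubleword 0 of a VSR lives at:
 *   vsxval[0] on a big-endian host
 *   vsxval[1] on a little-endian host   (offset = 1 - index)
 * Guest-visible word 1 of a VSR lives at:
 *   vsx32val[1] on a big-endian host
 *   vsx32val[2] on a little-endian host (offset = 3 - index)
 */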
static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
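Rather than open-coding an IEEE 754 conversion, sp_to_dp()/dp_to_sp() bounce the bits through FP register fr0 with an lfs/stfd (or lfd/stfs) pair, so the FPU performs exactly the conversion a real lfs/stfs would; preemption is disabled so fr0 cannot be clobbered mid-sequence. For instance, sp_to_dp(0x3f800000) — single-precision 1.0f — yields 0x3ff0000000000000, double-precision 1.0.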
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
@@ -832,6 +953,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
@@ -848,8 +973,6 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
@@ -865,6 +988,17 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		break;
#endif
	default:
		BUG();
@@ -932,6 +1066,35 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
	if ((vcpu->arch.mmio_vsx_copy_nums > 4) ||
	    (vcpu->arch.mmio_vsx_copy_nums < 0)) {
		return EMULATE_FAIL;
	}

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
				is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */
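kvmppc_handle_vsx_load() is how one 16-byte VSX load is serviced over an MMIO path that moves at most 8 bytes per transaction: the decoder sets mmio_vsx_copy_nums (2 for lxvd2x, 4 for lxvw4x) and the loop issues one MMIO access per element, bumping paddr_accessed and mmio_vsx_offset each round. Illustratively, for an lxvd2x at a hypothetical guest physical address 0x1000:

/*
 *   pass 1: 8-byte MMIO read @ 0x1000 -> VSR doubleword 0
 *   pass 2: 8-byte MMIO read @ 0x1008 -> VSR doubleword 1
 * A pass that must exit to userspace resumes via
 * kvmppc_emulate_mmio_vsx_loadstore() on the next KVM_RUN
 * (see the kvm_arch_vcpu_ioctl_run() hunk further down).
 */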
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
u64 val, unsigned int bytes, int is_default_endian)
|
||||
{
|
||||
@ -957,6 +1120,9 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
vcpu->mmio_needed = 1;
|
||||
vcpu->mmio_is_write = 1;
|
||||
|
||||
if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
|
||||
val = dp_to_sp(val);
|
||||
|
||||
/* Store the value at the lowest bytes in 'data'. */
|
||||
if (!host_swabbed) {
|
||||
switch (bytes) {
|
||||
@ -990,6 +1156,129 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
|
||||
|
||||
#ifdef CONFIG_VSX
|
||||
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
|
||||
{
|
||||
u32 dword_offset, word_offset;
|
||||
union kvmppc_one_reg reg;
|
||||
int vsx_offset = 0;
|
||||
int copy_type = vcpu->arch.mmio_vsx_copy_type;
|
||||
int result = 0;
|
||||
|
||||
switch (copy_type) {
|
||||
case KVMPPC_VSX_COPY_DWORD:
|
||||
vsx_offset =
|
||||
kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
|
||||
|
||||
if (vsx_offset == -1) {
|
||||
result = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
|
||||
*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
|
||||
} else {
|
||||
reg.vval = VCPU_VSX_VR(vcpu, rs);
|
||||
*val = reg.vsxval[vsx_offset];
|
||||
}
|
||||
break;
|
||||
|
||||
case KVMPPC_VSX_COPY_WORD:
|
||||
vsx_offset =
|
||||
kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
|
||||
|
||||
if (vsx_offset == -1) {
|
||||
result = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
|
||||
dword_offset = vsx_offset / 2;
|
||||
word_offset = vsx_offset % 2;
|
||||
reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
|
||||
*val = reg.vsx32val[word_offset];
|
||||
} else {
|
||||
reg.vval = VCPU_VSX_VR(vcpu, rs);
|
||||
*val = reg.vsx32val[vsx_offset];
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
result = -1;
|
||||
break;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
int rs, unsigned int bytes, int is_default_endian)
|
||||
{
|
||||
u64 val;
|
||||
enum emulation_result emulated = EMULATE_DONE;
|
||||
|
||||
vcpu->arch.io_gpr = rs;
|
||||
|
||||
/* Currently, mmio_vsx_copy_nums only allowed to be less than 4 */
|
||||
if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
|
||||
(vcpu->arch.mmio_vsx_copy_nums < 0) ) {
|
||||
return EMULATE_FAIL;
|
||||
}
|
||||
|
||||
while (vcpu->arch.mmio_vsx_copy_nums) {
|
||||
if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
|
||||
return EMULATE_FAIL;
|
||||
|
||||
emulated = kvmppc_handle_store(run, vcpu,
|
||||
val, bytes, is_default_endian);
|
||||
|
||||
if (emulated != EMULATE_DONE)
|
||||
break;
|
||||
|
||||
vcpu->arch.paddr_accessed += run->mmio.len;
|
||||
|
||||
vcpu->arch.mmio_vsx_copy_nums--;
|
||||
vcpu->arch.mmio_vsx_offset++;
|
||||
}
|
||||
|
||||
return emulated;
|
||||
}
|
||||
|
||||
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
|
||||
struct kvm_run *run)
|
||||
{
|
||||
enum emulation_result emulated = EMULATE_FAIL;
|
||||
int r;
|
||||
|
||||
vcpu->arch.paddr_accessed += run->mmio.len;
|
||||
|
||||
if (!vcpu->mmio_is_write) {
|
||||
emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
|
||||
run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
|
||||
} else {
|
||||
emulated = kvmppc_handle_vsx_store(run, vcpu,
|
||||
vcpu->arch.io_gpr, run->mmio.len, 1);
|
||||
}
|
||||
|
||||
switch (emulated) {
|
||||
case EMULATE_DO_MMIO:
|
||||
run->exit_reason = KVM_EXIT_MMIO;
|
||||
r = RESUME_HOST;
|
||||
break;
|
||||
case EMULATE_FAIL:
|
||||
pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
|
||||
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
||||
run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
|
||||
r = RESUME_HOST;
|
||||
break;
|
||||
default:
|
||||
r = RESUME_GUEST;
|
||||
break;
|
||||
}
|
||||
return r;
|
||||
}
|
||||
#endif /* CONFIG_VSX */
|
||||
|
||||
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
|
||||
{
|
||||
int r = 0;
|
||||
@ -1092,13 +1381,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
int r;
|
||||
sigset_t sigsaved;
|
||||
|
||||
if (vcpu->sigset_active)
|
||||
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
|
||||
|
||||
if (vcpu->mmio_needed) {
|
||||
vcpu->mmio_needed = 0;
|
||||
if (!vcpu->mmio_is_write)
|
||||
kvmppc_complete_mmio_load(vcpu, run);
|
||||
vcpu->mmio_needed = 0;
|
||||
#ifdef CONFIG_VSX
|
||||
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
|
||||
vcpu->arch.mmio_vsx_copy_nums--;
|
||||
vcpu->arch.mmio_vsx_offset++;
|
||||
}
|
||||
|
||||
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
|
||||
r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
|
||||
if (r == RESUME_HOST) {
|
||||
vcpu->mmio_needed = 1;
|
||||
return r;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
} else if (vcpu->arch.osi_needed) {
|
||||
u64 *gprs = run->osi.gprs;
|
||||
int i;
|
||||
@ -1120,6 +1420,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
#endif
|
||||
}
|
||||
|
||||
if (vcpu->sigset_active)
|
||||
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
|
||||
|
||||
if (run->immediate_exit)
|
||||
r = -EINTR;
|
||||
else
|
||||
|
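The VSX path above turns one vector load or store that hits emulated MMIO into up to four smaller MMIO round trips: mmio_vsx_copy_nums counts the copies still outstanding, mmio_vsx_offset picks the destination dword/word inside the vector-scalar register, and kvm_arch_vcpu_ioctl_run() keeps re-entering kvmppc_emulate_mmio_vsx_loadstore() while copies remain. A minimal sketch of that bookkeeping, with illustrative names and values that are not taken from the patch:

	/* Illustrative model only, not code from the patch: a 16-byte vector
	 * load split into two 8-byte MMIO copies starts with copy_nums = 2. */
	struct vsx_mmio {
		unsigned long paddr;	/* next guest physical address */
		int copy_nums;		/* copies still to perform */
		int offset;		/* VSR dword/word index of the next copy */
	};

	/* One KVM_EXIT_MMIO round trip to userspace completes one copy. */
	static void vsx_mmio_complete_one(struct vsx_mmio *s, int mmio_len)
	{
		s->paddr += mmio_len;	/* mirrors paddr_accessed += run->mmio.len */
		s->copy_nums--;		/* mirrors mmio_vsx_copy_nums-- */
		s->offset++;		/* mirrors mmio_vsx_offset++ */
	}

For the WORD copy type, kvmppc_get_vsr_data() maps the running offset to storage as dword_offset = vsx_offset / 2 and word_offset = vsx_offset % 2, so offset 3 selects word 1 of dword 1.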
arch/s390/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <asm/cpu.h>
#include <asm/fpu/api.h>
#include <asm/isc.h>
#include <asm/guarded_storage.h>

#define KVM_S390_BSCA_CPU_SLOTS 64
#define KVM_S390_ESCA_CPU_SLOTS 248
@@ -121,6 +122,7 @@ struct esca_block {
#define CPUSTAT_SLSR       0x00002000
#define CPUSTAT_ZARCH      0x00000800
#define CPUSTAT_MCDS       0x00000100
#define CPUSTAT_KSS        0x00000200
#define CPUSTAT_SM         0x00000080
#define CPUSTAT_IBS        0x00000040
#define CPUSTAT_GED2       0x00000010
@@ -164,16 +166,27 @@ struct kvm_s390_sie_block {
#define ICTL_RRBE	0x00001000
#define ICTL_TPROT	0x00000200
	__u32	ictl;			/* 0x0048 */
#define ECA_CEI		0x80000000
#define ECA_IB		0x40000000
#define ECA_SIGPI	0x10000000
#define ECA_MVPGI	0x01000000
#define ECA_VX		0x00020000
#define ECA_PROTEXCI	0x00002000
#define ECA_SII		0x00000001
	__u32	eca;			/* 0x004c */
#define ICPT_INST	0x04
#define ICPT_PROGI	0x08
#define ICPT_INSTPROGI	0x0C
#define ICPT_EXTREQ	0x10
#define ICPT_EXTINT	0x14
#define ICPT_IOREQ	0x18
#define ICPT_WAIT	0x1c
#define ICPT_VALIDITY	0x20
#define ICPT_STOP	0x28
#define ICPT_OPEREXC	0x2C
#define ICPT_PARTEXEC	0x38
#define ICPT_IOINST	0x40
#define ICPT_KSS	0x5c
	__u8	icptcode;		/* 0x0050 */
	__u8	icptstatus;		/* 0x0051 */
	__u16	ihcpu;			/* 0x0052 */
@@ -182,10 +195,19 @@ struct kvm_s390_sie_block {
	__u32	ipb;			/* 0x0058 */
	__u32	scaoh;			/* 0x005c */
	__u8	reserved60;		/* 0x0060 */
#define ECB_GS		0x40
#define ECB_TE		0x10
#define ECB_SRSI	0x04
#define ECB_HOSTPROTINT	0x02
	__u8	ecb;			/* 0x0061 */
#define ECB2_CMMA	0x80
#define ECB2_IEP	0x20
#define ECB2_PFMFI	0x08
#define ECB2_ESCA	0x04
	__u8	ecb2;			/* 0x0062 */
#define ECB3_AES	0x04
#define ECB3_DEA	0x08
#define ECB3_AES	0x04
#define ECB3_RI		0x01
	__u8	ecb3;			/* 0x0063 */
	__u32	scaol;			/* 0x0064 */
	__u8	reserved68[4];		/* 0x0068 */
@@ -219,11 +241,14 @@ struct kvm_s390_sie_block {
	__u32	crycbd;			/* 0x00fc */
	__u64	gcr[16];		/* 0x0100 */
	__u64	gbea;			/* 0x0180 */
	__u8	reserved188[24];	/* 0x0188 */
	__u8	reserved188[8];		/* 0x0188 */
	__u64	sdnxo;			/* 0x0190 */
	__u8	reserved198[8];		/* 0x0198 */
	__u32	fac;			/* 0x01a0 */
	__u8	reserved1a4[20];	/* 0x01a4 */
	__u64	cbrlo;			/* 0x01b8 */
	__u8	reserved1c0[8];		/* 0x01c0 */
#define ECD_HOSTREGMGMT	0x20000000
	__u32	ecd;			/* 0x01c8 */
	__u8	reserved1cc[18];	/* 0x01cc */
	__u64	pp;			/* 0x01de */
@@ -498,6 +523,12 @@ struct kvm_s390_local_interrupt {
#define FIRQ_CNTR_PFAULT 3
#define FIRQ_MAX_COUNT   4

/* mask the AIS mode for a given ISC */
#define AIS_MODE_MASK(isc) (0x80 >> isc)

#define KVM_S390_AIS_MODE_ALL    0
#define KVM_S390_AIS_MODE_SINGLE 1

struct kvm_s390_float_interrupt {
	unsigned long pending_irqs;
	spinlock_t lock;
@@ -507,6 +538,10 @@ struct kvm_s390_float_interrupt {
	struct kvm_s390_ext_info srv_signal;
	int next_rr_cpu;
	unsigned long idle_mask[BITS_TO_LONGS(KVM_MAX_VCPUS)];
	struct mutex ais_lock;
	u8 simm;
	u8 nimm;
	int ais_enabled;
};

struct kvm_hw_wp_info_arch {
@@ -554,6 +589,7 @@ struct kvm_vcpu_arch {
	/* if vsie is active, currently executed shadow sie control block */
	struct kvm_s390_sie_block *vsie_block;
	unsigned int host_acrs[NUM_ACRS];
	struct gs_cb *host_gscb;
	struct fpu host_fpregs;
	struct kvm_s390_local_interrupt local_int;
	struct hrtimer ckc_timer;
@@ -574,6 +610,7 @@ struct kvm_vcpu_arch {
	 */
	seqcount_t cputm_seqcount;
	__u64 cputm_start;
	bool gs_enabled;
};

struct kvm_vm_stat {
@@ -596,6 +633,7 @@ struct s390_io_adapter {
	bool maskable;
	bool masked;
	bool swap;
	bool suppressible;
	struct rw_semaphore maps_lock;
	struct list_head maps;
	atomic_t nr_maps;
arch/s390/include/asm/sclp.h
@@ -75,6 +75,7 @@ struct sclp_info {
	unsigned char has_pfmfi : 1;
	unsigned char has_ibs : 1;
	unsigned char has_skey : 1;
	unsigned char has_kss : 1;
	unsigned int ibc;
	unsigned int mtid;
	unsigned int mtid_cp;
arch/s390/include/uapi/asm/kvm.h
@@ -26,6 +26,8 @@
#define KVM_DEV_FLIC_ADAPTER_REGISTER	6
#define KVM_DEV_FLIC_ADAPTER_MODIFY	7
#define KVM_DEV_FLIC_CLEAR_IO_IRQ	8
#define KVM_DEV_FLIC_AISM		9
#define KVM_DEV_FLIC_AIRQ_INJECT	10
/*
 * We can have up to 4*64k pending subchannels + 8 adapter interrupts,
 * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter {
	__u8 isc;
	__u8 maskable;
	__u8 swap;
	__u8 pad;
	__u8 flags;
};

#define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01

struct kvm_s390_ais_req {
	__u8 isc;
	__u16 mode;
};

#define KVM_S390_IO_ADAPTER_MASK 1
@@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine {
#define KVM_S390_VM_CPU_FEAT_CMMA	10
#define KVM_S390_VM_CPU_FEAT_PFMFI	11
#define KVM_S390_VM_CPU_FEAT_SIGPIF	12
#define KVM_S390_VM_CPU_FEAT_KSS	13
struct kvm_s390_vm_cpu_feat {
	__u64 feat[16];
};
@@ -131,7 +141,8 @@ struct kvm_s390_vm_cpu_subfunc {
	__u8 kmo[16];		/* with MSA4 */
	__u8 pcc[16];		/* with MSA4 */
	__u8 ppno[16];		/* with MSA5 */
	__u8 reserved[1824];
	__u8 kma[16];		/* with MSA8 */
	__u8 reserved[1808];
};

/* kvm attributes for crypto */
@@ -197,6 +208,10 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_VRS    (1UL << 6)
#define KVM_SYNC_RICCB  (1UL << 7)
#define KVM_SYNC_FPRS   (1UL << 8)
#define KVM_SYNC_GSCB   (1UL << 9)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
	__u64 prefix;	/* prefix register */
@@ -217,8 +232,16 @@ struct kvm_sync_regs {
	};
	__u8  reserved[512];	/* for future vector expansion */
	__u32 fpc;		/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
	__u8 padding[52];	/* riccb needs to be 64byte aligned */
	__u8 padding1[52];	/* riccb needs to be 64byte aligned */
	__u8 riccb[64];		/* runtime instrumentation controls block */
	__u8 padding2[192];	/* sdnx needs to be 256byte aligned */
	union {
		__u8 sdnx[SDNXL];	/* state description annex */
		struct {
			__u64 reserved1[2];
			__u64 gscb[4];
		};
	};
};

#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
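A hedged sketch of how userspace would drive the two new FLIC attribute groups above. It assumes flic_fd refers to a FLIC device created earlier with KVM_CREATE_DEVICE (KVM_DEV_TYPE_FLIC) and that the VM has the AIS capability enabled; error handling is omitted:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Switch ISC 3 to single-interruption mode (sketch). */
	static int set_ais_single(int flic_fd)
	{
		struct kvm_s390_ais_req req = {
			.isc = 3,
			.mode = KVM_S390_AIS_MODE_SINGLE,
		};
		struct kvm_device_attr attr = {
			.group = KVM_DEV_FLIC_AISM,
			.addr = (__u64)(unsigned long)&req,
		};

		return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
	}

	/* Inject an adapter interrupt for adapter id 0 through the new
	 * suppression-aware path; the adapter id travels in attr.attr. */
	static int inject_airq(int flic_fd)
	{
		struct kvm_device_attr attr = {
			.group = KVM_DEV_FLIC_AIRQ_INJECT,
			.attr = 0,	/* adapter id */
		};

		return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
	}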
arch/s390/kvm/gaccess.c
@@ -261,7 +261,7 @@ struct aste {

int ipte_lock_held(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1) {
	if (vcpu->arch.sie_block->eca & ECA_SII) {
		int rc;

		read_lock(&vcpu->kvm->arch.sca_lock);
@@ -360,7 +360,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)

void ipte_lock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
	if (vcpu->arch.sie_block->eca & ECA_SII)
		ipte_lock_siif(vcpu);
	else
		ipte_lock_simple(vcpu);
@@ -368,7 +368,7 @@ void ipte_lock(struct kvm_vcpu *vcpu)

void ipte_unlock(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->eca & 1)
	if (vcpu->arch.sie_block->eca & ECA_SII)
		ipte_unlock_siif(vcpu);
	else
		ipte_unlock_simple(vcpu);
arch/s390/kvm/intercept.c
@@ -35,6 +35,7 @@ static const intercept_handler_t instruction_handlers[256] = {
	[0xb6] = kvm_s390_handle_stctl,
	[0xb7] = kvm_s390_handle_lctl,
	[0xb9] = kvm_s390_handle_b9,
	[0xe3] = kvm_s390_handle_e3,
	[0xe5] = kvm_s390_handle_e5,
	[0xeb] = kvm_s390_handle_eb,
};
@@ -368,8 +369,7 @@ static int handle_operexc(struct kvm_vcpu *vcpu)
	trace_kvm_s390_handle_operexc(vcpu, vcpu->arch.sie_block->ipa,
				      vcpu->arch.sie_block->ipb);

	if (vcpu->arch.sie_block->ipa == 0xb256 &&
	    test_kvm_facility(vcpu->kvm, 74))
	if (vcpu->arch.sie_block->ipa == 0xb256)
		return handle_sthyi(vcpu);

	if (vcpu->arch.sie_block->ipa == 0 && vcpu->kvm->arch.user_instr0)
@@ -404,28 +404,31 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
		return -EOPNOTSUPP;

	switch (vcpu->arch.sie_block->icptcode) {
	case 0x10:
	case 0x18:
	case ICPT_EXTREQ:
	case ICPT_IOREQ:
		return handle_noop(vcpu);
	case 0x04:
	case ICPT_INST:
		rc = handle_instruction(vcpu);
		break;
	case 0x08:
	case ICPT_PROGI:
		return handle_prog(vcpu);
	case 0x14:
	case ICPT_EXTINT:
		return handle_external_interrupt(vcpu);
	case 0x1c:
	case ICPT_WAIT:
		return kvm_s390_handle_wait(vcpu);
	case 0x20:
	case ICPT_VALIDITY:
		return handle_validity(vcpu);
	case 0x28:
	case ICPT_STOP:
		return handle_stop(vcpu);
	case 0x2c:
	case ICPT_OPEREXC:
		rc = handle_operexc(vcpu);
		break;
	case 0x38:
	case ICPT_PARTEXEC:
		rc = handle_partial_execution(vcpu);
		break;
	case ICPT_KSS:
		rc = kvm_s390_skey_check_enable(vcpu);
		break;
	default:
		return -EOPNOTSUPP;
	}
arch/s390/kvm/interrupt.c
@@ -410,6 +410,7 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;
@@ -418,12 +419,34 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0-53 are used for address formation */
	ext_sa_addr &= ~0x3ffUL;
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
@@ -431,6 +454,14 @@ static int __write_machine_check(struct kvm_vcpu *vcpu,
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
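The LC handling above encodes a simple rule: the extended-save-area origin keeps only the address bits above the encoded area length, so for the valid lengths the switch is equivalent to masking with ~((1UL << lc) - 1), and lc = 0 (no length encoded) falls back to the 1K mask. A one-line check of the three cases (hypothetical helper, not in the patch):

	/* lc = 10 -> ~0x3ffUL (1K), lc = 11 -> ~0x7ffUL (2K), lc = 12 -> ~0xfffUL (4K) */
	static unsigned long mcesa_origin(unsigned long mcesad, unsigned int lc)
	{
		return mcesad & ~((1UL << lc) - 1);
	}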
@@ -1968,6 +1999,8 @@ static int register_io_adapter(struct kvm_device *dev,
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	adapter->suppressible = (adapter_info.flags) &
				KVM_S390_ADAPTER_SUPPRESSIBLE;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
@@ -2121,6 +2154,87 @@ static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
	return 0;
}

static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_ais_req req;
	int ret = 0;

	if (!fi->ais_enabled)
		return -ENOTSUPP;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	if (req.isc > MAX_ISC)
		return -EINVAL;

	trace_kvm_s390_modify_ais_mode(req.isc,
				       (fi->simm & AIS_MODE_MASK(req.isc)) ?
				       (fi->nimm & AIS_MODE_MASK(req.isc)) ?
				       2 : KVM_S390_AIS_MODE_SINGLE :
				       KVM_S390_AIS_MODE_ALL, req.mode);

	mutex_lock(&fi->ais_lock);
	switch (req.mode) {
	case KVM_S390_AIS_MODE_ALL:
		fi->simm &= ~AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	case KVM_S390_AIS_MODE_SINGLE:
		fi->simm |= AIS_MODE_MASK(req.isc);
		fi->nimm &= ~AIS_MODE_MASK(req.isc);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&fi->ais_lock);

	return ret;
}

static int kvm_s390_inject_airq(struct kvm *kvm,
				struct s390_io_adapter *adapter)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_IO(1, 0, 0, 0),
		.parm = 0,
		.parm64 = (adapter->isc << 27) | 0x80000000,
	};
	int ret = 0;

	if (!fi->ais_enabled || !adapter->suppressible)
		return kvm_s390_inject_vm(kvm, &s390int);

	mutex_lock(&fi->ais_lock);
	if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
		trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
		goto out;
	}

	ret = kvm_s390_inject_vm(kvm, &s390int);
	if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
		fi->nimm |= AIS_MODE_MASK(adapter->isc);
		trace_kvm_s390_modify_ais_mode(adapter->isc,
					       KVM_S390_AIS_MODE_SINGLE, 2);
	}
out:
	mutex_unlock(&fi->ais_lock);
	return ret;
}

static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
{
	unsigned int id = attr->attr;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter)
		return -EINVAL;

	return kvm_s390_inject_airq(kvm, adapter);
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
@@ -2157,6 +2271,12 @@ static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
		r = clear_io_irq(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AISM:
		r = modify_ais_mode(dev->kvm, attr);
		break;
	case KVM_DEV_FLIC_AIRQ_INJECT:
		r = flic_inject_airq(dev->kvm, attr);
		break;
	default:
		r = -EINVAL;
	}
@@ -2176,6 +2296,8 @@ static int flic_has_attr(struct kvm_device *dev,
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
	case KVM_DEV_FLIC_CLEAR_IO_IRQ:
	case KVM_DEV_FLIC_AISM:
	case KVM_DEV_FLIC_AIRQ_INJECT:
		return 0;
	}
	return -ENXIO;
@@ -2286,12 +2408,7 @@ static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		ret = kvm_s390_inject_airq(kvm, adapter);
		if (ret == 0)
			ret = 1;
	}
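The SIMM/NIMM bookkeeping above gives each ISC one of three effective states: ALL (every adapter interrupt is delivered), SINGLE (one interrupt is delivered, then the ISC suppresses itself), and a no-interruptions state that lasts until userspace rearms the ISC through KVM_DEV_FLIC_AISM. A compact model of the decoding, using illustrative names only:

	enum ais_state { AIS_ALL, AIS_SINGLE, AIS_NO };	/* illustrative */

	/* per-ISC bit in simm/nimm:       simm  nimm
	 *   ALL (always deliver)            0     0
	 *   SINGLE (deliver, then arm NO)   1     0
	 *   NO (suppress until rearmed)     1     1
	 */
	static enum ais_state ais_state(unsigned char simm, unsigned char nimm,
					int isc)
	{
		unsigned char m = 0x80 >> isc;	/* AIS_MODE_MASK(isc) */

		if (!(simm & m))
			return AIS_ALL;
		return (nimm & m) ? AIS_NO : AIS_SINGLE;
	}

This matches the transition in kvm_s390_inject_airq(): a successful delivery on an ISC whose simm bit is set also sets the nimm bit, moving SINGLE to NO.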
arch/s390/kvm/kvm-s390.c
@@ -276,6 +276,10 @@ static void kvm_s390_cpu_feat_init(void)
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
@@ -300,6 +304,8 @@ static void kvm_s390_cpu_feat_init(void)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
@@ -380,6 +386,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_AIS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
@@ -405,6 +412,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
@@ -541,6 +551,34 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			kvm->arch.float_int.ais_enabled = 1;
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
@@ -1498,6 +1536,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	kvm->arch.float_int.simm = 0;
	kvm->arch.float_int.nimm = 0;
	kvm->arch.float_int.ais_enabled = 0;
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -1646,7 +1688,7 @@ static void sca_add_vcpu(struct kvm_vcpu *vcpu)
		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
@@ -1700,7 +1742,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;
@@ -1749,6 +1791,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
@@ -1939,8 +1983,8 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
	vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
	return 0;
}

@@ -1970,31 +2014,37 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= 0x20;
	vcpu->arch.sie_block->eca = 0x1002000U;
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
				      | SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (sclp.has_kss)
		atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
@@ -2446,7 +2496,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return 0;
}
@@ -2719,6 +2769,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
@@ -2747,12 +2802,24 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64)) {
		struct runtime_instr_cb *riccb =
			(struct runtime_instr_cb *) &kvm_run->s.regs.riccb;

		if (riccb->valid)
			vcpu->arch.sie_block->ecb3 |= 0x01;
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->valid &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
@@ -2768,6 +2835,20 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}

	kvm_run->kvm_dirty_regs = 0;
}
@@ -2794,6 +2875,18 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (MACHINE_HAS_GS) {
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		preempt_disable();
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		preempt_enable();
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
	}

}

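Both new capabilities above are VM-scoped and must be enabled before any VCPU exists (the kernel rejects them with -EBUSY otherwise). A minimal userspace sketch, assuming vm_fd comes from KVM_CREATE_VM and error handling is omitted:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int enable_ais_and_gs(int vm_fd)
	{
		struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_AIS };
		int r = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* AIS, facility 72 */

		if (r)
			return r;
		cap.cap = KVM_CAP_S390_GS;	/* guarded storage, facility 133 */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}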
arch/s390/kvm/kvm-s390.h
@@ -25,7 +25,7 @@
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & 0x10))
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))

@@ -246,6 +246,7 @@ static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
@@ -253,6 +254,7 @@ int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
arch/s390/kvm/priv.c
@@ -37,7 +37,8 @@
static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		vcpu->arch.sie_block->ecb3 |= 0x01;
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
@@ -52,6 +53,33 @@ int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
	return -EOPNOTSUPP;
}

static int handle_gs(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
@@ -170,18 +198,25 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
	return 0;
}

static int __skey_check_enable(struct kvm_vcpu *vcpu)
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc)
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	if (!rc) {
		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
			atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
		else
			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
					     ICTL_RRBE);
	}
	return rc;
}

@@ -190,7 +225,7 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = __skey_check_enable(vcpu);
	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
@@ -759,6 +794,7 @@ static const intercept_handler_t b2_handlers[256] = {
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x56] = handle_sthyi,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
@@ -887,7 +923,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
	}

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
		int rc = __skey_check_enable(vcpu);
		int rc = kvm_s390_skey_check_enable(vcpu);

		if (rc)
			return rc;
arch/s390/kvm/sthyi.c
@@ -404,6 +404,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
	u64 code, addr, cc = 0;
	struct sthyi_sctns *sctns = NULL;

	if (!test_kvm_facility(vcpu->kvm, 74))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	/*
	 * STHYI requires extensive locking in the higher hypervisors
	 * and is very computational/memory expensive. Therefore we
arch/s390/kvm/trace-s390.h
@@ -280,6 +280,58 @@ TRACE_EVENT(kvm_s390_enable_disable_ibs,
		  __entry->state ? "enabling" : "disabling", __entry->id)
	);

/*
 * Trace point for modifying ais mode for a given isc.
 */
TRACE_EVENT(kvm_s390_modify_ais_mode,
	    TP_PROTO(__u8 isc, __u16 from, __u16 to),
	    TP_ARGS(isc, from, to),

	TP_STRUCT__entry(
		__field(__u8, isc)
		__field(__u16, from)
		__field(__u16, to)
		),

	TP_fast_assign(
		__entry->isc = isc;
		__entry->from = from;
		__entry->to = to;
		),

	TP_printk("for isc %x, modifying interruption mode from %s to %s",
		  __entry->isc,
		  (__entry->from == KVM_S390_AIS_MODE_ALL) ?
		  "ALL-Interruptions Mode" :
		  (__entry->from == KVM_S390_AIS_MODE_SINGLE) ?
		  "Single-Interruption Mode" : "No-Interruptions Mode",
		  (__entry->to == KVM_S390_AIS_MODE_ALL) ?
		  "ALL-Interruptions Mode" :
		  (__entry->to == KVM_S390_AIS_MODE_SINGLE) ?
		  "Single-Interruption Mode" : "No-Interruptions Mode")
	);

/*
 * Trace point for suppressed adapter I/O interrupt.
 */
TRACE_EVENT(kvm_s390_airq_suppressed,
	    TP_PROTO(__u32 id, __u8 isc),
	    TP_ARGS(id, isc),

	TP_STRUCT__entry(
		__field(__u32, id)
		__field(__u8, isc)
		),

	TP_fast_assign(
		__entry->id = id;
		__entry->isc = isc;
		),

	TP_printk("adapter I/O interrupt suppressed (id:%x isc:%x)",
		  __entry->id, __entry->isc)
	);

#endif /* _TRACE_KVMS390_H */
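With these events enabled through the usual tracefs interface (e.g. the kvm-s390 event directory under /sys/kernel/debug/tracing/events/), the TP_printk formats above produce lines of roughly this shape; the values here are illustrative, not captured output:

	kvm_s390_modify_ais_mode: for isc 3, modifying interruption mode from ALL-Interruptions Mode to Single-Interruption Mode
	kvm_s390_airq_suppressed: adapter I/O interrupt suppressed (id:0 isc:3)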
arch/s390/kvm/vsie.c
@@ -117,6 +117,8 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
@@ -249,7 +251,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	bool had_tx = scb_s->ecb & 0x10U;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

@@ -289,7 +291,9 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
@@ -307,34 +311,39 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & 0x01002000U;
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & 0x02U;
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73)) {
		/* remap the prefix is tx is toggled on */
		if ((scb_o->ecb & 0x10U) && !had_tx)
		if ((scb_o->ecb & ECB_TE) && !had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= scb_o->ecb & 0x10U;
		scb_s->ecb |= scb_o->ecb & ECB_TE;
	}
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & 0x00020000U;
		scb_s->ecd |= scb_o->ecd & 0x20000000U;
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & 0x01U;
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & 0x20U;
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & 0x00000001U;
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & 0x40000000U;
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & 0x80000000U;
		scb_s->eca |= scb_o->eca & ECA_CEI;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
@@ -406,7 +415,7 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	if (!rc && (scb_s->ecb & 0x10U))
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/*
@@ -496,6 +505,13 @@ static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		gpa = scb_o->sdnxo;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->sdnxo = 0;
	}
}

/*
@@ -543,7 +559,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	}

	gpa = scb_o->itdba & ~0xffUL;
	if (gpa && (scb_s->ecb & 0x10U)) {
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (!(gpa & ~0x1fffU)) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
@@ -558,8 +574,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	}

	gpa = scb_o->gvrd & ~0x1ffUL;
	if (gpa && (scb_s->eca & 0x00020000U) &&
	    !(scb_s->ecd & 0x20000000U)) {
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
@@ -577,7 +592,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	}

	gpa = scb_o->riccbd & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & 0x01U)) {
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
@@ -591,6 +606,33 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
			goto unpin;
		scb_s->riccbd = hpa;
	}
	if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		unsigned long sdnxc;

		gpa = scb_o->sdnxo & ~0xfUL;
		sdnxc = scb_o->sdnxo & 0xfUL;
		if (!gpa || !(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/* Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc == -EINVAL)
			rc = set_validity_icpt(scb_s, 0x10b0U);
		if (rc)
			goto unpin;
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
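The SDNX pinning above packs the annex size into the low four bits of sdnxo: sdnxc is a power-of-two exponent between 6 and 12, and gpa & ((1 << sdnxc) - 1) rejects an origin that is not aligned to its own size; that natural alignment also guarantees the annex never crosses a page boundary, so the single pin_guest_page() call is sufficient. A hypothetical helper condensing the three tests:

	/* Illustrative: origin must be non-zero, above 8K, sized 2^6..2^12,
	 * and naturally aligned. For example sdnxc = 8 means 256-byte
	 * alignment, matching the SDNXC/SDNXL default used for the sdnx
	 * field in kvm_run. */
	static int sdnx_valid(unsigned long gpa, unsigned int sdnxc)
	{
		return gpa && (gpa & ~0x1fffUL) &&
		       sdnxc >= 6 && sdnxc <= 12 &&
		       !(gpa & ((1UL << sdnxc) - 1));
	}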
arch/s390/tools/gen_facilities.c
@@ -82,6 +82,7 @@ static struct facility_def facility_defs[] = {
			78, /* enhanced-DAT 2 */
			130, /* instruction-execution-protection */
			131, /* enhanced-SOP 2 and side-effect */
			146, /* msa extension 8 */
			-1 /* END */
		}
	},
arch/x86/include/asm/kvm_emulate.h
@@ -221,6 +221,9 @@ struct x86_emulate_ops {
	void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
			  u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
	void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);

	unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
	void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
};

typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -290,7 +293,6 @@ struct x86_emulate_ctxt {

	/* interruptibility state, as a result of execution of STI or MOV SS */
	int interruptibility;
	int emul_flags;

	bool perm_ok; /* do not check permissions if true */
	bool ud; /* inject an #UD if host doesn't support insn */