2019-06-03 05:44:50 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2013-01-22 00:36:12 +00:00
|
|
|
/*
|
2016-05-28 10:27:11 +00:00
|
|
|
* Copyright (C) 2015, 2016 ARM Ltd.
|
2013-01-22 00:36:12 +00:00
|
|
|
*/
|
2016-05-28 10:27:11 +00:00
|
|
|
#ifndef __KVM_ARM_VGIC_H
|
|
|
|
#define __KVM_ARM_VGIC_H
|
2015-11-23 15:20:05 +00:00
|
|
|
|
2022-01-04 15:19:40 +00:00
|
|
|
#include <linux/bits.h>
|
2013-01-22 00:36:14 +00:00
|
|
|
#include <linux/kvm.h>
|
|
|
|
#include <linux/irqreturn.h>
|
2022-01-04 15:19:40 +00:00
|
|
|
#include <linux/kref.h>
|
|
|
|
#include <linux/mutex.h>
|
2013-01-22 00:36:14 +00:00
|
|
|
#include <linux/spinlock.h>
|
arm64: KVM: vgic-v2: Add the GICV emulation infrastructure
In order to efficiently perform the GICV access on behalf of the
guest, we need to be able to avoid going back all the way to
the host kernel.
For this, we introduce a new hook in the world switch code,
conveniently placed just after populating the fault info.
At that point, we only have saved/restored the GP registers,
and we can quickly perform all the required checks (data abort,
translation fault, valid faulting syndrome, not an external
abort, not a PTW).
Coming back from the emulation code, we need to skip the emulated
instruction. This involves an additional bit of save/restore in
order to be able to access the guest's PC (and possibly CPSR if
this is a 32bit guest).
At this stage, no emulation code is provided.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
2016-09-06 08:28:45 +00:00
|
|
|
#include <linux/static_key.h>
|
2013-01-22 00:36:14 +00:00
|
|
|
#include <linux/types.h>
|
2024-02-21 05:42:44 +00:00
|
|
|
#include <linux/xarray.h>
|
2015-03-26 14:39:34 +00:00
|
|
|
#include <kvm/iodev.h>
|
2016-07-15 11:43:32 +00:00
|
|
|
#include <linux/list.h>
|
2016-09-12 14:49:15 +00:00
|
|
|
#include <linux/jump_label.h>
|
2013-01-22 00:36:12 +00:00
|
|
|
|
2017-10-27 14:28:38 +00:00
|
|
|
#include <linux/irqchip/arm-gic-v4.h>
|
|
|
|
|
2018-05-22 07:55:18 +00:00
|
|
|
/* Maximum number of vcpus supported by each emulated GIC model */
#define VGIC_V3_MAX_CPUS		512
#define VGIC_V2_MAX_CPUS		8

/* Default total IRQ count for the legacy KVM_CREATE_IRQCHIP path */
#define VGIC_NR_IRQS_LEGACY	256
/* Per-cpu (private) interrupt space: SGIs 0..15, PPIs 16..31 */
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)
/* SPIs end at 1019; 1020..1023 are reserved; LPIs start at 8192 */
#define VGIC_MAX_SPI		1019
#define VGIC_MAX_RESERVED	1023
#define VGIC_MIN_LPI		8192
/* GSI routing pins cover the SPI range only (INTIDs 32..1019) */
#define KVM_IRQCHIP_NUM_PINS	(1020 - 32)
|
2014-02-04 18:13:03 +00:00
|
|
|
|
2017-05-02 18:11:49 +00:00
|
|
|
/* INTID range predicates: PPIs are 16..31, SPIs are 32..1019 */
#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
#define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \
			 (irq) <= VGIC_MAX_SPI)
|
2017-05-02 18:11:49 +00:00
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
/* Which GIC architecture revision the host hardware implements */
enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};
|
2013-01-22 00:36:14 +00:00
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
/* same for all guests, as depending only on the _host's_ GIC model */
struct vgic_global {
	/* type of the host GIC */
	enum vgic_type		type;

	/* Physical address of vgic virtual cpu interface */
	phys_addr_t		vcpu_base;

	/* GICV mapping, kernel VA */
	void __iomem		*vcpu_base_va;
	/* GICV mapping, HYP VA */
	void __iomem		*vcpu_hyp_va;

	/* virtual control interface mapping, kernel VA */
	void __iomem		*vctrl_base;
	/* virtual control interface mapping, HYP VA */
	void __iomem		*vctrl_hyp;

	/* Number of implemented list registers */
	int			nr_lr;

	/* Maintenance IRQ number */
	unsigned int		maint_irq;

	/* maximum number of VCPUs allowed (GICv2 limits us to 8) */
	int			max_gic_vcpus;

	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
	bool			can_emulate_gicv2;

	/* Hardware has GICv4? */
	bool			has_gicv4;
	/* ... and the GICv4.1 extensions? */
	bool			has_gicv4_1;

	/* Pseudo GICv3 from outer space */
	bool			no_hw_deactivation;

	/* GIC system register CPU interface */
	struct static_key_false gicv3_cpuif;

	/* ICH_VTR_EL2 value — NOTE(review): presumably read once at init; confirm */
	u32			ich_vtr_el2;
};
|
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
extern struct vgic_global kvm_vgic_global_state;
|
2014-02-04 17:48:10 +00:00
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
/* Upper bounds on the number of list registers per GIC model */
#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
/* Map a GICv3 LR number to its array slot, counting from the top down */
#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - lr)
|
2013-06-03 14:55:02 +00:00
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
/* Trigger mode of an interrupt */
enum vgic_irq_config {
	VGIC_CONFIG_EDGE = 0,
	VGIC_CONFIG_LEVEL
};
|
|
|
|
|
2021-03-01 17:39:39 +00:00
|
|
|
/*
 * Per-irq ops overriding some common behaviours.
 *
 * Always called in non-preemptible section and the functions can use
 * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs.
 */
struct irq_ops {
	/* Per interrupt flags for special-cased interrupts */
	unsigned long flags;

#define VGIC_IRQ_SW_RESAMPLE	BIT(0)	/* Clear the active state for resampling */

	/*
	 * Callback function pointer to in-kernel devices that can tell us the
	 * state of the input level of mapped level-triggered IRQ faster than
	 * peeking into the physical GIC.
	 */
	bool (*get_input_level)(int vintid);
};
|
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
/* Emulated state of a single interrupt (SGI, PPI, SPI or LPI) */
struct vgic_irq {
	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
	struct rcu_head rcu;		/* For deferred freeing — presumably
					 * paired with the refcount below */
	struct list_head ap_list;	/* Entry in a vcpu's ap_list_head */

	struct kvm_vcpu *vcpu;		/* SGIs and PPIs: The VCPU
					 * SPIs and LPIs: The VCPU whose ap_list
					 * this is queued on.
					 */

	struct kvm_vcpu *target_vcpu;	/* The VCPU that this interrupt should
					 * be sent to, as a result of the
					 * targets reg (v2) or the
					 * affinity reg (v3).
					 */

	u32 intid;			/* Guest visible INTID */
	bool line_level;		/* Level only */
	bool pending_latch;		/* The pending latch state used to calculate
					 * the pending state for both level
					 * and edge triggered IRQs. */
	bool active;			/* not used for LPIs */
	bool enabled;
	bool hw;			/* Tied to HW IRQ */
	struct kref refcount;		/* Used for LPIs */
	u32 hwintid;			/* HW INTID number */
	unsigned int host_irq;		/* linux irq corresponding to hwintid */
	union {
		u8 targets;			/* GICv2 target VCPUs mask */
		u32 mpidr;			/* GICv3 target VCPU */
	};
	u8 source;			/* GICv2 SGIs only */
	u8 active_source;		/* GICv2 SGIs only */
	u8 priority;
	u8 group;			/* 0 == group 0, 1 == group 1 */
	enum vgic_irq_config config;	/* Level or edge */

	/* Optional per-irq overrides; see struct irq_ops */
	struct irq_ops *ops;

	void *owner;			/* Opaque pointer to reserve an interrupt
					   for in-kernel devices. */
};
|
|
|
|
|
2021-03-15 13:11:58 +00:00
|
|
|
static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq)
|
|
|
|
{
|
|
|
|
return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE);
|
|
|
|
}
|
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
struct vgic_register_region;
|
2016-07-15 11:43:30 +00:00
|
|
|
struct vgic_its;
|
|
|
|
|
|
|
|
/* Which part of the GIC a struct vgic_io_device emulates */
enum iodev_type {
	IODEV_CPUIF,
	IODEV_DIST,
	IODEV_REDIST,
	IODEV_ITS
};
|
2016-05-28 10:27:11 +00:00
|
|
|
|
2015-03-26 14:39:34 +00:00
|
|
|
/* One MMIO frame of the emulated GIC, backed by a KVM I/O device */
struct vgic_io_device {
	gpa_t base_addr;		/* Guest physical base of the frame */
	union {
		struct kvm_vcpu *redist_vcpu;	/* IODEV_REDIST: owning vcpu */
		struct vgic_its *its;		/* IODEV_ITS: owning ITS */
	};
	const struct vgic_register_region *regions;
	enum iodev_type iodev_type;
	int nr_regions;			/* Number of entries in @regions */
	struct kvm_io_device dev;
};
|
|
|
|
|
2016-07-15 11:43:30 +00:00
|
|
|
/* Emulated GICv3 Interrupt Translation Service (ITS) */
struct vgic_its {
	/* The base address of the ITS control register frame */
	gpa_t			vgic_its_base;

	bool			enabled;
	struct vgic_io_device	iodev;
	struct kvm_device	*dev;

	/* These registers correspond to GITS_BASER{0,1} */
	u64			baser_device_table;
	u64			baser_coll_table;

	/* Protects the command queue */
	struct mutex		cmd_lock;
	u64			cbaser;
	u32			creadr;
	u32			cwriter;

	/* migration ABI revision in use */
	u32			abi_rev;

	/* Protects the device and collection lists */
	struct mutex		its_lock;
	struct list_head	device_list;
	struct list_head	collection_list;

	/*
	 * Caches the (device_id, event_id) -> vgic_irq translation for
	 * LPIs that are mapped and enabled.
	 */
	struct xarray		translation_cache;
};
|
|
|
|
|
2017-01-17 22:09:13 +00:00
|
|
|
struct vgic_state_iter;
|
|
|
|
|
2018-05-22 07:55:08 +00:00
|
|
|
/* A guest-physical region holding one or more GICv3 redistributor frames */
struct vgic_redist_region {
	u32 index;		/* region index */
	gpa_t base;		/* guest physical base address */
	u32 count;		/* number of redistributors or 0 if single region */
	u32 free_index;		/* index of the next free redistributor */
	struct list_head list;	/* entry in vgic_dist::rd_regions */
};
|
|
|
|
|
2013-01-22 00:36:12 +00:00
|
|
|
/* Per-VM vgic state: distributor plus VM-global configuration */
struct vgic_dist {
	bool			in_kernel;	/* vgic emulated in kernel (see irqchip_in_kernel()) */
	bool			ready;
	bool			initialized;

	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
	u32			vgic_model;

	/* Implementation revision as reported in the GICD_IIDR */
	u32			implementation_rev;
#define KVM_VGIC_IMP_REV_2	2 /* GICv2 restorable groups */
#define KVM_VGIC_IMP_REV_3	3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */
#define KVM_VGIC_IMP_REV_LATEST	KVM_VGIC_IMP_REV_3

	/* Userspace can write to GICv2 IGROUPR */
	bool			v2_groups_user_writable;

	/* Do injected MSIs require an additional device ID? */
	bool			msis_require_devid;

	int			nr_spis;

	/* base addresses in guest physical address space: */
	gpa_t			vgic_dist_base;		/* distributor */
	union {
		/* either a GICv2 CPU interface */
		gpa_t			vgic_cpu_base;
		/* or a number of GICv3 redistributor regions */
		struct list_head rd_regions;
	};

	/* distributor enabled */
	bool			enabled;

	/* Wants SGIs without active state */
	bool			nassgireq;

	struct vgic_irq		*spis;

	struct vgic_io_device	dist_iodev;

	bool			has_its;
	bool			table_write_in_progress;

	/*
	 * Contains the attributes and gpa of the LPI configuration table.
	 * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
	 * one address across all redistributors.
	 * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables"
	 */
	u64			propbaser;

#define LPI_XA_MARK_DEBUG_ITER	XA_MARK_0
	/* INTID -> vgic_irq for LPIs */
	struct xarray		lpi_xa;

	/* used by vgic-debug */
	struct vgic_state_iter *iter;

	/*
	 * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
	 * array, the property table pointer as well as allocation
	 * data. This essentially ties the Linux IRQ core and ITS
	 * together, and avoids leaking KVM's data structures anywhere
	 * else.
	 */
	struct its_vm		its_vm;
};
|
|
|
|
|
2013-05-30 09:20:36 +00:00
|
|
|
/* GICv2 CPU interface registers, saved/restored across world switch */
struct vgic_v2_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_apr;
	u32		vgic_lr[VGIC_V2_MAX_LRS];

	/* Number of list registers currently in use */
	unsigned int	used_lrs;
};
|
|
|
|
|
2013-07-12 14:15:23 +00:00
|
|
|
/* GICv3 CPU interface registers, saved/restored across world switch */
struct vgic_v3_cpu_if {
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_sre;	/* Restored only, change ignored */
	u32		vgic_ap0r[4];
	u32		vgic_ap1r[4];
	u64		vgic_lr[VGIC_V3_MAX_LRS];

	/*
	 * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
	 * pending table pointer, the its_vm pointer and a few other
	 * HW specific things. As for the its_vm structure, this is
	 * linking the Linux IRQ subsystem and the ITS together.
	 */
	struct its_vpe	its_vpe;

	/* Number of list registers currently in use */
	unsigned int	used_lrs;
};
|
|
|
|
|
2013-01-22 00:36:12 +00:00
|
|
|
/* Per-vcpu vgic state */
struct vgic_cpu {
	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if	vgic_v2;
		struct vgic_v3_cpu_if	vgic_v3;
	};

	/* SGI/PPI state for this vcpu, allocated on demand */
	struct vgic_irq *private_irqs;

	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */

	/*
	 * List of IRQs that this VCPU should consider because they are either
	 * Active or Pending (hence the name; AP list), or because they recently
	 * were one of the two and need to be migrated off this list to another
	 * VCPU.
	 */
	struct list_head ap_list_head;

	/*
	 * Members below are used with GICv3 emulation only and represent
	 * parts of the redistributor.
	 */
	struct vgic_io_device	rd_iodev;
	struct vgic_redist_region *rdreg;
	u32 rdreg_index;
	atomic_t syncr_busy;

	/* Contains the attributes and gpa of the LPI pending tables. */
	u64 pendbaser;

	/* GICR_CTLR.{ENABLE_LPIS,RWP} */
	atomic_t ctlr;

	/* Cache guest priority bits */
	u32 num_pri_bits;

	/* Cache guest interrupt ID bits */
	u32 num_id_bits;
};
|
2013-01-22 00:36:12 +00:00
|
|
|
|
arm64: KVM: vgic-v2: Add the GICV emulation infrastructure
In order to efficiently perform the GICV access on behalf of the
guest, we need to be able to avoid going back all the way to
the host kernel.
For this, we introduce a new hook in the world switch code,
conveniently placed just after populating the fault info.
At that point, we only have saved/restored the GP registers,
and we can quickly perform all the required checks (data abort,
translation fault, valid faulting syndrome, not an external
abort, not a PTW).
Coming back from the emulation code, we need to skip the emulated
instruction. This involves an additional bit of save/restore in
order to be able to access the guest's PC (and possibly CPSR if
this is a 32bit guest).
At this stage, no emulation code is provided.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
2016-09-06 08:28:45 +00:00
|
|
|
extern struct static_key_false vgic_v2_cpuif_trap;
|
2017-06-09 11:49:33 +00:00
|
|
|
extern struct static_key_false vgic_v3_cpuif_trap;
|
arm64: KVM: vgic-v2: Add the GICV emulation infrastructure
In order to efficiently perform the GICV access on behalf of the
guest, we need to be able to avoid going back all the way to
the host kernel.
For this, we introduce a new hook in the world switch code,
conveniently placed just after populating the fault info.
At that point, we only have saved/restored the GP registers,
and we can quickly perform all the required checks (data abort,
translation fault, valid faulting syndrome, not an external
abort, not a PTW).
Coming back from the emulation code, we need to skip the emulated
instruction. This involves an additional bit of save/restore in
order to be able to access the guest's PC (and possibly CPSR if
this is a 32bit guest).
At this stage, no emulation code is provided.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
2016-09-06 08:28:45 +00:00
|
|
|
|
2022-07-05 13:34:33 +00:00
|
|
|
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr);
|
2014-06-23 16:37:18 +00:00
|
|
|
void kvm_vgic_early_init(struct kvm *kvm);
|
2017-05-08 10:30:24 +00:00
|
|
|
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
|
2014-06-03 07:33:10 +00:00
|
|
|
int kvm_vgic_create(struct kvm *kvm, u32 type);
|
2014-07-08 11:09:01 +00:00
|
|
|
void kvm_vgic_destroy(struct kvm *kvm);
|
|
|
|
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
|
2016-05-28 10:27:11 +00:00
|
|
|
int kvm_vgic_map_resources(struct kvm *kvm);
|
|
|
|
int kvm_vgic_hyp_init(void);
|
2017-03-18 12:56:56 +00:00
|
|
|
void kvm_vgic_init_cpu_hardware(void);
|
2016-05-28 10:27:11 +00:00
|
|
|
|
2023-09-27 09:09:01 +00:00
|
|
|
int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
|
|
|
|
unsigned int intid, bool level, void *owner);
|
2017-10-27 14:28:32 +00:00
|
|
|
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
|
2021-03-01 17:39:39 +00:00
|
|
|
u32 vintid, struct irq_ops *ops);
|
2017-10-27 14:28:32 +00:00
|
|
|
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
|
2023-03-30 17:47:57 +00:00
|
|
|
int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid);
|
2017-10-27 14:28:32 +00:00
|
|
|
bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
|
2013-01-22 00:36:12 +00:00
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
|
|
|
|
|
2016-03-24 10:21:04 +00:00
|
|
|
void kvm_vgic_load(struct kvm_vcpu *vcpu);
|
|
|
|
void kvm_vgic_put(struct kvm_vcpu *vcpu);
|
|
|
|
|
2014-05-15 09:03:25 +00:00
|
|
|
/* Predicates on a VM's vgic state (argument is a struct kvm *) */
#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	((k)->arch.vgic.initialized)
#define vgic_ready(k)		((k)->arch.vgic.ready)
/* Is (i) an SPI INTID within this VM's configured nr_spis? */
#define vgic_valid_spi(k, i)	(((i) >= VGIC_NR_PRIVATE_IRQS) && \
				 ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
|
|
|
|
|
|
|
|
bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
|
|
|
|
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
|
|
|
|
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
|
2018-03-05 10:36:38 +00:00
|
|
|
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
|
2013-01-22 00:36:14 +00:00
|
|
|
|
2018-08-06 11:51:19 +00:00
|
|
|
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
|
2014-02-04 18:13:03 +00:00
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
 * The host's GIC naturally limits the maximum amount of VCPUs a guest
 * can use.
 *
 * Return: the vcpu limit cached in kvm_vgic_global_state.max_gic_vcpus.
 */
static inline int kvm_vgic_get_max_vcpus(void)
{
	return kvm_vgic_global_state.max_gic_vcpus;
}
|
|
|
|
|
2016-07-22 16:20:41 +00:00
|
|
|
/**
|
|
|
|
* kvm_vgic_setup_default_irq_routing:
|
|
|
|
* Setup a default flat gsi routing table mapping all SPIs
|
|
|
|
*/
|
|
|
|
int kvm_vgic_setup_default_irq_routing(struct kvm *kvm);
|
|
|
|
|
2017-05-04 11:24:20 +00:00
|
|
|
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
|
|
|
|
|
2017-10-27 14:28:39 +00:00
|
|
|
struct kvm_kernel_irq_routing_entry;
|
|
|
|
|
|
|
|
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
|
|
|
|
struct kvm_kernel_irq_routing_entry *irq_entry);
|
|
|
|
|
|
|
|
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
|
|
|
|
struct kvm_kernel_irq_routing_entry *irq_entry);
|
|
|
|
|
KVM: arm64: vgic-v4: Move the GICv4 residency flow to be driven by vcpu_load/put
When the VHE code was reworked, a lot of the vgic stuff was moved around,
but the GICv4 residency code did stay untouched, meaning that we come
in and out of residency on each flush/sync, which is obviously suboptimal.
To address this, let's move things around a bit:
- Residency entry (flush) moves to vcpu_load
- Residency exit (sync) moves to vcpu_put
- On blocking (entry to WFI), we "put"
- On unblocking (exit from WFI), we "load"
Because these can nest (load/block/put/load/unblock/put, for example),
we now have per-VPE tracking of the residency state.
Additionally, vgic_v4_put gains a "need doorbell" parameter, which only
gets set to true when blocking because of a WFI. This allows a finer
control of the doorbell, which now also gets disabled as soon as
it gets signaled.
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20191027144234.8395-2-maz@kernel.org
2019-10-27 14:41:59 +00:00
|
|
|
int vgic_v4_load(struct kvm_vcpu *vcpu);
|
2020-11-28 14:18:57 +00:00
|
|
|
void vgic_v4_commit(struct kvm_vcpu *vcpu);
|
2023-07-13 07:06:57 +00:00
|
|
|
int vgic_v4_put(struct kvm_vcpu *vcpu);
|
2017-10-27 14:28:49 +00:00
|
|
|
|
2022-11-30 23:09:00 +00:00
|
|
|
/* CPU HP callbacks */
|
|
|
|
void kvm_vgic_cpu_up(void);
|
|
|
|
void kvm_vgic_cpu_down(void);
|
|
|
|
|
2016-05-28 10:27:11 +00:00
|
|
|
#endif /* __KVM_ARM_VGIC_H */
|