Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git, synced 2025-01-14 17:53:39 +00:00
Merge tag 'gvt-fixes-2017-03-08' of https://github.com/01org/gvt-linux into drm-intel-fixes
gvt-fixes-2017-03-08

- MMIO cmd access flag cleanup
- Virtual display fixes from Weinan and Bing
- config space reset fix from Changbin
- better workload submission error path fix from Chuanxiao
- other misc fixes

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
This commit is contained in: commit 70647f9163
@@ -41,6 +41,54 @@ enum {
INTEL_GVT_PCI_BAR_MAX,
};

/* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one
* byte) byte by byte in standard pci configuration space. (not the full
* 256 bytes.)
*/
static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
[PCI_COMMAND] = 0xff, 0x07,
[PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */
[PCI_CACHE_LINE_SIZE] = 0xff,
[PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
[PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
[PCI_INTERRUPT_LINE] = 0xff,
};

/**
* vgpu_pci_cfg_mem_write - write virtual cfg space memory
*
* Use this function to write virtual cfg space memory.
* For standard cfg space, only RW bits can be changed,
* and we emulates the RW1C behavior of PCI_STATUS register.
*/
static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
u8 *src, unsigned int bytes)
{
u8 *cfg_base = vgpu_cfg_space(vgpu);
u8 mask, new, old;
int i = 0;

for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
mask = pci_cfg_space_rw_bmp[off + i];
old = cfg_base[off + i];
new = src[i] & mask;

/**
* The PCI_STATUS high byte has RW1C bits, here
* emulates clear by writing 1 for these bits.
* Writing a 0b to RW1C bits has no effect.
*/
if (off + i == PCI_STATUS + 1)
new = (~new & old) & mask;

cfg_base[off + i] = (old & ~mask) | new;
}

/* For other configuration space directly copy as it is. */
if (i < bytes)
memcpy(cfg_base + off + i, src + i, bytes - i);
}

/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
*
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;

memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;

@@ -277,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (ret)
return ret;

memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
default:
memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
}
return 0;

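To make the masking in the hunk above concrete, here is a small, self-contained sketch (illustrative only, not part of this diff) that applies the same update rule vgpu_pci_cfg_mem_write() uses: plain RW bytes take the guest value only in writable bit positions, while the RW1C byte at PCI_STATUS + 1 clears a bit when the guest writes 1 to it. The helper name and the sample values are made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: same per-byte update rule as vgpu_pci_cfg_mem_write(). */
static uint8_t cfg_write_byte(uint8_t old, uint8_t guest, uint8_t mask, int rw1c)
{
	uint8_t val = guest & mask;

	if (rw1c)				/* writing 1 clears, writing 0 keeps */
		val = (~val & old) & mask;

	return (old & ~mask) | val;		/* read-only bits keep their old value */
}

int main(void)
{
	/* RW1C case, PCI_STATUS high byte (mask 0xf9): old = 0x29 has bits 0, 3, 5
	 * set; the guest writes 0x09, which clears bits 0 and 3 and leaves bit 5. */
	printf("0x%02x\n", cfg_write_byte(0x29, 0x09, 0xf9, 1));	/* prints 0x20 */

	/* Plain RW case, e.g. PCI_CACHE_LINE_SIZE (mask 0xff). */
	printf("0x%02x\n", cfg_write_byte(0x00, 0x10, 0xff, 0));	/* prints 0x10 */
	return 0;
}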
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
if (d_info == NULL)
return;

gvt_err("opcode=0x%x %s sub_ops:",
gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
cmd >> (32 - d_info->op_len), d_info->name);

for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;

gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
s->ring_id, s->ring_start, s->ring_start + s->ring_size,
s->ring_head, s->ring_tail);

gvt_err(" %s %s ip_gma(%08lx) ",
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
"RING_BUFFER" : "BATCH_BUFFER",
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);

if (s->ip_va == NULL) {
gvt_err(" ip_va(NULL)");
gvt_dbg_cmd(" ip_va(NULL)");
return;
}

gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));

@@ -176,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);

if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
}

if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
}

if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}

if (IS_SKYLAKE(dev_priv) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -196,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_PORT_DP_A_HOTPLUG;
else
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;

vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
}

@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
info->size = size;
info->length = (i + 4) < end ? 4 : (end - i);
info->addr_mask = addr_mask;
info->ro_mask = ro_mask;
info->device = device;
info->read = read ? read : intel_vgpu_default_mmio_read;
info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -1304,21 +1305,24 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);

switch (cmd) {
case 0x6:
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
* from skylake platform.
*/
if (!*data0)
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
case GEN9_PCODE_READ_MEM_LATENCY:
if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
* from skylake platform.
*/
if (!*data0)
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
}
break;
case SKL_PCODE_CDCLK_CONTROL:
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
if (IS_SKYLAKE(vgpu->gvt->dev_priv))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case 0x5:
case GEN6_PCODE_READ_RC6VIDS:
*data0 |= 0x1;
break;
}

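For context on the mailbox_write() hunk above: the PCODE mailbox it emulates is a simple request/response register pair. The guest's i915 driver writes a payload to GEN6_PCODE_DATA, writes the command to GEN6_PCODE_MAILBOX with the READY bit set, polls for READY to clear, then reads the result back from GEN6_PCODE_DATA. The sketch below is a rough, illustrative guest-side view of that sequence (loosely modelled on i915's sandybridge_pcode_read(); the function name is made up and it is not part of this diff).

/* Illustrative guest-side view of a PCODE mailbox read (simplified sketch). */
static int pcode_read_sketch(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* For a vGPU, GVT-g traps these MMIO writes; mailbox_write() above
	 * fills the virtual GEN6_PCODE_DATA with a canned reply (for example
	 * the Skylake memory-latency values for GEN9_PCODE_READ_MEM_LATENCY). */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
		return -ETIMEDOUT;

	*val = I915_READ(GEN6_PCODE_DATA);
	return 0;
}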
@@ -1520,6 +1524,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
#define MMIO_GM(reg, d, r, w) \
MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)

#define MMIO_GM_RDR(reg, d, r, w) \
MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)

#define MMIO_RO(reg, d, f, rm, r, w) \
MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)

@@ -1539,6 +1546,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
#define MMIO_RING_GM(prefix, d, r, w) \
MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_GM_RDR(prefix, d, r, w) \
MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)

#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)

@@ -1547,73 +1557,79 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;

MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
intel_vgpu_reg_imr_handler);

MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(SDEISR, D_ALL);

MMIO_RING_D(RING_HWSTAM, D_ALL);
MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);

MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);

#define RING_REG(base) (base + 0x28)
MMIO_RING_D(RING_REG, D_ALL);
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) (base + 0x134)
MMIO_RING_D(RING_REG, D_ALL);
MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG

MMIO_GM(0x2148, D_ALL, NULL, NULL);
MMIO_GM(CCID, D_ALL, NULL, NULL);
MMIO_GM(0x12198, D_ALL, NULL, NULL);
MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);

MMIO_RING_D(RING_TAIL, D_ALL);
MMIO_RING_D(RING_HEAD, D_ALL);
MMIO_RING_D(RING_CTL, D_ALL);
MMIO_RING_D(RING_ACTHD, D_ALL);
MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);

/* RING MODE */
#define RING_REG(base) (base + 0x29c)
MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
ring_mode_mmio_write);
#undef RING_REG

MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);

MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x2124, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_D(GAM_ECOCHK, D_ALL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x9030, D_ALL);
MMIO_D(0x20a0, D_ALL);
MMIO_D(0x2420, D_ALL);
MMIO_D(0x2430, D_ALL);
MMIO_D(0x2434, D_ALL);
MMIO_D(0x2438, D_ALL);
MMIO_D(0x243c, D_ALL);
MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

@@ -2144,8 +2160,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(FORCEWAKE_ACK, D_ALL);
MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
MMIO_D(GTFIFODBG, D_ALL);
MMIO_D(GTFIFOCTL, D_ALL);
MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
MMIO_D(ECOBUS, D_ALL);
@@ -2202,7 +2218,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)

MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);

MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
MMIO_D(0x13812c, D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2281,36 +2297,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x1a054, D_ALL);

MMIO_D(0x44070, D_ALL);

MMIO_D(0x215c, D_HSW_PLUS);
MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);

MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
MMIO_D(GEN7_OACONTROL, D_HSW);
MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x2b00, D_BDW_PLUS);
MMIO_D(0x2360, D_BDW_PLUS);
MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);

MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(BCS_SWCTRL, D_ALL);
MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);

MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2318,6 +2333,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}

@@ -2326,7 +2352,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;

MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
intel_vgpu_reg_imr_handler);

MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2391,24 +2417,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);

MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_D(0x1c134, D_BDW_PLUS);
MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
NULL, NULL);
MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
NULL, NULL);
MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_CMD_ACCESS, NULL, NULL);
MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
ring_mode_mmio_write);
MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);

MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

#define RING_REG(base) (base + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2425,13 +2458,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
#undef RING_REG

#define RING_REG(base) (base + 0x234)
MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
NULL, NULL);
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
~0LL, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

#define RING_REG(base) (base + 0x244)
MMIO_RING_D(RING_REG, D_BDW_PLUS);
MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
NULL, NULL);
#undef RING_REG

#define RING_REG(base) (base + 0x370)
@@ -2453,6 +2489,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
MMIO_D(0x1c054, D_BDW_PLUS);

MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);

MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);

@@ -2463,8 +2501,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG

MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);

MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

@@ -2485,15 +2523,17 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);

MMIO_D(0xfdc, D_BDW_PLUS);
MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);

MMIO_D(0xb1f0, D_BDW);
MMIO_D(0xb1c0, D_BDW);
MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0xb100, D_BDW);
MMIO_D(0xb10c, D_BDW);
MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0xb110, D_BDW);

MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
@@ -2503,10 +2543,10 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x44484, D_BDW_PLUS);
MMIO_D(0x4448c, D_BDW_PLUS);

MMIO_D(0x83a4, D_BDW);
MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);

MMIO_D(0x8430, D_BDW);
MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);

MMIO_D(0x110000, D_BDW_PLUS);

@@ -2518,10 +2558,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

MMIO_D(0x2248, D_BDW);
MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);

MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}

@@ -2544,7 +2593,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);

MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2702,16 +2750,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);

MMIO_D(0xd08, D_SKL);
MMIO_D(0x20e0, D_SKL);
MMIO_D(0x20ec, D_SKL);
MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);

/* TRTT */
MMIO_D(0x4de0, D_SKL);
MMIO_D(0x4de4, D_SKL);
MMIO_D(0x4de8, D_SKL);
MMIO_D(0x4dec, D_SKL);
MMIO_D(0x4df0, D_SKL);
MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);

MMIO_D(0x45008, D_SKL);
@@ -2735,7 +2783,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x65f08, D_SKL);
MMIO_D(0x320f0, D_SKL);

MMIO_D(_REG_VCS2_EXCC, D_SKL);
MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL);
MMIO_D(0x71034, D_SKL);
MMIO_D(0x72034, D_SKL);
@@ -2748,7 +2796,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);

MMIO_D(0x44500, D_SKL);
MMIO_D(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
return 0;
}

@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr;

page = pfn_to_page(pfn);
if (is_error_page(page))
if (unlikely(!pfn_valid(pfn)))
return -EFAULT;

page = pfn_to_page(pfn);
daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr))

@@ -151,6 +151,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
/* If the status is -EINPROGRESS means this workload
* doesn't meet any issue during dispatching so when
* get the SCHEDULE_OUT set the status to be zero for
* good. If the status is NOT -EINPROGRESS means there
* is something wrong happened during dispatching and
* the status should not be set to zero
*/
if (workload->status == -EINPROGRESS)
workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -362,15 +371,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload = scheduler->current_workload[ring_id];
vgpu = workload->vgpu;

if (!workload->status && !vgpu->resetting) {
/* For the workload w/ request, needs to wait for the context
* switch to make sure request is completed.
* For the workload w/o request, directly complete the workload.
*/
if (workload->req) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));

update_guest_context(workload);
i915_gem_request_put(fetch_and_zero(&workload->req));

for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
if (!workload->status && !vgpu->resetting) {
update_guest_context(workload);

for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(vgpu, event);
}
}

gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -400,7 +417,6 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
long lret;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -449,23 +465,24 @@ static int workload_thread(void *priv)

gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);

lret = i915_wait_request(workload->req,
retry:
i915_wait_request(workload->req,
0, MAX_SCHEDULE_TIMEOUT);
if (lret < 0) {
workload->status = lret;
gvt_err("fail to wait workload, skip\n");
} else {
workload->status = 0;
/* I915 has replay mechanism and a request will be replayed
* if there is i915 reset. So the seqno will be updated anyway.
* If the seqno is not updated yet after waiting, which means
* the replay may still be in progress and we can wait again.
*/
if (!i915_gem_request_completed(workload->req)) {
gvt_dbg_sched("workload %p not completed, wait again\n",
workload);
goto retry;
}

complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
workload, workload->status);

if (workload->req)
i915_gem_request_put(fetch_and_zero(&workload->req));

complete_current_workload(gvt, ring_id);

if (need_force_wake)