From da889070be7b26b91e8b90f072687ca437d3ed7b Mon Sep 17 00:00:00 2001 From: Ilia Levi Date: Fri, 13 Dec 2024 09:25:35 +0200 Subject: [PATCH 01/12] drm/xe/irq: Separate MSI and MSI-X flows MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A new flow is added for devices that support MSI-X: - MSI-X vector 0 is used for GuC-to-host interrupt - MSI-X vector 1 (aka default MSI-X) is used for HW engines The default MSI-X will be passed to the HW engines in a subsequent patch. Signed-off-by: Ilia Levi Reviewed-by: Piotr Piórkowski Link: https://patchwork.freedesktop.org/patch/msgid/20241213072538.6823-2-ilia.levi@intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_device.c | 4 +- drivers/gpu/drm/xe/xe_device.h | 3 +- drivers/gpu/drm/xe/xe_device_types.h | 6 + drivers/gpu/drm/xe/xe_irq.c | 267 +++++++++++++++++++++++---- drivers/gpu/drm/xe/xe_irq.h | 3 + 5 files changed, 242 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index d6fccea1e083..7f021ec5f8e7 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -326,7 +326,9 @@ struct xe_device *xe_device_create(struct pci_dev *pdev, xe->info.revid = pdev->revision; xe->info.force_execlist = xe_modparam.force_execlist; - spin_lock_init(&xe->irq.lock); + err = xe_irq_init(xe); + if (err) + goto err; init_waitqueue_head(&xe->ufence_wq); diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index f1fbfe916867..fc3c2af3fb7f 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -157,8 +157,7 @@ static inline bool xe_device_has_sriov(struct xe_device *xe) static inline bool xe_device_has_msix(struct xe_device *xe) { - /* TODO: change this when MSI-X support is fully integrated */ - return false; + return xe->irq.msix.nvec > 0; } static inline bool xe_device_has_memirq(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index ace22e35e769..7b90df4e0021 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -348,6 +348,12 @@ struct xe_device { /** @irq.enabled: interrupts enabled on this device */ atomic_t enabled; + + /** @irq.msix: irq info for platforms that support MSI-X */ + struct { + /** @irq.msix.nvec: number of MSI-X interrupts */ + u16 nvec; + } msix; } irq; /** @ttm: ttm device */ diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 1c509e66694d..b8a0b9bbf24c 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -10,6 +10,7 @@ #include #include "display/xe_display.h" +#include "regs/xe_guc_regs.h" #include "regs/xe_irq_regs.h" #include "xe_device.h" #include "xe_drv.h" @@ -29,6 +30,11 @@ #define IIR(offset) XE_REG(offset + 0x8) #define IER(offset) XE_REG(offset + 0xc) +static int xe_irq_msix_init(struct xe_device *xe); +static void xe_irq_msix_free(struct xe_device *xe); +static int xe_irq_msix_request_irqs(struct xe_device *xe); +static void xe_irq_msix_synchronize_irq(struct xe_device *xe); + static void assert_iir_is_zero(struct xe_mmio *mmio, struct xe_reg reg) { u32 val = xe_mmio_read32(mmio, reg); @@ -572,6 +578,11 @@ static void xe_irq_reset(struct xe_device *xe) if (IS_SRIOV_VF(xe)) return vf_irq_reset(xe); + if (xe_device_uses_memirq(xe)) { + for_each_tile(tile, xe, id) + xe_memirq_reset(&tile->memirq); + } + for_each_tile(tile, xe, id) { if (GRAPHICS_VERx100(xe) >= 1210) dg1_irq_reset(tile); @@ -614,6 
+625,14 @@ static void xe_irq_postinstall(struct xe_device *xe) if (IS_SRIOV_VF(xe)) return vf_irq_postinstall(xe); + if (xe_device_uses_memirq(xe)) { + struct xe_tile *tile; + unsigned int id; + + for_each_tile(tile, xe, id) + xe_memirq_postinstall(&tile->memirq); + } + xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe)); /* @@ -656,27 +675,11 @@ static irq_handler_t xe_irq_handler(struct xe_device *xe) return xelp_irq_handler; } -static void irq_uninstall(void *arg) -{ - struct xe_device *xe = arg; - struct pci_dev *pdev = to_pci_dev(xe->drm.dev); - int irq; - - if (!atomic_xchg(&xe->irq.enabled, 0)) - return; - - xe_irq_reset(xe); - - irq = pci_irq_vector(pdev, 0); - free_irq(irq, xe); -} - -int xe_irq_install(struct xe_device *xe) +static int xe_irq_msi_request_irqs(struct xe_device *xe) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); - unsigned int irq_flags = PCI_IRQ_MSIX; irq_handler_t irq_handler; - int err, irq, nvec; + int irq, err; irq_handler = xe_irq_handler(xe); if (!irq_handler) { @@ -684,32 +687,71 @@ int xe_irq_install(struct xe_device *xe) return -EINVAL; } + irq = pci_irq_vector(pdev, 0); + err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe); + if (err < 0) { + drm_err(&xe->drm, "Failed to request MSI IRQ %d\n", err); + return err; + } + + return 0; +} + +static void xe_irq_msi_free(struct xe_device *xe) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + int irq; + + irq = pci_irq_vector(pdev, 0); + free_irq(irq, xe); +} + +static void irq_uninstall(void *arg) +{ + struct xe_device *xe = arg; + + if (!atomic_xchg(&xe->irq.enabled, 0)) + return; + xe_irq_reset(xe); - nvec = pci_msix_vec_count(pdev); - if (nvec <= 0) { - if (nvec == -EINVAL) { - /* MSIX capability is not supported in the device, using MSI */ - irq_flags = PCI_IRQ_MSI; - nvec = 1; - } else { - drm_err(&xe->drm, "MSIX: Failed getting count\n"); - return nvec; - } + if (xe_device_has_msix(xe)) + xe_irq_msix_free(xe); + else + xe_irq_msi_free(xe); +} + +int xe_irq_init(struct xe_device *xe) +{ + spin_lock_init(&xe->irq.lock); + + return xe_irq_msix_init(xe); +} + +int xe_irq_install(struct xe_device *xe) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + unsigned int irq_flags = PCI_IRQ_MSI; + int nvec = 1; + int err; + + xe_irq_reset(xe); + + if (xe_device_has_msix(xe)) { + nvec = xe->irq.msix.nvec; + irq_flags = PCI_IRQ_MSIX; } err = pci_alloc_irq_vectors(pdev, nvec, nvec, irq_flags); if (err < 0) { - drm_err(&xe->drm, "MSI/MSIX: Failed to enable support %d\n", err); + drm_err(&xe->drm, "Failed to allocate IRQ vectors: %d\n", err); return err; } - irq = pci_irq_vector(pdev, 0); - err = request_irq(irq, irq_handler, IRQF_SHARED, DRIVER_NAME, xe); - if (err < 0) { - drm_err(&xe->drm, "Failed to request MSI/MSIX IRQ %d\n", err); + err = xe_device_has_msix(xe) ? 
xe_irq_msix_request_irqs(xe) : + xe_irq_msi_request_irqs(xe); + if (err) return err; - } atomic_set(&xe->irq.enabled, 1); @@ -722,18 +764,28 @@ int xe_irq_install(struct xe_device *xe) return 0; free_irq_handler: - free_irq(irq, xe); + if (xe_device_has_msix(xe)) + xe_irq_msix_free(xe); + else + xe_irq_msi_free(xe); return err; } +static void xe_irq_msi_synchronize_irq(struct xe_device *xe) +{ + synchronize_irq(to_pci_dev(xe->drm.dev)->irq); +} + void xe_irq_suspend(struct xe_device *xe) { - int irq = to_pci_dev(xe->drm.dev)->irq; - atomic_set(&xe->irq.enabled, 0); /* no new irqs */ - synchronize_irq(irq); /* flush irqs */ + /* flush irqs */ + if (xe_device_has_msix(xe)) + xe_irq_msix_synchronize_irq(xe); + else + xe_irq_msi_synchronize_irq(xe); xe_irq_reset(xe); /* turn irqs off */ } @@ -754,3 +806,142 @@ void xe_irq_resume(struct xe_device *xe) for_each_gt(gt, xe, id) xe_irq_enable_hwe(gt); } + +/* MSI-X related definitions and functions below. */ + +enum xe_irq_msix_static { + GUC2HOST_MSIX = 0, + DEFAULT_MSIX = XE_IRQ_DEFAULT_MSIX, + /* Must be last */ + NUM_OF_STATIC_MSIX, +}; + +static int xe_irq_msix_init(struct xe_device *xe) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + int nvec = pci_msix_vec_count(pdev); + + if (nvec == -EINVAL) + return 0; /* MSI */ + + if (nvec < 0) { + drm_err(&xe->drm, "Failed getting MSI-X vectors count: %d\n", nvec); + return nvec; + } + + xe->irq.msix.nvec = nvec; + return 0; +} + +static irqreturn_t guc2host_irq_handler(int irq, void *arg) +{ + struct xe_device *xe = arg; + struct xe_tile *tile; + u8 id; + + if (!atomic_read(&xe->irq.enabled)) + return IRQ_NONE; + + for_each_tile(tile, xe, id) + xe_guc_irq_handler(&tile->primary_gt->uc.guc, + GUC_INTR_GUC2HOST); + + return IRQ_HANDLED; +} + +static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg) +{ + unsigned int tile_id, gt_id; + struct xe_device *xe = arg; + struct xe_memirq *memirq; + struct xe_hw_engine *hwe; + enum xe_hw_engine_id id; + struct xe_tile *tile; + struct xe_gt *gt; + + if (!atomic_read(&xe->irq.enabled)) + return IRQ_NONE; + + for_each_tile(tile, xe, tile_id) { + memirq = &tile->memirq; + if (!memirq->bo) + continue; + + for_each_gt(gt, xe, gt_id) { + if (gt->tile != tile) + continue; + + for_each_hw_engine(hwe, gt, id) + xe_memirq_hwe_handler(memirq, hwe); + } + } + + return IRQ_HANDLED; +} + +static int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, + const char *name, u16 msix) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + int ret, irq; + + irq = pci_irq_vector(pdev, msix); + if (irq < 0) + return irq; + + ret = request_irq(irq, handler, IRQF_SHARED, name, xe); + if (ret < 0) + return ret; + + return 0; +} + +static void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + int irq; + + irq = pci_irq_vector(pdev, msix); + if (irq < 0) { + drm_err(&xe->drm, "MSI-X %u can't be released, there is no matching IRQ\n", msix); + return; + } + + free_irq(irq, xe); +} + +static int xe_irq_msix_request_irqs(struct xe_device *xe) +{ + int err; + + err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, + DRIVER_NAME "-guc2host", GUC2HOST_MSIX); + if (err) { + drm_err(&xe->drm, "Failed to request MSI-X IRQ %d: %d\n", GUC2HOST_MSIX, err); + return err; + } + + err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, + DRIVER_NAME "-default-msix", DEFAULT_MSIX); + if (err) { + drm_err(&xe->drm, "Failed to request MSI-X IRQ %d: %d\n", DEFAULT_MSIX, err); + xe_irq_msix_free_irq(xe, 
GUC2HOST_MSIX); + return err; + } + + return 0; +} + +static void xe_irq_msix_free(struct xe_device *xe) +{ + xe_irq_msix_free_irq(xe, GUC2HOST_MSIX); + xe_irq_msix_free_irq(xe, DEFAULT_MSIX); +} + +static void xe_irq_msix_synchronize_irq(struct xe_device *xe) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + + synchronize_irq(pci_irq_vector(pdev, GUC2HOST_MSIX)); + synchronize_irq(pci_irq_vector(pdev, DEFAULT_MSIX)); +} diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h index 067514e13675..24ff16111b96 100644 --- a/drivers/gpu/drm/xe/xe_irq.h +++ b/drivers/gpu/drm/xe/xe_irq.h @@ -6,10 +6,13 @@ #ifndef _XE_IRQ_H_ #define _XE_IRQ_H_ +#define XE_IRQ_DEFAULT_MSIX 1 + struct xe_device; struct xe_tile; struct xe_gt; +int xe_irq_init(struct xe_device *xe); int xe_irq_install(struct xe_device *xe); void xe_irq_suspend(struct xe_device *xe); void xe_irq_resume(struct xe_device *xe); From 21d07f5fdc903e36cfd7119bb19477c4d12dbb36 Mon Sep 17 00:00:00 2001 From: Ilia Levi Date: Fri, 13 Dec 2024 09:25:36 +0200 Subject: [PATCH 02/12] drm/xe: Initial MSI-X support for HW engines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Configure the HW engines to work with MSI-X - Program the LRC to use memirq infra (similar to VF) - CS_INT_VEC field added to the LRC Bspec: 60342, 72547 Signed-off-by: Ilia Levi Reviewed-by: Piotr Piórkowski Link: https://patchwork.freedesktop.org/patch/msgid/20241213072538.6823-3-ilia.levi@intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/regs/xe_engine_regs.h | 3 +++ drivers/gpu/drm/xe/regs/xe_lrc_layout.h | 3 +++ drivers/gpu/drm/xe/xe_exec_queue.c | 4 +++- drivers/gpu/drm/xe/xe_exec_queue_types.h | 2 ++ drivers/gpu/drm/xe/xe_execlist.c | 10 +++++++--- drivers/gpu/drm/xe/xe_hw_engine.c | 7 +++++-- drivers/gpu/drm/xe/xe_lrc.c | 24 ++++++++++++++++++++---- drivers/gpu/drm/xe/xe_lrc.h | 2 +- 8 files changed, 44 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index 7c78496e6213..d86219dedde2 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -83,6 +83,8 @@ #define RING_IMR(base) XE_REG((base) + 0xa8) #define RING_INT_STATUS_RPT_PTR(base) XE_REG((base) + 0xac) +#define CS_INT_VEC(base) XE_REG((base) + 0x1b8) + #define RING_EIR(base) XE_REG((base) + 0xb0) #define RING_EMR(base) XE_REG((base) + 0xb4) #define RING_ESR(base) XE_REG((base) + 0xb8) @@ -138,6 +140,7 @@ #define RING_MODE(base) XE_REG((base) + 0x29c) #define GFX_DISABLE_LEGACY_MODE REG_BIT(3) +#define GFX_MSIX_INTERRUPT_ENABLE REG_BIT(13) #define RING_TIMESTAMP(base) XE_REG((base) + 0x358) diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h index 045dfd09db99..57944f90bbf6 100644 --- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h +++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h @@ -25,6 +25,9 @@ #define CTX_INT_SRC_REPORT_REG (CTX_LRI_INT_REPORT_PTR + 3) #define CTX_INT_SRC_REPORT_PTR (CTX_LRI_INT_REPORT_PTR + 4) +#define CTX_CS_INT_VEC_REG 0x5a +#define CTX_CS_INT_VEC_DATA (CTX_CS_INT_VEC_REG + 1) + #define INDIRECT_CTX_RING_HEAD (0x02 + 1) #define INDIRECT_CTX_RING_TAIL (0x04 + 1) #define INDIRECT_CTX_RING_START (0x06 + 1) diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index aab9e561153d..9c94be571900 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -16,6 +16,7 @@ #include "xe_hw_engine_class_sysfs.h" 
#include "xe_hw_engine_group.h" #include "xe_hw_fence.h" +#include "xe_irq.h" #include "xe_lrc.h" #include "xe_macros.h" #include "xe_migrate.h" @@ -68,6 +69,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe, q->gt = gt; q->class = hwe->class; q->width = width; + q->msix_vec = XE_IRQ_DEFAULT_MSIX; q->logical_mask = logical_mask; q->fence_irq = >->fence_irq[hwe->class]; q->ring_ops = gt->ring_ops[hwe->class]; @@ -117,7 +119,7 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q) } for (i = 0; i < q->width; ++i) { - q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K); + q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec); if (IS_ERR(q->lrc[i])) { err = PTR_ERR(q->lrc[i]); goto err_unlock; diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h index 1158b6062a6c..eec8f9935a58 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue_types.h +++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h @@ -63,6 +63,8 @@ struct xe_exec_queue { char name[MAX_FENCE_NAME_LEN]; /** @width: width (number BB submitted per exec) of this exec queue */ u16 width; + /** @msix_vec: MSI-X vector (for platforms that support it) */ + u16 msix_vec; /** @fence_irq: fence IRQ used to signal job completion */ struct xe_hw_fence_irq *fence_irq; diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c index a8c416a48812..5ef96deaa881 100644 --- a/drivers/gpu/drm/xe/xe_execlist.c +++ b/drivers/gpu/drm/xe/xe_execlist.c @@ -17,6 +17,7 @@ #include "xe_exec_queue.h" #include "xe_gt.h" #include "xe_hw_fence.h" +#include "xe_irq.h" #include "xe_lrc.h" #include "xe_macros.h" #include "xe_mmio.h" @@ -47,6 +48,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, struct xe_mmio *mmio = >->mmio; struct xe_device *xe = gt_to_xe(gt); u64 lrc_desc; + u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE); lrc_desc = xe_lrc_descriptor(lrc); @@ -80,8 +82,10 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc, xe_mmio_write32(mmio, RING_HWS_PGA(hwe->mmio_base), xe_bo_ggtt_addr(hwe->hwsp)); xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base)); - xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), - _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE)); + + if (xe_device_has_msix(gt_to_xe(hwe->gt))) + ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE); + xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode); xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base), lower_32_bits(lrc_desc)); @@ -265,7 +269,7 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe, port->hwe = hwe; - port->lrc = xe_lrc_create(hwe, NULL, SZ_16K); + port->lrc = xe_lrc_create(hwe, NULL, SZ_16K, XE_IRQ_DEFAULT_MSIX); if (IS_ERR(port->lrc)) { err = PTR_ERR(port->lrc); goto err; diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index b19366744148..4294aa600192 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -324,6 +324,7 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe) { u32 ccs_mask = xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE); + u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE); if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask) xe_mmio_write32(&hwe->gt->mmio, RCU_MODE, @@ -332,8 +333,10 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe) xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0); xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0), xe_bo_ggtt_addr(hwe->hwsp)); - 
xe_hw_engine_mmio_write32(hwe, RING_MODE(0), - _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE)); + + if (xe_device_has_msix(gt_to_xe(hwe->gt))) + ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE); + xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode); xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0), _MASKED_BIT_DISABLE(STOP_RING)); xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0)); diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 22e58c6e2a35..bbb9ffbf6367 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -584,6 +584,7 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe) { struct xe_memirq *memirq = &gt_to_tile(hwe->gt)->memirq; struct xe_device *xe = gt_to_xe(hwe->gt); + u8 num_regs; if (!xe_device_uses_memirq(xe)) return; @@ -593,12 +594,18 @@ static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe) regs[CTX_INT_MASK_ENABLE_REG] = RING_IMR(0).addr; regs[CTX_INT_MASK_ENABLE_PTR] = xe_memirq_enable_ptr(memirq); - regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | + num_regs = xe_device_has_msix(xe) ? 3 : 2; + regs[CTX_LRI_INT_REPORT_PTR] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(num_regs) | MI_LRI_LRM_CS_MMIO | MI_LRI_FORCE_POSTED; regs[CTX_INT_STATUS_REPORT_REG] = RING_INT_STATUS_RPT_PTR(0).addr; regs[CTX_INT_STATUS_REPORT_PTR] = xe_memirq_status_ptr(memirq, hwe); regs[CTX_INT_SRC_REPORT_REG] = RING_INT_SRC_RPT_PTR(0).addr; regs[CTX_INT_SRC_REPORT_PTR] = xe_memirq_source_ptr(memirq, hwe); + + if (xe_device_has_msix(xe)) { + regs[CTX_CS_INT_VEC_REG] = CS_INT_VEC(0).addr; + /* CTX_CS_INT_VEC_DATA will be set in xe_lrc_init */ + } } static int lrc_ring_mi_mode(struct xe_hw_engine *hwe) @@ -876,7 +883,7 @@ static void xe_lrc_finish(struct xe_lrc *lrc) #define PVC_CTX_ACC_CTR_THOLD (0x2a + 1) static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, - struct xe_vm *vm, u32 ring_size) + struct xe_vm *vm, u32 ring_size, u16 msix_vec) { struct xe_gt *gt = hwe->gt; struct xe_tile *tile = gt_to_tile(gt); @@ -945,6 +952,14 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, xe_drm_client_add_bo(vm->xef->client, lrc->bo); } + if (xe_device_has_msix(xe)) { + xe_lrc_write_ctx_reg(lrc, CTX_INT_STATUS_REPORT_PTR, + xe_memirq_status_ptr(&tile->memirq, hwe)); + xe_lrc_write_ctx_reg(lrc, CTX_INT_SRC_REPORT_PTR, + xe_memirq_source_ptr(&tile->memirq, hwe)); + xe_lrc_write_ctx_reg(lrc, CTX_CS_INT_VEC_DATA, msix_vec << 16 | msix_vec); + } + if (xe_gt_has_indirect_ring_state(gt)) { xe_lrc_write_ctx_reg(lrc, CTX_INDIRECT_RING_STATE, __xe_lrc_indirect_ring_ggtt_addr(lrc)); @@ -1005,6 +1020,7 @@ err_lrc_finish: * @hwe: Hardware Engine * @vm: The VM (address space) * @ring_size: LRC ring size + * @msix_vec: MSI-X interrupt vector (for platforms that support it) * * Allocate and initialize the Logical Ring Context (LRC). * @@ -1012,7 +1028,7 @@ err_lrc_finish: * upon failure. 
*/ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm, - u32 ring_size) + u32 ring_size, u16 msix_vec) { struct xe_lrc *lrc; int err; @@ -1021,7 +1037,7 @@ struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm, if (!lrc) return ERR_PTR(-ENOMEM); - err = xe_lrc_init(lrc, hwe, vm, ring_size); + err = xe_lrc_init(lrc, hwe, vm, ring_size, msix_vec); if (err) { kfree(lrc); return ERR_PTR(err); diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h index b459dcab8787..4206e6a8b50a 100644 --- a/drivers/gpu/drm/xe/xe_lrc.h +++ b/drivers/gpu/drm/xe/xe_lrc.h @@ -42,7 +42,7 @@ struct xe_lrc_snapshot { #define LRC_PPHWSP_SCRATCH_ADDR (0x34 * 4) struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm, - u32 ring_size); + u32 ring_size, u16 msix_vec); void xe_lrc_destroy(struct kref *ref); /** From f0d3a3cffd432bd756b25a630f7bd19c018f72ce Mon Sep 17 00:00:00 2001 From: Ilia Levi Date: Fri, 13 Dec 2024 09:25:37 +0200 Subject: [PATCH 03/12] drm/xe/irq: Manage MSI-X interrupts allocation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Expose functions to request and free MSI-X interrupts. The request has two flavors: - Static MSI-X allocation, for known MSI-X interrupts (e.g. GuC-to-host) - Dynamic MSI-X allocation, which uses the next available MSI-X interrupt Signed-off-by: Ilia Levi Reviewed-by: Piotr Piórkowski Link: https://patchwork.freedesktop.org/patch/msgid/20241213072538.6823-4-ilia.levi@intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_device_types.h | 2 + drivers/gpu/drm/xe/xe_irq.c | 96 ++++++++++++++++++++++------ drivers/gpu/drm/xe/xe_irq.h | 5 ++ 3 files changed, 83 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 7b90df4e0021..8a7b15972413 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -353,6 +353,8 @@ struct xe_device { struct { /** @irq.msix.nvec: number of MSI-X interrupts */ u16 nvec; + /** @irq.msix.indexes: used to allocate MSI-X indexes */ + struct xarray indexes; } msix; } irq; diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index b8a0b9bbf24c..32f5a67a917b 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -830,6 +830,7 @@ static int xe_irq_msix_init(struct xe_device *xe) } xe->irq.msix.nvec = nvec; + xa_init_flags(&xe->irq.msix.indexes, XA_FLAGS_ALLOC); return 0; } @@ -879,8 +880,32 @@ static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg) return IRQ_HANDLED; } -static int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, - const char *name, u16 msix) +static int xe_irq_msix_alloc_vector(struct xe_device *xe, void *irq_buf, + bool dynamic_msix, u16 *msix) +{ + struct xa_limit limit; + int ret; + u32 id; + + limit = (dynamic_msix) ? 
XA_LIMIT(NUM_OF_STATIC_MSIX, xe->irq.msix.nvec - 1) : + XA_LIMIT(*msix, *msix); + ret = xa_alloc(&xe->irq.msix.indexes, &id, irq_buf, limit, GFP_KERNEL); + if (ret) + return ret; + + if (dynamic_msix) + *msix = id; + + return 0; +} + +static void xe_irq_msix_release_vector(struct xe_device *xe, u16 msix) +{ + xa_erase(&xe->irq.msix.indexes, msix); +} + +static int xe_irq_msix_request_irq_internal(struct xe_device *xe, irq_handler_t handler, + void *irq_buf, const char *name, u16 msix) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); int ret, irq; @@ -889,17 +914,41 @@ static int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, if (irq < 0) return irq; - ret = request_irq(irq, handler, IRQF_SHARED, name, xe); + ret = request_irq(irq, handler, IRQF_SHARED, name, irq_buf); if (ret < 0) return ret; return 0; } -static void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix) +int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf, + const char *name, bool dynamic_msix, u16 *msix) +{ + int ret; + + ret = xe_irq_msix_alloc_vector(xe, irq_buf, dynamic_msix, msix); + if (ret) + return ret; + + ret = xe_irq_msix_request_irq_internal(xe, handler, irq_buf, name, *msix); + if (ret) { + drm_err(&xe->drm, "Failed to request IRQ for MSI-X %u\n", *msix); + xe_irq_msix_release_vector(xe, *msix); + return ret; + } + + return 0; +} + +void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); int irq; + void *irq_buf; + + irq_buf = xa_load(&xe->irq.msix.indexes, msix); + if (!irq_buf) + return; irq = pci_irq_vector(pdev, msix); if (irq < 0) { @@ -907,24 +956,25 @@ static void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix) return; } - free_irq(irq, xe); + free_irq(irq, irq_buf); + xe_irq_msix_release_vector(xe, msix); } -static int xe_irq_msix_request_irqs(struct xe_device *xe) +int xe_irq_msix_request_irqs(struct xe_device *xe) { int err; + u16 msix; - err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, - DRIVER_NAME "-guc2host", GUC2HOST_MSIX); - if (err) { - drm_err(&xe->drm, "Failed to request MSI-X IRQ %d: %d\n", GUC2HOST_MSIX, err); + msix = GUC2HOST_MSIX; + err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe, + DRIVER_NAME "-guc2host", false, &msix); + if (err) return err; - } - err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, - DRIVER_NAME "-default-msix", DEFAULT_MSIX); + msix = DEFAULT_MSIX; + err = xe_irq_msix_request_irq(xe, xe_irq_msix_default_hwe_handler, xe, + DRIVER_NAME "-default-msix", false, &msix); if (err) { - drm_err(&xe->drm, "Failed to request MSI-X IRQ %d: %d\n", DEFAULT_MSIX, err); xe_irq_msix_free_irq(xe, GUC2HOST_MSIX); return err; } @@ -932,16 +982,22 @@ static int xe_irq_msix_request_irqs(struct xe_device *xe) return 0; } -static void xe_irq_msix_free(struct xe_device *xe) +void xe_irq_msix_free(struct xe_device *xe) { - xe_irq_msix_free_irq(xe, GUC2HOST_MSIX); - xe_irq_msix_free_irq(xe, DEFAULT_MSIX); + unsigned long msix; + u32 *dummy; + + xa_for_each(&xe->irq.msix.indexes, msix, dummy) + xe_irq_msix_free_irq(xe, msix); + xa_destroy(&xe->irq.msix.indexes); } -static void xe_irq_msix_synchronize_irq(struct xe_device *xe) +void xe_irq_msix_synchronize_irq(struct xe_device *xe) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + unsigned long msix; + u32 *dummy; - synchronize_irq(pci_irq_vector(pdev, GUC2HOST_MSIX)); - synchronize_irq(pci_irq_vector(pdev, DEFAULT_MSIX)); + xa_for_each(&xe->irq.msix.indexes, msix, dummy) + 
synchronize_irq(pci_irq_vector(pdev, msix)); } diff --git a/drivers/gpu/drm/xe/xe_irq.h b/drivers/gpu/drm/xe/xe_irq.h index 24ff16111b96..a28bd577ba52 100644 --- a/drivers/gpu/drm/xe/xe_irq.h +++ b/drivers/gpu/drm/xe/xe_irq.h @@ -6,6 +6,8 @@ #ifndef _XE_IRQ_H_ #define _XE_IRQ_H_ +#include <linux/interrupt.h> + #define XE_IRQ_DEFAULT_MSIX 1 struct xe_device; struct xe_tile; struct xe_gt; @@ -17,5 +19,8 @@ int xe_irq_install(struct xe_device *xe); void xe_irq_suspend(struct xe_device *xe); void xe_irq_resume(struct xe_device *xe); void xe_irq_enable_hwe(struct xe_gt *gt); +int xe_irq_msix_request_irq(struct xe_device *xe, irq_handler_t handler, void *irq_buf, + const char *name, bool dynamic_msix, u16 *msix); +void xe_irq_msix_free_irq(struct xe_device *xe, u16 msix); #endif From 70fb86a85dc9fd66014d7eb2fe356f50702ceeb6 Mon Sep 17 00:00:00 2001 From: John Harrison Date: Fri, 13 Dec 2024 09:28:33 -0800 Subject: [PATCH 04/12] drm/xe: Revert some changes that break a mesa debug tool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a mesa debug tool for decoding devcoredump files. Recent changes to improve the devcoredump output broke that tool. So revert the changes until the tool can be extended to support the new fields. Signed-off-by: John Harrison Fixes: c28fd6c358db ("drm/xe/devcoredump: Improve section headings and add tile info") Fixes: ec1455ce7e35 ("drm/xe/devcoredump: Add ASCII85 dump helper function") Cc: John Harrison Cc: Julia Filipchuk Cc: Lucas De Marchi Cc: Thomas Hellström Cc: Rodrigo Vivi Cc: intel-xe@lists.freedesktop.org Reviewed-by: Jonathan Cavitt Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20241213172833.1733376-1-John.C.Harrison@Intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_devcoredump.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 71636e80b71d..6980304c8903 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -119,7 +119,11 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count, drm_puts(&p, "\n**** GuC CT ****\n"); xe_guc_ct_snapshot_print(ss->guc.ct, &p); - drm_puts(&p, "\n**** Contexts ****\n"); + /* + * Don't add a new section header here because the mesa debug decoder + * tool expects the context information to be in the 'GuC CT' section. + */ + /* drm_puts(&p, "\n**** Contexts ****\n"); */ xe_guc_exec_queue_snapshot_print(ss->ge, &p); drm_puts(&p, "\n**** Job ****\n"); @@ -416,6 +420,15 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char buff[ASCII85_BUFSZ], *line_buff; size_t line_pos = 0; + /* + * Splitting blobs across multiple lines is not compatible with the mesa + * debug decoder tool. Note that even dropping the explicit '\n' below + * doesn't help because the GuC log is so big some underlying implementation + * still splits the lines at 512K characters. So just bail completely for + * the moment. + */ + return; + #define DMESG_MAX_LINE_LEN 800 #define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */ From dc5e20ae1f8a7c354dc9833faa2720254e5a5443 Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Fri, 13 Dec 2024 13:24:14 +0100 Subject: [PATCH 05/12] drm/xe: Use non-interruptible wait when moving BO to system MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensure a non-interruptible wait is used when moving a bo to XE_PL_SYSTEM. 
This prevents dma_mappings from being removed prematurely while a GPU job is still in progress, even if the CPU receives a signal during the operation. Fixes: 75521e8b56e8 ("drm/xe: Perform dma_map when moving system buffer objects to TT") Cc: Thomas Hellström Cc: Matthew Brost Cc: Lucas De Marchi Cc: stable@vger.kernel.org # v6.11+ Suggested-by: Matthew Auld Reviewed-by: Matthew Auld Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20241213122415.3880017-1-nirmoy.das@intel.com Signed-off-by: Nirmoy Das --- drivers/gpu/drm/xe/xe_bo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 283cd0294570..06931df876ab 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -733,7 +733,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, new_mem->mem_type == XE_PL_SYSTEM) { long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, - true, + false, MAX_SCHEDULE_TIMEOUT); if (timeout < 0) { ret = timeout; From cda06412c06893a6f07a2fbf89d42a0972ec9e8e Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Fri, 13 Dec 2024 13:24:15 +0100 Subject: [PATCH 06/12] drm/xe: Wait for migration job before unmapping pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a potential GPU page fault during tt -> system moves by waiting for migration jobs to complete before unmapping SG. This ensures that IOMMU mappings are not prematurely torn down while a migration job is still in progress. v2: Use intr=false(Matt A) v3: Update commit message(Matt A) v4: s/DMA_RESV_USAGE_BOOKKEEP/DMA_RESV_USAGE_KERNEL(Thomas) Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/3466 Fixes: 75521e8b56e8 ("drm/xe: Perform dma_map when moving system buffer objects to TT") Cc: Thomas Hellström Cc: Matthew Brost Cc: Lucas De Marchi Cc: stable@vger.kernel.org # v6.11+ Cc: Matthew Auld Reviewed-by: Matthew Auld Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20241213122415.3880017-2-nirmoy.das@intel.com Signed-off-by: Nirmoy Das --- drivers/gpu/drm/xe/xe_bo.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 06931df876ab..e6c896ad5602 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -857,8 +857,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, out: if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) && - ttm_bo->ttm) + ttm_bo->ttm) { + long timeout = dma_resv_wait_timeout(ttm_bo->base.resv, + DMA_RESV_USAGE_KERNEL, + false, + MAX_SCHEDULE_TIMEOUT); + if (timeout < 0) + ret = timeout; + xe_tt_unmap_sg(ttm_bo->ttm); + } return ret; } From d9a1ae0d17bdc444eefee6bc5f0dd11e875bd735 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Fri, 13 Dec 2024 10:10:12 -0800 Subject: [PATCH 07/12] drm/xe/guc: Enable WA_DUAL_QUEUE for newer platforms The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions on RCS and CCSes with different address spaces, which on DG2 is required as a WA for an HW bug. On newer platforms, this blocking has been moved into HW at the CS level, by stalling the RCS/CCS context switch when one of the other RCS/CCSes is busy with a different address space. 
While functionally correct, having a submission stalled on the HW limits the GuC ability to shuffle things around and can cause complications if the non-stalled submission runs for a long time, because the GuC doesn't know that the stalled submission isn't actually running and might declare it as hung. Therefore, we enable the DUAL_QUEUE_WA on all newer platforms to move management back to the GuC. Note that the GuC specs also recommend enabling this for all platforms starting from MTL that have a CCS. v2: only apply the WA on GTs that have CCS engines v3: split comment (Jonathan) Signed-off-by: Daniele Ceraolo Spurio Cc: John Harrison Cc: Jesus Narvaez Cc: Jonathan Cavitt Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20241213181012.2178794-1-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/xe/xe_guc.c | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 4e2868efb620..408365dfe4ee 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -147,6 +147,34 @@ static u32 guc_ctl_ads_flags(struct xe_guc *guc) return flags; } +static bool needs_wa_dual_queue(struct xe_gt *gt) +{ + /* + * The DUAL_QUEUE_WA tells the GuC to not allow concurrent submissions + * on RCS and CCSes with different address spaces, which on DG2 is + * required as a WA for an HW bug. + */ + if (XE_WA(gt, 22011391025)) + return true; + + /* + * On newer platforms, the HW has been updated to not allow parallel + * execution of different address spaces, so the RCS/CCS will stall the + * context switch if one of the other RCS/CCSes is busy with a different + * address space. While functionally correct, having a submission + * stalled on the HW limits the GuC ability to shuffle things around and + * can cause complications if the non-stalled submission runs for a long + * time, because the GuC doesn't know that the stalled submission isn't + * actually running and might declare it as hung. Therefore, we enable + * the DUAL_QUEUE_WA on all newer platforms on GTs that have CCS engines + * to move management back to the GuC. + */ + if (CCS_MASK(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) + return true; + + return false; +} + static u32 guc_ctl_wa_flags(struct xe_guc *guc) { struct xe_device *xe = guc_to_xe(guc); @@ -159,7 +187,7 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc) if (XE_WA(gt, 14014475959)) flags |= GUC_WA_HOLD_CCS_SWITCHOUT; - if (XE_WA(gt, 22011391025)) + if (needs_wa_dual_queue(gt)) flags |= GUC_WA_DUAL_QUEUE; /* From 5637797add2af632a5d037044ab1b0b35643902e Mon Sep 17 00:00:00 2001 From: Ashutosh Dixit Date: Thu, 12 Dec 2024 14:49:03 -0800 Subject: [PATCH 08/12] drm/xe/oa/uapi: Expose an unblock after N reports OA property Expose an "unblock after N reports" OA property, to allow userspace threads to be woken up less frequently. 
Co-developed-by: Umesh Nerlige Ramappa Signed-off-by: Umesh Nerlige Ramappa Signed-off-by: Ashutosh Dixit Reviewed-by: Jonathan Cavitt Reviewed-by: Umesh Nerlige Ramappa Link: https://patchwork.freedesktop.org/patch/msgid/20241212224903.1853862-1-ashutosh.dixit@intel.com --- drivers/gpu/drm/xe/xe_oa.c | 30 ++++++++++++++++++++++++++---- drivers/gpu/drm/xe/xe_oa_types.h | 3 +++ drivers/gpu/drm/xe/xe_query.c | 3 ++- include/uapi/drm/xe_drm.h | 7 +++++++ 4 files changed, 38 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index ec88b18e9baa..56bf375a9d4b 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -97,6 +97,7 @@ struct xe_oa_open_param { int num_syncs; struct xe_sync_entry *syncs; size_t oa_buffer_size; + int wait_num_reports; }; struct xe_oa_config_bo { @@ -241,11 +242,10 @@ static void oa_timestamp_clear(struct xe_oa_stream *stream, u32 *report) static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream) { u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); + u32 tail, hw_tail, partial_report_size, available; int report_size = stream->oa_buffer.format->size; - u32 tail, hw_tail; unsigned long flags; bool pollin; - u32 partial_report_size; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); @@ -289,8 +289,8 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream) stream->oa_buffer.tail = tail; - pollin = xe_oa_circ_diff(stream, stream->oa_buffer.tail, - stream->oa_buffer.head) >= report_size; + available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head); + pollin = available >= stream->wait_num_reports * report_size; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); @@ -1285,6 +1285,17 @@ static int xe_oa_set_prop_oa_buffer_size(struct xe_oa *oa, u64 value, return 0; } +static int xe_oa_set_prop_wait_num_reports(struct xe_oa *oa, u64 value, + struct xe_oa_open_param *param) +{ + if (!value) { + drm_dbg(&oa->xe->drm, "wait_num_reports %llu\n", value); + return -EINVAL; + } + param->wait_num_reports = value; + return 0; +} + static int xe_oa_set_prop_ret_inval(struct xe_oa *oa, u64 value, struct xe_oa_open_param *param) { @@ -1306,6 +1317,7 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_open[] = { [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_oa_buffer_size, + [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_wait_num_reports, }; static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = { @@ -1321,6 +1333,7 @@ static const xe_oa_set_property_fn xe_oa_set_property_funcs_config[] = { [DRM_XE_OA_PROPERTY_NUM_SYNCS] = xe_oa_set_prop_num_syncs, [DRM_XE_OA_PROPERTY_SYNCS] = xe_oa_set_prop_syncs_user, [DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE] = xe_oa_set_prop_ret_inval, + [DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS] = xe_oa_set_prop_ret_inval, }; static int xe_oa_user_ext_set_property(struct xe_oa *oa, enum xe_oa_user_extn_from from, @@ -1797,6 +1810,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream, stream->periodic = param->period_exponent > 0; stream->period_exponent = param->period_exponent; stream->no_preempt = param->no_preempt; + stream->wait_num_reports = param->wait_num_reports; stream->xef = xe_file_get(param->xef); stream->num_syncs = param->num_syncs; @@ -2156,6 +2170,14 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f if (!param.oa_buffer_size) 
param.oa_buffer_size = DEFAULT_XE_OA_BUFFER_SIZE; + if (!param.wait_num_reports) + param.wait_num_reports = 1; + if (param.wait_num_reports > param.oa_buffer_size / f->size) { + drm_dbg(&oa->xe->drm, "wait_num_reports %d\n", param.wait_num_reports); + ret = -EINVAL; + goto err_exec_q; + } + ret = xe_oa_parse_syncs(oa, &param); if (ret) goto err_exec_q; diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h index df7793915628..2dcd3b9562e9 100644 --- a/drivers/gpu/drm/xe/xe_oa_types.h +++ b/drivers/gpu/drm/xe/xe_oa_types.h @@ -218,6 +218,9 @@ struct xe_oa_stream { /** @pollin: Whether there is data available to read */ bool pollin; + /** @wait_num_reports: Number of reports to wait for before signalling pollin */ + int wait_num_reports; + /** @periodic: Whether periodic sampling is currently enabled */ bool periodic; diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index d2a816f71bf2..c059639613f7 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -672,7 +672,8 @@ static int query_oa_units(struct xe_device *xe, du->oa_unit_type = u->type; du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt); du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS | - DRM_XE_OA_CAPS_OA_BUFFER_SIZE; + DRM_XE_OA_CAPS_OA_BUFFER_SIZE | + DRM_XE_OA_CAPS_WAIT_NUM_REPORTS; j = 0; for_each_hw_engine(hwe, gt, hwe_id) { diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 0383b52cbd86..f62689ca861a 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -1487,6 +1487,7 @@ struct drm_xe_oa_unit { #define DRM_XE_OA_CAPS_BASE (1 << 0) #define DRM_XE_OA_CAPS_SYNCS (1 << 1) #define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2) +#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3) /** @oa_timestamp_freq: OA timestamp freq */ __u64 oa_timestamp_freq; @@ -1660,6 +1661,12 @@ enum drm_xe_oa_property_id { * buffer is allocated by default. */ DRM_XE_OA_PROPERTY_OA_BUFFER_SIZE, + + /** + * @DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS: Number of reports to wait + * for before unblocking poll or read + */ + DRM_XE_OA_PROPERTY_WAIT_NUM_REPORTS, }; /** From 1460bb1fef9ccf7390af0d74a15252442fd6effd Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 17 Dec 2024 08:07:32 -0800 Subject: [PATCH 09/12] drm/xe: Force write completion of MI_STORE_DATA_IMM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With force write completion unset, there are no guarantees of when the write will be globally visible, which is not the desired behavior. 
Fixes: 9c57bc08652a ("drm/xe/lnl: Drop force_probe requirement") Signed-off-by: José Roberto de Souza Reviewed-by: Ashutosh Dixit Link: https://patchwork.freedesktop.org/patch/msgid/20241217160732.46280-1-jose.souza@intel.com --- drivers/gpu/drm/xe/instructions/xe_mi_commands.h | 13 +++++++------ drivers/gpu/drm/xe/xe_migrate.c | 11 ++++++++--- drivers/gpu/drm/xe/xe_oa.c | 4 +++- drivers/gpu/drm/xe/xe_ring_ops.c | 6 ++++-- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h index 10ec2920d31b..f4ee910f0943 100644 --- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h @@ -33,12 +33,13 @@ #define MI_TOPOLOGY_FILTER __MI_INSTR(0xD) #define MI_FORCE_WAKEUP __MI_INSTR(0x1D) -#define MI_STORE_DATA_IMM __MI_INSTR(0x20) -#define MI_SDI_GGTT REG_BIT(22) -#define MI_SDI_LEN_DW GENMASK(9, 0) -#define MI_SDI_NUM_DW(x) REG_FIELD_PREP(MI_SDI_LEN_DW, (x) + 3 - 2) -#define MI_SDI_NUM_QW(x) (REG_FIELD_PREP(MI_SDI_LEN_DW, 2 * (x) + 3 - 2) | \ - REG_BIT(21)) +#define MI_STORE_DATA_IMM __MI_INSTR(0x20) +#define MI_SDI_GGTT REG_BIT(22) +#define MI_FORCE_WRITE_COMPLETION_CHECK REG_BIT(10) +#define MI_SDI_LEN_DW GENMASK(9, 0) +#define MI_SDI_NUM_DW(x) REG_FIELD_PREP(MI_SDI_LEN_DW, (x) + 3 - 2) +#define MI_SDI_NUM_QW(x) (REG_FIELD_PREP(MI_SDI_LEN_DW, 2 * (x) + 3 - 2) | \ + REG_BIT(21)) #define MI_LOAD_REGISTER_IMM __MI_INSTR(0x22) #define MI_LRI_LRM_CS_MMIO REG_BIT(19) diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 1b97d90aadda..8b32fad67878 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -581,7 +581,9 @@ static void emit_pte(struct xe_migrate *m, while (ptes) { u32 chunk = min(MAX_PTE_PER_SDI, ptes); - bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); + bb->cs[bb->len++] = MI_STORE_DATA_IMM | + MI_FORCE_WRITE_COMPLETION_CHECK | + MI_SDI_NUM_QW(chunk); bb->cs[bb->len++] = ofs; bb->cs[bb->len++] = 0; @@ -1223,7 +1225,9 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, if (!(bb->len & 1)) bb->cs[bb->len++] = MI_NOOP; - bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); + bb->cs[bb->len++] = MI_STORE_DATA_IMM | + MI_FORCE_WRITE_COMPLETION_CHECK | + MI_SDI_NUM_QW(chunk); bb->cs[bb->len++] = lower_32_bits(addr); bb->cs[bb->len++] = upper_32_bits(addr); if (pt_op->bind) @@ -1388,7 +1392,8 @@ __xe_migrate_update_pgtables(struct xe_migrate *m, u32 idx = 0; bb->cs[bb->len++] = MI_STORE_DATA_IMM | - MI_SDI_NUM_QW(chunk); + MI_FORCE_WRITE_COMPLETION_CHECK | + MI_SDI_NUM_QW(chunk); bb->cs[bb->len++] = ofs; bb->cs[bb->len++] = 0; /* upper_32_bits */ diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 56bf375a9d4b..ae94490b0eac 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -690,7 +690,9 @@ static void xe_oa_store_flex(struct xe_oa_stream *stream, struct xe_lrc *lrc, u32 offset = xe_bo_ggtt_addr(lrc->bo); do { - bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); + bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | + MI_FORCE_WRITE_COMPLETION_CHECK | + MI_SDI_NUM_DW(1); bb->cs[bb->len++] = offset + flex->offset * sizeof(u32); bb->cs[bb->len++] = 0; bb->cs[bb->len++] = flex->value; diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index 0be4f489d3e1..3a75a08b6be9 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ 
b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -72,7 +72,8 @@ static int emit_user_interrupt(u32 *dw, int i) static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i) { - dw[i++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | MI_SDI_NUM_DW(1); + dw[i++] = MI_STORE_DATA_IMM | MI_SDI_GGTT | + MI_FORCE_WRITE_COMPLETION_CHECK | MI_SDI_NUM_DW(1); dw[i++] = addr; dw[i++] = 0; dw[i++] = value; @@ -162,7 +163,8 @@ static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw, static int emit_store_imm_ppgtt_posted(u64 addr, u64 value, u32 *dw, int i) { - dw[i++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(1); + dw[i++] = MI_STORE_DATA_IMM | MI_FORCE_WRITE_COMPLETION_CHECK | + MI_SDI_NUM_QW(1); dw[i++] = lower_32_bits(addr); dw[i++] = upper_32_bits(addr); dw[i++] = lower_32_bits(value); From 64546cf46e370c89cc4b0434b00ba05cdef02d86 Mon Sep 17 00:00:00 2001 From: Mika Kahola Date: Tue, 17 Dec 2024 16:34:40 +0200 Subject: [PATCH 10/12] drm/i915/display: UHBR rates for Thunderbolt tbt-alt mode is missing uhbr rates 10G and 20G. This requires pll clock rates 312.5 MHz and 625 MHz to be added, respectively. The uhbr rates are supported only from PTL+ platforms. v2: Add drm_WARN_ON() to check if port clock is not supported by the platform (Imre) Combine forward ungate with mask parameter (Imre) Rename XE3LPDP_* to XE3D_* (Imre) Signed-off-by: Mika Kahola Reviewed-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20241217143440.572308-1-mika.kahola@intel.com --- drivers/gpu/drm/i915/display/intel_cx0_phy.c | 39 +++++++++++++++++-- .../gpu/drm/i915/display/intel_cx0_phy_regs.h | 4 ++ 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 71dc659228ab..6bcef6696deb 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -3070,7 +3070,10 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); - clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); + if (DISPLAY_VER(display) >= 30) + clock = REG_FIELD_GET(XE3_DDI_CLOCK_SELECT_MASK, val); + else + clock = REG_FIELD_GET(XELPDP_DDI_CLOCK_SELECT_MASK, val); drm_WARN_ON(display->drm, !(val & XELPDP_FORWARD_CLOCK_UNGATE)); drm_WARN_ON(display->drm, !(val & XELPDP_TBT_CLOCK_REQUEST)); @@ -3085,13 +3088,18 @@ int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder) return 540000; case XELPDP_DDI_CLOCK_SELECT_TBT_810: return 810000; + case XELPDP_DDI_CLOCK_SELECT_TBT_312_5: + return 1000000; + case XELPDP_DDI_CLOCK_SELECT_TBT_625: + return 2000000; default: MISSING_CASE(clock); return 162000; } } -static int intel_mtl_tbt_clock_select(int clock) +static int intel_mtl_tbt_clock_select(struct intel_display *display, + int clock) { switch (clock) { case 162000: @@ -3102,6 +3110,18 @@ return XELPDP_DDI_CLOCK_SELECT_TBT_540; case 810000: return XELPDP_DDI_CLOCK_SELECT_TBT_810; + case 1000000: + if (DISPLAY_VER(display) < 30) { + drm_WARN_ON(display->drm, "UHBR10 not supported for the platform\n"); + return XELPDP_DDI_CLOCK_SELECT_TBT_162; + } + return XELPDP_DDI_CLOCK_SELECT_TBT_312_5; + case 2000000: + if (DISPLAY_VER(display) < 30) { + drm_WARN_ON(display->drm, "UHBR20 not supported for the platform\n"); + return XELPDP_DDI_CLOCK_SELECT_TBT_162; + } + return XELPDP_DDI_CLOCK_SELECT_TBT_625; default: MISSING_CASE(clock); return 
XELPDP_DDI_CLOCK_SELECT_TBT_162; @@ -3114,15 +3134,26 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder, struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); u32 val = 0; + u32 mask; /* * 1. Program PORT_CLOCK_CTL REGISTER to configure * clock muxes, gating and SSC */ - val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(crtc_state->port_clock)); + + if (DISPLAY_VER(display) >= 30) { + mask = XE3_DDI_CLOCK_SELECT_MASK; + val |= XE3_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock)); + } else { + mask = XELPDP_DDI_CLOCK_SELECT_MASK; + val |= XELPDP_DDI_CLOCK_SELECT(intel_mtl_tbt_clock_select(display, crtc_state->port_clock)); + } + + mask |= XELPDP_FORWARD_CLOCK_UNGATE; val |= XELPDP_FORWARD_CLOCK_UNGATE; + intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), - XELPDP_DDI_CLOCK_SELECT_MASK | XELPDP_FORWARD_CLOCK_UNGATE, val); + mask, val); /* 2. Read back PORT_CLOCK_CTL REGISTER */ val = intel_de_read(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port)); diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h index c685479c9756..59002fe651ef 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h @@ -187,7 +187,9 @@ #define XELPDP_TBT_CLOCK_REQUEST REG_BIT(19) #define XELPDP_TBT_CLOCK_ACK REG_BIT(18) #define XELPDP_DDI_CLOCK_SELECT_MASK REG_GENMASK(15, 12) +#define XE3_DDI_CLOCK_SELECT_MASK REG_GENMASK(16, 12) #define XELPDP_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XELPDP_DDI_CLOCK_SELECT_MASK, val) +#define XE3_DDI_CLOCK_SELECT(val) REG_FIELD_PREP(XE3_DDI_CLOCK_SELECT_MASK, val) #define XELPDP_DDI_CLOCK_SELECT_NONE 0x0 #define XELPDP_DDI_CLOCK_SELECT_MAXPCLK 0x8 #define XELPDP_DDI_CLOCK_SELECT_DIV18CLK 0x9 @@ -195,6 +197,8 @@ #define XELPDP_DDI_CLOCK_SELECT_TBT_270 0xd #define XELPDP_DDI_CLOCK_SELECT_TBT_540 0xe #define XELPDP_DDI_CLOCK_SELECT_TBT_810 0xf +#define XELPDP_DDI_CLOCK_SELECT_TBT_312_5 0x18 +#define XELPDP_DDI_CLOCK_SELECT_TBT_625 0x19 #define XELPDP_FORWARD_CLOCK_UNGATE REG_BIT(10) #define XELPDP_LANE1_PHY_CLOCK_SELECT REG_BIT(8) #define XELPDP_SSC_ENABLE_PLLA REG_BIT(1) From c2a86d8ce81d43ae56549682097ebf5827ec43b9 Mon Sep 17 00:00:00 2001 From: Nirmoy Das Date: Wed, 18 Dec 2024 15:14:47 +0100 Subject: [PATCH 11/12] drm/xe/tests: Move shrink test out of xe_bo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The subtest typically has an execution time long enough to motivate a separate test so that it can be easily excluded if needed. 
v2: reword commit message(Thomas) Cc: Matthew Auld Cc: Thomas Hellström Reviewed-by: Thomas Hellström Link: https://patchwork.freedesktop.org/patch/msgid/20241218141447.2528530-1-nirmoy.das@intel.com Signed-off-by: Nirmoy Das --- drivers/gpu/drm/xe/tests/xe_bo.c | 16 ++++++++++++++-- drivers/gpu/drm/xe/tests/xe_live_test_mod.c | 2 ++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index c9ec7a313c6b..405ff904153e 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -606,8 +606,6 @@ static void xe_bo_shrink_kunit(struct kunit *test) static struct kunit_case xe_bo_tests[] = { KUNIT_CASE_PARAM(xe_ccs_migrate_kunit, xe_pci_live_device_gen_param), KUNIT_CASE_PARAM(xe_bo_evict_kunit, xe_pci_live_device_gen_param), - KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param, - {.speed = KUNIT_SPEED_SLOW}), {} }; @@ -618,3 +616,17 @@ struct kunit_suite xe_bo_test_suite = { .init = xe_kunit_helper_xe_device_live_test_init, }; EXPORT_SYMBOL_IF_KUNIT(xe_bo_test_suite); + +static struct kunit_case xe_bo_shrink_test[] = { + KUNIT_CASE_PARAM_ATTR(xe_bo_shrink_kunit, xe_pci_live_device_gen_param, + {.speed = KUNIT_SPEED_SLOW}), + {} +}; + +VISIBLE_IF_KUNIT +struct kunit_suite xe_bo_shrink_test_suite = { + .name = "xe_bo_shrink", + .test_cases = xe_bo_shrink_test, + .init = xe_kunit_helper_xe_device_live_test_init, +}; +EXPORT_SYMBOL_IF_KUNIT(xe_bo_shrink_test_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c index 0d36ab864ec0..81277c77016d 100644 --- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c +++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c @@ -6,11 +6,13 @@ #include extern struct kunit_suite xe_bo_test_suite; +extern struct kunit_suite xe_bo_shrink_test_suite; extern struct kunit_suite xe_dma_buf_test_suite; extern struct kunit_suite xe_migrate_test_suite; extern struct kunit_suite xe_mocs_test_suite; kunit_test_suite(xe_bo_test_suite); +kunit_test_suite(xe_bo_shrink_test_suite); kunit_test_suite(xe_dma_buf_test_suite); kunit_test_suite(xe_migrate_test_suite); kunit_test_suite(xe_mocs_test_suite); From a8d0aa0e7fcd20c9f1992688c0f0d07a68287403 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 16 Dec 2024 23:32:53 +0100 Subject: [PATCH 12/12] drm/xe/pf: Use correct function to check LMEM provisioning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a typo in a function call: instead of VF LMEM we were looking at VF GGTT provisioning. Fix that. 
Fixes: 234670cea9a2 ("drm/xe/pf: Skip fair VFs provisioning if already provisioned") Signed-off-by: Michal Wajdeczko Cc: Piotr Piórkowski Reviewed-by: Himal Prasad Ghimiray Link: https://patchwork.freedesktop.org/patch/msgid/20241216223253.819-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 65082f12f1a8..bd621df3ab91 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -2120,7 +2120,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid) valid_any = valid_any || (valid_ggtt && is_primary); if (IS_DGFX(xe)) { - bool valid_lmem = pf_get_vf_config_ggtt(primary_gt, vfid); + bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid); valid_any = valid_any || (valid_lmem && is_primary); valid_all = valid_all && valid_lmem;