Merge tag 'drm-intel-gt-next-2023-12-08' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:

-   drm/i915: Implement fdinfo memory stats printing

    Use the newly added drm_print_memory_stats helper to show memory
    utilisation of our objects in drm/driver-specific fdinfo output.

    To collect the stats we walk the per-memory-region object lists
    and accumulate object sizes into the respective drm_memory_stats
    categories.
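
    As a rough sketch of that accumulation (illustrative only: the
    region walk and the object field names below are assumptions based
    on i915 conventions, not code copied from this merge):

        struct drm_memory_stats stats = {};
        struct drm_i915_gem_object *obj;

        /* Walk one memory region's object list, bucketing each size. */
        list_for_each_entry(obj, &region->objects.list, mm.region_link) {
                if (obj->base.handle_count > 1)
                        stats.shared += obj->base.size;
                else
                        stats.private += obj->base.size;
                if (i915_gem_object_has_pages(obj)) {
                        stats.resident += obj->base.size;
                        if (obj->mm.madv == I915_MADV_DONTNEED)
                                stats.purgeable += obj->base.size;
                }
        }

        /* Emit the totals in the common drm fdinfo key/value format. */
        drm_print_memory_stats(p, &stats, DRM_GEM_OBJECT_RESIDENT,
                               region->name);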

Cross-subsystem Changes:

- Backmerge of drm-next (to bring drm-intel-next for PXP changes)

Driver Changes:

- Wa_18028616096 now applies to all DG2 (Matt R)
- Drop Wa_22014600077 on all DG2 (Matt R)
- Add new ATS-M device ID (Haridhar)
- More Meteorlake (MTL) workarounds (Matt R, Dnyaneshwar, Jonathan,
  Gustavo, Radhakrishna)
- PMU WARN_ON cleanup on driver unbind (Umesh)
- Limit GGTT WC flushing workaround to pre-BXT/ICL platforms
- Complement implementation for Wa_16018031267 / Wa_16018063123
  (Andrzej, Jonathan, Nirmoy, Chris)

- Properly print internal GSC engine in trace logs (Tvrtko)
- Track gt pm wakerefs (Andrzej); a caller sketch follows this changelog
- Fix null deref bugs in perf code when perf is disabled (Harshit,
  Tvrtko)
- Fix __i915_request_create memory leak on driver unbind (Andrzej)
- Remove spurious unsupported HuC message on MTL (Daniele)
- Read a shadowed mmio register for ggtt flush (Vinay)
- Add missing new-line to GT_TRACE (Andrzej)
- Add drm_dbgs for critical PXP events (Alan)
- Skip pxp init if gt is wedged (Zhanjun)

- Replace custom intel runtime_pm tracker with ref_tracker library
  (Andrzej)
- Compiler warning/static checker/coding style cleanups (Arnd, Nirmoy,
  Soumya, Gilbert, Dorcas, Kunwu, Sam, Tvrtko)
- Code structure and helper cleanups (Jani, Tvrtko, Andi)
- Selftest improvements (John, Tvrtko, Andrzej)
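
    To illustrate the tracked GT PM flow used throughout this merge (a
    minimal caller sketch; do_hw_work() is a placeholder, not a real
    i915 function):

        intel_wakeref_t wakeref;

        /* Acquiring now returns a cookie identifying this reference. */
        wakeref = intel_gt_pm_get(gt);
        do_hw_work(gt);
        /* Release pairs with exactly that cookie. */
        intel_gt_pm_put(gt, wakeref);

        /* The scoped helper does the same pairing automatically. */
        with_intel_gt_pm(gt, wakeref)
                do_hw_work(gt);

    With CONFIG_DRM_I915_DEBUG_WAKEREF enabled, each cookie is backed
    by a ref_tracker entry carrying a stack trace, so leaked references
    can be attributed to their acquisition sites on driver unbind.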

Signed-off-by: Dave Airlie <airlied@redhat.com>

# Conflicts:
#	drivers/gpu/drm/i915/gt/intel_gt_mcr.c
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZXNBcsSwJEVsq9On@jlahtine-mobl.ger.corp.intel.com
commit 6734cd03f7 (Dave Airlie, 2023-12-13 11:20:49 +10:00)
77 changed files with 1232 additions and 635 deletions

@ -24,7 +24,9 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
select REF_TRACKER
select STACKDEPOT
select STACKTRACE
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
@ -38,6 +40,7 @@ config DRM_I915_DEBUG
select DRM_I915_DEBUG_GEM_ONCE
select DRM_I915_DEBUG_MMIO
select DRM_I915_DEBUG_RUNTIME_PM
select DRM_I915_DEBUG_WAKEREF
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
@ -231,7 +234,9 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
select REF_TRACKER
select STACKDEPOT
select STACKTRACE
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during
@ -240,3 +245,16 @@ config DRM_I915_DEBUG_RUNTIME_PM
Recommended for driver developers only.
If in doubt, say "N"
config DRM_I915_DEBUG_WAKEREF
bool "Enable extra tracking for wakerefs"
depends on DRM_I915
select REF_TRACKER
select STACKDEPOT
select STACKTRACE
help
Choose this option to turn on extra state checking and usage
tracking for the wakeref PM functionality. This may introduce
overhead during driver runtime.
If in doubt, say "N"
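
For reference, enabling the new tracking is a one-line opt-in in the
kernel config (a minimal fragment; REF_TRACKER, STACKDEPOT and
STACKTRACE are pulled in by the select statements above):

        CONFIG_DRM_I915=m
        CONFIG_DRM_I915_DEBUG_WAKEREF=y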

@ -405,7 +405,7 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
struct drm_i915_private,
display.power.domains);
drm_dbg(&i915->drm, "async_put_wakeref %u\n",
drm_dbg(&i915->drm, "async_put_wakeref %lu\n",
power_domains->async_put_wakeref);
print_power_domains(power_domains, "async_put_domains[0]",

@ -279,7 +279,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
}
static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
proto_context_create(struct drm_i915_file_private *fpriv,
struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_proto_context *pc, *err;
@ -287,6 +288,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags)
if (!pc)
return ERR_PTR(-ENOMEM);
pc->fpriv = fpriv;
pc->num_user_engines = -1;
pc->user_engines = NULL;
pc->user_flags = BIT(UCONTEXT_BANNABLE) |
@ -1622,6 +1624,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
err = PTR_ERR(ppgtt);
goto err_ctx;
}
ppgtt->vm.fpriv = pc->fpriv;
vm = &ppgtt->vm;
}
if (vm)
@ -1741,7 +1744,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
/* 0 reserved for invalid/unassigned ppgtt */
xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
pc = proto_context_create(i915, 0);
pc = proto_context_create(file_priv, i915, 0);
if (IS_ERR(pc)) {
err = PTR_ERR(pc);
goto err;
@ -1823,6 +1826,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->vm_id = id;
ppgtt->vm.fpriv = file_priv;
return 0;
err_put:
@ -2285,7 +2289,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
ext_data.pc = proto_context_create(i915, args->flags);
ext_data.pc = proto_context_create(file->driver_priv, i915,
args->flags);
if (IS_ERR(ext_data.pc))
return PTR_ERR(ext_data.pc);

@ -188,6 +188,9 @@ struct i915_gem_proto_engine {
* CONTEXT_CREATE_SET_PARAM during GEM_CONTEXT_CREATE.
*/
struct i915_gem_proto_context {
/** @fpriv: Client which creates the context */
struct drm_i915_file_private *fpriv;
/** @vm: See &i915_gem_context.vm */
struct i915_address_space *vm;

@ -254,6 +254,8 @@ struct i915_execbuffer {
struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
intel_wakeref_t wakeref;
intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@ -1679,7 +1681,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
size = nreloc * sizeof(*relocs);
relocs = kvmalloc_array(size, 1, GFP_KERNEL);
relocs = kvmalloc_array(1, size, GFP_KERNEL);
if (!relocs) {
err = -ENOMEM;
goto err;
@ -2720,13 +2722,13 @@ eb_select_engine(struct i915_execbuffer *eb)
for_each_child(ce, child)
intel_context_get(child);
intel_gt_pm_get(gt);
eb->wakeref = intel_gt_pm_get(ce->engine->gt);
/*
* Keep GT0 active on MTL so that i915_vma_parked() doesn't
* free VMAs while execbuf ioctl is validating VMAs.
*/
if (gt->info.id)
intel_gt_pm_get(to_gt(gt->i915));
eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@ -2766,9 +2768,9 @@ eb_select_engine(struct i915_execbuffer *eb)
err:
if (gt->info.id)
intel_gt_pm_put(to_gt(gt->i915));
intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(gt);
intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
intel_context_put(ce);
@ -2786,8 +2788,8 @@ eb_put_engine(struct i915_execbuffer *eb)
* i915_vma_parked() from interfering while execbuf validates vmas.
*/
if (eb->gt->info.id)
intel_gt_pm_put(to_gt(eb->gt->i915));
intel_gt_pm_put(eb->gt);
intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
intel_context_put(eb->context);

@ -106,6 +106,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->mm.link);
#ifdef CONFIG_PROC_FS
INIT_LIST_HEAD(&obj->client_link);
#endif
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->lut_lock);
@ -293,6 +297,10 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
/* We need to keep this alive for RCU read access from fdinfo. */
if (obj->mm.n_placements > 1)
kfree(obj->mm.placements);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@ -389,9 +397,6 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
if (obj->ops->release)
obj->ops->release(obj);
if (obj->mm.n_placements > 1)
kfree(obj->mm.placements);
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
@ -442,6 +447,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
i915_drm_client_remove_object(obj);
/*
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.

@ -302,6 +302,18 @@ struct drm_i915_gem_object {
*/
struct i915_address_space *shares_resv_from;
#ifdef CONFIG_PROC_FS
/**
* @client: @i915_drm_client which created the object
*/
struct i915_drm_client *client;
/**
* @client_link: Link into @i915_drm_client.objects_list
*/
struct list_head client_link;
#endif
union {
struct rcu_head rcu;
struct llist_node freed;

@ -386,6 +386,27 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
/* Wa_14019821291 */
if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
/*
* This workaround is primarily implemented by the BIOS. We
* just need to figure out whether the BIOS has applied the
* workaround (meaning the programmed address falls within
* the DSM) and, if so, reserve that part of the DSM to
* prevent accidental reuse. The DSM location should be just
* below the WOPCM.
*/
u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
MTL_GSCPSMI_BASEADDR_LSB,
MTL_GSCPSMI_BASEADDR_MSB);
if (gscpsmi_base >= i915->dsm.stolen.start &&
gscpsmi_base < i915->dsm.stolen.end) {
*base = gscpsmi_base;
*size = i915->dsm.stolen.end - gscpsmi_base;
return;
}
}
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
case GEN8_STOLEN_RESERVED_1M:
*size = 1024 * 1024;

@ -85,6 +85,7 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@ -99,7 +100,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_gt_pm_get(vma->vm->gt);
wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@ -112,12 +113,13 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
i915_vma_unpin_iomap(vma);
out_rpm:
intel_gt_pm_put(vma->vm->gt);
intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@ -132,7 +134,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_gt_pm_get(vma->vm->gt);
wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@ -145,7 +147,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
i915_vma_unpin_iomap(vma);
out_rpm:
intel_gt_pm_put(vma->vm->gt);
intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}

@ -630,14 +630,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
intel_gt_pm_get(to_gt(i915));
intel_gt_pm_get_untracked(to_gt(i915));
cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
intel_gt_pm_put(to_gt(i915));
intel_gt_pm_put_untracked(to_gt(i915));
i915_gem_driver_register__shrinker(i915);
}
@ -778,6 +778,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
static int gtt_set(struct drm_i915_gem_object *obj)
{
intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@ -786,7 +787,7 @@ static int gtt_set(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_gt_pm_get(vma->vm->gt);
wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@ -798,12 +799,13 @@ static int gtt_set(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
intel_gt_pm_put(vma->vm->gt);
intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_check(struct drm_i915_gem_object *obj)
{
intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@ -812,7 +814,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
intel_gt_pm_get(vma->vm->gt);
wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@ -828,7 +830,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
intel_gt_pm_put(vma->vm->gt);
intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}

@ -83,7 +83,7 @@ live_context(struct drm_i915_private *i915, struct file *file)
int err;
u32 id;
pc = proto_context_create(i915, 0);
pc = proto_context_create(fpriv, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
@ -152,7 +152,7 @@ kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx;
struct i915_gem_proto_context *pc;
pc = proto_context_create(i915, 0);
pc = proto_context_create(NULL, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);

@ -5,6 +5,7 @@
#include <linux/log2.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gen8_ppgtt.h"
@ -222,6 +223,9 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
if (vm->rsvd.obj)
i915_gem_object_put(vm->rsvd.obj);
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
@ -950,6 +954,41 @@ gen8_alloc_top_pd(struct i915_address_space *vm)
return ERR_PTR(err);
}
static int gen8_init_rsvd(struct i915_address_space *vm)
{
struct drm_i915_private *i915 = vm->i915;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int ret;
/* The memory will be used only by GPU. */
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
I915_BO_ALLOC_VOLATILE |
I915_BO_ALLOC_GPU_ONLY);
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto unref;
}
ret = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
if (ret)
goto unref;
vm->rsvd.vma = i915_vma_make_unshrinkable(vma);
vm->rsvd.obj = obj;
vm->total -= vma->node.size;
return 0;
unref:
i915_gem_object_put(obj);
return ret;
}
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@ -1031,6 +1070,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
err = gen8_init_rsvd(&ppgtt->vm);
if (err)
goto err_put;
return ppgtt;
err_put:

@ -28,11 +28,14 @@ static void irq_disable(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
intel_wakeref_t wakeref;
/*
* Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference.
*/
if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
wakeref = intel_gt_pm_get_if_awake(b->irq_engine->gt);
if (GEM_WARN_ON(!wakeref))
return;
/*
@ -41,7 +44,7 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
WRITE_ONCE(b->irq_armed, true);
WRITE_ONCE(b->irq_armed, wakeref);
/* Requests may have completed before we could enable the interrupt. */
if (!b->irq_enabled++ && b->irq_enable(b))
@ -61,12 +64,14 @@ static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
intel_wakeref_t wakeref = b->irq_armed;
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
b->irq_disable(b);
WRITE_ONCE(b->irq_armed, false);
intel_gt_pm_put_async(b->irq_engine->gt);
WRITE_ONCE(b->irq_armed, 0);
intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
}
static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)

@ -13,6 +13,7 @@
#include <linux/types.h>
#include "intel_engine_types.h"
#include "intel_wakeref.h"
/*
* Rather than have every client wait upon all user interrupts,
@ -43,7 +44,7 @@ struct intel_breadcrumbs {
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
bool irq_armed;
intel_wakeref_t irq_armed;
/* Not all breadcrumbs are attached to physical HW */
intel_engine_mask_t engine_mask;

@ -6,6 +6,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_trace.h"
@ -50,6 +51,7 @@ intel_context_create(struct intel_engine_cs *engine)
int intel_context_alloc_state(struct intel_context *ce)
{
struct i915_gem_context *ctx;
int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
@ -66,6 +68,18 @@ int intel_context_alloc_state(struct intel_context *ce)
goto unlock;
set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
rcu_read_lock();
ctx = rcu_dereference(ce->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
ctx = NULL;
rcu_read_unlock();
if (ctx) {
if (ctx->client)
i915_drm_client_add_context_objects(ctx->client,
ce);
i915_gem_context_put(ctx);
}
}
unlock:

@ -212,7 +212,7 @@ static inline void intel_context_enter(struct intel_context *ce)
return;
ce->ops->enter(ce);
intel_gt_pm_get(ce->vm->gt);
ce->wakeref = intel_gt_pm_get(ce->vm->gt);
}
static inline void intel_context_mark_active(struct intel_context *ce)
@ -229,7 +229,7 @@ static inline void intel_context_exit(struct intel_context *ce)
if (--ce->active_count)
return;
intel_gt_pm_put_async(ce->vm->gt);
intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
ce->ops->exit(ce);
}

@ -17,6 +17,7 @@
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"
#include "uc/intel_guc_fwif.h"
@ -112,6 +113,7 @@ struct intel_context {
u32 ring_size;
struct intel_ring *ring;
struct intel_timeline *timeline;
intel_wakeref_t wakeref;
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0

@ -47,7 +47,7 @@
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
#define MAX_MMIO_BASES 3
struct engine_info {

@ -188,7 +188,7 @@ static void heartbeat(struct work_struct *wrk)
* low latency and no jitter] the chance to naturally
* complete before being preempted.
*/
attr.priority = 0;
attr.priority = I915_PRIORITY_NORMAL;
if (rq->sched.attr.priority >= attr.priority)
attr.priority = I915_PRIORITY_HEARTBEAT;
if (rq->sched.attr.priority >= attr.priority)

@ -63,7 +63,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
ENGINE_TRACE(engine, "\n");
intel_gt_pm_get(engine->gt);
engine->wakeref_track = intel_gt_pm_get(engine->gt);
/* Discard stale context state from across idling */
ce = engine->kernel_context;
@ -122,6 +122,7 @@ __queue_and_release_pm(struct i915_request *rq,
*/
GEM_BUG_ON(rq->context->active_count != 1);
__intel_gt_pm_get(engine->gt);
rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
/*
* We have to serialise all potential retirement paths with our
@ -285,7 +286,7 @@ static int __engine_park(struct intel_wakeref *wf)
engine->park(engine);
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
intel_gt_pm_put_async(engine->gt);
intel_gt_pm_put_async(engine->gt, engine->wakeref_track);
return 0;
}
@ -296,7 +297,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops, engine->name);
intel_engine_init_heartbeat(engine);
intel_gsc_idle_msg_enable(engine);

@ -118,9 +118,15 @@
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
#define PER_CTX_BB_FORCE BIT(2)
#define PER_CTX_BB_VALID BIT(0)
#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
#define ECOSKPD(base) _MMIO((base) + 0x1d0)
#define XEHP_BLITTER_SCHEDULING_MODE_MASK REG_GENMASK(12, 11)
#define XEHP_BLITTER_ROUND_ROBIN_MODE \
REG_FIELD_PREP(XEHP_BLITTER_SCHEDULING_MODE_MASK, 1)
#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4)
#define ECO_GATING_CX_ONLY REG_BIT(3)
#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3)
@ -257,5 +263,7 @@
#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
#define VDBOX_CGCTL3F1C(base) _MMIO((base) + 0x3f1c)
#define MFXPIPE_CLKGATE_DIS REG_BIT(3)
#endif /* __INTEL_ENGINE_REGS__ */

@ -446,7 +446,9 @@ struct intel_engine_cs {
unsigned long serial;
unsigned long wakeref_serial;
intel_wakeref_t wakeref_track;
struct intel_wakeref wakeref;
struct file *default_state;
struct {

@ -630,7 +630,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !--engine->fw_active)
intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
intel_gt_pm_put_async(engine->gt);
intel_gt_pm_put_async_untracked(engine->gt);
/*
* If this is part of a virtual engine, its next request may

@ -245,16 +245,15 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc)) {
if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
guc_ggtt_ct_invalidate(gt);
} else if (GRAPHICS_VER(i915) >= 12) {
else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
} else {
else
intel_uncore_write_fw(gt->uncore,
GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
}
}
@ -297,7 +296,7 @@ static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
return intel_gt_is_bind_context_ready(gt);
}
static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref)
{
struct intel_context *ce;
struct intel_gt *gt = ggtt->vm.gt;
@ -314,7 +313,8 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
* would conflict with fs_reclaim trying to allocate memory while
* doing rpm_resume().
*/
if (!intel_gt_pm_get_if_awake(gt))
*wakeref = intel_gt_pm_get_if_awake(gt);
if (!*wakeref)
return NULL;
intel_engine_pm_get(ce->engine);
@ -322,10 +322,10 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
return ce;
}
static void gen8_ggtt_bind_put_ce(struct intel_context *ce)
static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref)
{
intel_engine_pm_put(ce->engine);
intel_gt_pm_put(ce->engine->gt);
intel_gt_pm_put(ce->engine->gt, wakeref);
}
static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
@ -338,12 +338,13 @@ static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
struct sgt_iter iter;
struct i915_request *rq;
struct intel_context *ce;
intel_wakeref_t wakeref;
u32 *cs;
if (!num_entries)
return true;
ce = gen8_ggtt_bind_get_ce(ggtt);
ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref);
if (!ce)
return false;
@ -419,13 +420,13 @@ static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
offset += n_ptes;
}
gen8_ggtt_bind_put_ce(ce);
gen8_ggtt_bind_put_ce(ce, wakeref);
return true;
err_rq:
i915_request_put(rq);
put_ce:
gen8_ggtt_bind_put_ce(ce);
gen8_ggtt_bind_put_ce(ce, wakeref);
return false;
}

@ -451,7 +451,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
RING_HEAD(RENDER_RING_BASE));
RING_TAIL(RENDER_RING_BASE));
spin_unlock_irqrestore(&uncore->lock, flags);
}
}

@ -82,6 +82,10 @@ struct drm_printer;
##__VA_ARGS__); \
} while (0)
#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
static inline bool gt_is_root(struct intel_gt *gt)
{
return !gt->info.id;
@ -114,6 +118,11 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
return guc_to_gt(guc)->i915;
}
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);

@ -388,8 +388,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* registers. This wakeref will be released in the unlock
* routine.
*
* This is expected to become a formally documented/numbered
* workaround soon.
* Wa_22018931422
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);

@ -28,19 +28,20 @@
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
intel_wakeref_t wakeref;
/* Inside suspend/resume so single threaded, no races to worry about. */
if (likely(!count))
return;
intel_gt_pm_get(gt);
wakeref = intel_gt_pm_get(gt);
if (suspend) {
GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
atomic_sub(count, &gt->wakeref.count);
} else {
atomic_add(count, &gt->wakeref.count);
}
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
}
static void runtime_begin(struct intel_gt *gt)
@ -138,7 +139,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt)
* runtime_pm is per-device rather than per-tile, so this is still the
* correct structure.
*/
intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops, "GT");
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
@ -167,7 +168,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
enum intel_engine_id id;
intel_wakeref_t wakeref;
GT_TRACE(gt, "force:%s", str_yes_no(force));
GT_TRACE(gt, "force:%s\n", str_yes_no(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@ -236,6 +237,7 @@ int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err;
err = intel_gt_has_unrecoverable_error(gt);
@ -252,7 +254,7 @@ int intel_gt_resume(struct intel_gt *gt)
*/
gt_sanitize(gt, true);
intel_gt_pm_get(gt);
wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
@ -295,7 +297,7 @@ int intel_gt_resume(struct intel_gt *gt)
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
intel_gt_bind_context_set_ready(gt);
return err;

@ -16,19 +16,28 @@ static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
return intel_wakeref_is_active(&gt->wakeref);
}
static inline void intel_gt_pm_get(struct intel_gt *gt)
static inline void intel_gt_pm_get_untracked(struct intel_gt *gt)
{
intel_wakeref_get(&gt->wakeref);
}
static inline intel_wakeref_t intel_gt_pm_get(struct intel_gt *gt)
{
intel_gt_pm_get_untracked(gt);
return intel_wakeref_track(&gt->wakeref);
}
static inline void __intel_gt_pm_get(struct intel_gt *gt)
{
__intel_wakeref_get(&gt->wakeref);
}
static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
return intel_wakeref_get_if_active(&gt->wakeref);
if (!intel_wakeref_get_if_active(&gt->wakeref))
return 0;
return intel_wakeref_track(&gt->wakeref);
}
static inline void intel_gt_pm_might_get(struct intel_gt *gt)
@ -36,12 +45,18 @@ static inline void intel_gt_pm_might_get(struct intel_gt *gt)
intel_wakeref_might_get(&gt->wakeref);
}
static inline void intel_gt_pm_put(struct intel_gt *gt)
static inline void intel_gt_pm_put_untracked(struct intel_gt *gt)
{
intel_wakeref_put(&gt->wakeref);
}
static inline void intel_gt_pm_put_async(struct intel_gt *gt)
static inline void intel_gt_pm_put(struct intel_gt *gt, intel_wakeref_t handle)
{
intel_wakeref_untrack(&gt->wakeref, handle);
intel_gt_pm_put_untracked(gt);
}
static inline void intel_gt_pm_put_async_untracked(struct intel_gt *gt)
{
intel_wakeref_put_async(&gt->wakeref);
}
@ -51,9 +66,14 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
intel_wakeref_might_put(&gt->wakeref);
}
#define with_intel_gt_pm(gt, tmp) \
for (tmp = 1, intel_gt_pm_get(gt); tmp; \
intel_gt_pm_put(gt), tmp = 0)
static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t handle)
{
intel_wakeref_untrack(&gt->wakeref, handle);
intel_gt_pm_put_async_untracked(gt);
}
#define with_intel_gt_pm(gt, wf) \
for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)
/**
* with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
@ -64,7 +84,7 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
* @wf: pointer to a temporary wakeref.
*/
#define with_intel_gt_pm_if_awake(gt, wf) \
for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{

@ -27,7 +27,7 @@
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
atomic_inc(&gt->user_wakeref);
intel_gt_pm_get(gt);
intel_gt_pm_get_untracked(gt);
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
}
@ -36,7 +36,7 @@ void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_put(gt->uncore);
intel_gt_pm_put(gt);
intel_gt_pm_put_untracked(gt);
atomic_dec(&gt->user_wakeref);
}

@ -537,6 +537,9 @@
#define XEHP_SQCM MCR_REG(0x8724)
#define EN_32B_ACCESS REG_BIT(30)
#define MTL_GSCPSMI_BASEADDR_LSB _MMIO(0x880c)
#define MTL_GSCPSMI_BASEADDR_MSB _MMIO(0x8810)
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)

@ -63,6 +63,9 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
if (vm->fpriv)
i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@ -84,6 +87,9 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
if (vm->fpriv)
i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@ -95,6 +101,16 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
/*
* FIXME: It is suspected that some Address Translation Service (ATS)
* issue on IOMMU is causing CAT errors to occur on some MTL workloads.
* Applying a write barrier to the ppgtt set entry functions appeared
* to have no effect, so we must temporarily use I915_MAP_WC here on
* MTL until a proper ATS solution is found.
*/
if (IS_METEORLAKE(vm->i915))
type = I915_MAP_WC;
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@ -109,6 +125,16 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
/*
* FIXME: It is suspected that some Address Translation Service (ATS)
* issue on IOMMU is causing CAT errors to occur on some MTL workloads.
* Applying a write barrier to the ppgtt set entry functions appeared
* to have no effect, so we must temporarily use I915_MAP_WC here on
* MTL until a proper ATS solution is found.
*/
if (IS_METEORLAKE(vm->i915))
type = I915_MAP_WC;
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);

@ -249,8 +249,13 @@ struct i915_address_space {
struct work_struct release_work;
struct drm_mm mm;
struct {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
} rsvd;
struct intel_gt *gt;
struct drm_i915_private *i915;
struct drm_i915_file_private *fpriv;
struct device *dma;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */

@ -828,6 +828,18 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
return 0;
}
static void
lrc_setup_bb_per_ctx(u32 *regs,
const struct intel_engine_cs *engine,
u32 ctx_bb_ggtt_addr)
{
GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
ctx_bb_ggtt_addr |
PER_CTX_BB_FORCE |
PER_CTX_BB_VALID;
}
static void
lrc_setup_indirect_ctx(u32 *regs,
const struct intel_engine_cs *engine,
@ -1020,7 +1032,13 @@ static u32 context_wa_bb_offset(const struct intel_context *ce)
return PAGE_SIZE * ce->wa_bb_page;
}
static u32 *context_indirect_bb(const struct intel_context *ce)
/*
* per_ctx below determines which WABB section is used.
* When true, the function returns the location of the
* PER_CTX_BB. When false, the function returns the
* location of the INDIRECT_CTX.
*/
static u32 *context_wabb(const struct intel_context *ce, bool per_ctx)
{
void *ptr;
@ -1029,6 +1047,7 @@ static u32 *context_indirect_bb(const struct intel_context *ce)
ptr = ce->lrc_reg_state;
ptr -= LRC_STATE_OFFSET; /* back to start of context image */
ptr += context_wa_bb_offset(ce);
ptr += per_ctx ? PAGE_SIZE : 0;
return ptr;
}
@ -1105,7 +1124,8 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) >= 12) {
ce->wa_bb_page = context_size / PAGE_SIZE;
context_size += PAGE_SIZE;
/* INDIRECT_CTX and PER_CTX_BB need separate pages. */
context_size += PAGE_SIZE * 2;
}
if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
@ -1407,12 +1427,85 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return gen12_emit_aux_table_inv(ce->engine, cs);
}
static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
{
struct intel_gt *gt = ce->engine->gt;
int mocs = gt->mocs.uc_index << 1;
/*
* Wa_16018031267 / Wa_16018063123 requires that SW forces the
* main copy engine arbitration into round robin mode. We
* additionally need to submit the following WABB blt command
* to produce 4 subblits with each subblit generating 0 byte
* write requests as WABB:
*
* XY_FASTCOLOR_BLT
* BG0 -> 5100000E
* BG1 -> 0000003F (Dest pitch)
* BG2 -> 00000000 (X1, Y1) = (0, 0)
* BG3 -> 00040001 (X2, Y2) = (1, 4)
* BG4 -> scratch
* BG5 -> scratch
* BG6-12 -> 00000000
* BG13 -> 20004004 (Surf. Width= 2,Surf. Height = 5 )
* BG14 -> 00000010 (Qpitch = 4)
* BG15 -> 00000000
*/
*cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2);
*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f;
*cs++ = 0;
*cs++ = 4 << 16 | 1;
*cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
*cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0;
*cs++ = 0x20004004;
*cs++ = 0x10;
*cs++ = 0;
return cs;
}
static u32 *
xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
{
/* Wa_16018031267, Wa_16018063123 */
if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
cs = xehp_emit_fastcolor_blt_wabb(ce, cs);
return cs;
}
static void
setup_per_ctx_bb(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 *(*emit)(const struct intel_context *, u32 *))
{
/* Place PER_CTX_BB on next page after INDIRECT_CTX */
u32 * const start = context_wabb(ce, true);
u32 *cs;
cs = emit(ce, start);
/* PER_CTX_BB must manually terminate */
*cs++ = MI_BATCH_BUFFER_END;
GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,
lrc_indirect_bb(ce) + PAGE_SIZE);
}
static void
setup_indirect_ctx_bb(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 *(*emit)(const struct intel_context *, u32 *))
{
u32 * const start = context_indirect_bb(ce);
u32 * const start = context_wabb(ce, false);
u32 *cs;
cs = emit(ce, start);
@ -1511,6 +1604,7 @@ u32 lrc_update_regs(const struct intel_context *ce,
/* Mutually exclusive wrt to global indirect bb */
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb);
}
return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;

@ -1293,7 +1293,7 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
if (msg)
drm_notice(&engine->i915->drm,
"Resetting %s for %s\n", engine->name, msg);
atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
ret = intel_gt_reset_engine(engine);
if (ret) {

@ -849,13 +849,12 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
if (sseu->max_slices == 0) {
if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n");
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
sseu_print_xehp_topology(sseu, p);
} else {
else
sseu_print_hsw_topology(sseu, p);
}
}
void intel_sseu_print_ss_info(const char *type,

@ -1662,9 +1662,23 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
debug_dump_steering(gt);
}
static void
wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct intel_engine_cs *engine;
int id;
for_each_engine(engine, gt, id)
if (engine->class == VIDEO_DECODE_CLASS)
wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
MFXPIPE_CLKGATE_DIS);
}
static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
wa_16021867713(gt, wal);
/*
* Wa_14018778641
* Wa_18018781329
@ -1674,6 +1688,9 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
debug_dump_steering(gt);
}
@ -2340,14 +2357,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, true);
}
if (IS_DG2_G11(i915) || IS_DG2_G10(i915)) {
/* Wa_22014600077:dg2 */
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
_MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
0 /* Wa_14012342262 write-only reg, so skip verification */,
true);
}
if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
@ -2782,6 +2791,11 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
}
/* Wa_16018031267, Wa_16018063123 */
if (NEEDS_FASTCOLOR_BLT_WABB(engine))
wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
XEHP_BLITTER_SCHEDULING_MODE_MASK,
XEHP_BLITTER_ROUND_ROBIN_MODE);
}
static void
@ -2915,6 +2929,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* Wa_22015475538:dg2
*/
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
/* Wa_18028616096 */
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
}
if (IS_DG2_G11(i915)) {
@ -2943,11 +2960,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
true);
}
if (IS_DG2_G10(i915) || IS_DG2_G12(i915)) {
/* Wa_18028616096 */
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
}
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,

@ -21,20 +21,22 @@ static int cmp_u32(const void *A, const void *B)
return *a - *b;
}
static void perf_begin(struct intel_gt *gt)
static intel_wakeref_t perf_begin(struct intel_gt *gt)
{
intel_gt_pm_get(gt);
intel_wakeref_t wakeref = intel_gt_pm_get(gt);
/* Boost gpufreq to max [waitboost] and keep it fixed */
atomic_inc(&gt->rps.num_waiters);
queue_work(gt->i915->unordered_wq, &gt->rps.work);
flush_work(&gt->rps.work);
return wakeref;
}
static int perf_end(struct intel_gt *gt)
static int perf_end(struct intel_gt *gt, intel_wakeref_t wakeref)
{
atomic_dec(&gt->rps.num_waiters);
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
return igt_flush_test(gt->i915);
}
@ -133,12 +135,13 @@ static int perf_mi_bb_start(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
perf_begin(gt);
wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *batch;
@ -207,7 +210,7 @@ static int perf_mi_bb_start(void *arg)
pr_info("%s: MI_BB_START cycles: %u\n",
engine->name, trifilter(cycles));
}
if (perf_end(gt))
if (perf_end(gt, wakeref))
err = -EIO;
return err;
@ -260,12 +263,13 @@ static int perf_mi_noop(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
perf_begin(gt);
wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *base, *nop;
@ -364,7 +368,7 @@ static int perf_mi_noop(void *arg)
pr_info("%s: 16K MI_NOOP cycles: %u\n",
engine->name, trifilter(cycles));
}
if (perf_end(gt))
if (perf_end(gt, wakeref))
err = -EIO;
return err;

@ -84,7 +84,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p)
{
i915_active_unlock_wait(&p->active);
wait_var_event_timeout(&p->active, i915_active_is_idle(&p->active), HZ);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,

@ -81,6 +81,7 @@ static int live_gt_clocks(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
if (!gt->clock_frequency) { /* unknown */
@ -91,7 +92,7 @@ static int live_gt_clocks(void *arg)
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
intel_gt_pm_get(gt);
wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for_each_engine(engine, gt, id) {
@ -128,7 +129,7 @@ static int live_gt_clocks(void *arg)
}
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
return err;
}

@ -1555,7 +1555,7 @@ static int live_lrc_isolation(void *arg)
return err;
}
static int indirect_ctx_submit_req(struct intel_context *ce)
static int wabb_ctx_submit_req(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;
@ -1579,7 +1579,8 @@ static int indirect_ctx_submit_req(struct intel_context *ce)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
static u32 *
emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
emit_wabb_ctx_canary(const struct intel_context *ce,
u32 *cs, bool per_ctx)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
@ -1587,26 +1588,43 @@ emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
*cs++ = i915_mmio_reg_offset(RING_START(0));
*cs++ = i915_ggtt_offset(ce->state) +
context_wa_bb_offset(ce) +
CTX_BB_CANARY_OFFSET;
CTX_BB_CANARY_OFFSET +
(per_ctx ? PAGE_SIZE : 0);
*cs++ = 0;
return cs;
}
static void
indirect_ctx_bb_setup(struct intel_context *ce)
static u32 *
emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
{
u32 *cs = context_indirect_bb(ce);
return emit_wabb_ctx_canary(ce, cs, false);
}
static u32 *
emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
{
return emit_wabb_ctx_canary(ce, cs, true);
}
static void
wabb_ctx_setup(struct intel_context *ce, bool per_ctx)
{
u32 *cs = context_wabb(ce, per_ctx);
cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
if (per_ctx)
setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary);
else
setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}
static bool check_ring_start(struct intel_context *ce)
static bool check_ring_start(struct intel_context *ce, bool per_ctx)
{
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
LRC_STATE_OFFSET + context_wa_bb_offset(ce);
LRC_STATE_OFFSET + context_wa_bb_offset(ce) +
(per_ctx ? PAGE_SIZE : 0);
if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
return true;
@ -1618,21 +1636,21 @@ static bool check_ring_start(struct intel_context *ce)
return false;
}
static int indirect_ctx_bb_check(struct intel_context *ce)
static int wabb_ctx_check(struct intel_context *ce, bool per_ctx)
{
int err;
err = indirect_ctx_submit_req(ce);
err = wabb_ctx_submit_req(ce);
if (err)
return err;
if (!check_ring_start(ce))
if (!check_ring_start(ce, per_ctx))
return -EINVAL;
return 0;
}
static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
static int __lrc_wabb_ctx(struct intel_engine_cs *engine, bool per_ctx)
{
struct intel_context *a, *b;
int err;
@ -1667,14 +1685,14 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
* As ring start is restored apriori of starting the indirect ctx bb and
* as it will be different for each context, it fits to this purpose.
*/
indirect_ctx_bb_setup(a);
indirect_ctx_bb_setup(b);
wabb_ctx_setup(a, per_ctx);
wabb_ctx_setup(b, per_ctx);
err = indirect_ctx_bb_check(a);
err = wabb_ctx_check(a, per_ctx);
if (err)
goto unpin_b;
err = indirect_ctx_bb_check(b);
err = wabb_ctx_check(b, per_ctx);
unpin_b:
intel_context_unpin(b);
@ -1688,7 +1706,7 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
return err;
}
static int live_lrc_indirect_ctx_bb(void *arg)
static int lrc_wabb_ctx(void *arg, bool per_ctx)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@ -1697,7 +1715,7 @@ static int live_lrc_indirect_ctx_bb(void *arg)
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
err = __live_lrc_indirect_ctx_bb(engine);
err = __lrc_wabb_ctx(engine, per_ctx);
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
@ -1710,6 +1728,16 @@ static int live_lrc_indirect_ctx_bb(void *arg)
return err;
}
static int live_lrc_indirect_ctx_bb(void *arg)
{
return lrc_wabb_ctx(arg, false);
}
static int live_lrc_per_ctx_bb(void *arg)
{
return lrc_wabb_ctx(arg, true);
}
static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
@ -1947,6 +1975,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
SUBTEST(live_lrc_indirect_ctx_bb),
SUBTEST(live_lrc_per_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))

@ -261,11 +261,12 @@ static int igt_atomic_reset(void *arg)
{
struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
intel_gt_pm_get(gt);
wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@ -296,7 +297,7 @@ static int igt_atomic_reset(void *arg)
unlock:
igt_global_reset_unlock(gt);
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
return err;
}
@ -307,6 +308,7 @@ static int igt_atomic_engine_reset(void *arg)
const typeof(*igt_atomic_phases) *p;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
@ -317,7 +319,7 @@ static int igt_atomic_engine_reset(void *arg)
if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
intel_gt_pm_get(gt);
wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@ -365,7 +367,7 @@ static int igt_atomic_engine_reset(void *arg)
out_unlock:
igt_global_reset_unlock(gt);
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
return err;
}

@ -224,6 +224,7 @@ int live_rps_clock_interval(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err = 0;
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
@ -236,7 +237,7 @@ int live_rps_clock_interval(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
intel_gt_pm_get(gt);
wakeref = intel_gt_pm_get(gt);
intel_rps_disable(&gt->rps);
intel_gt_check_clock_frequency(gt);
@ -355,7 +356,7 @@ int live_rps_clock_interval(void *arg)
}
intel_rps_enable(&gt->rps);
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@ -376,6 +377,7 @@ int live_rps_control(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
intel_wakeref_t wakeref;
int err = 0;
/*
@ -398,7 +400,7 @@ int live_rps_control(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
intel_gt_pm_get(gt);
wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
ktime_t min_dt, max_dt;
@ -488,7 +490,7 @@ int live_rps_control(void *arg)
break;
}
}
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@ -1023,6 +1025,7 @@ int live_rps_interrupt(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
intel_wakeref_t wakeref;
u32 pm_events;
int err = 0;
@ -1033,9 +1036,9 @@ int live_rps_interrupt(void *arg)
if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
intel_gt_pm_get(gt);
pm_events = rps->pm_events;
intel_gt_pm_put(gt);
pm_events = 0;
with_intel_gt_pm(gt, wakeref)
pm_events = rps->pm_events;
if (!pm_events) {
pr_err("No RPS PM events registered, but RPS is enabled?\n");
return -ENODEV;

@ -266,6 +266,7 @@ static int run_test(struct intel_gt *gt, int test_type)
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
struct igt_spinner spin;
u32 slpc_min_freq, slpc_max_freq;
int err = 0;
@ -311,7 +312,7 @@ static int run_test(struct intel_gt *gt, int test_type)
}
intel_gt_pm_wait_for_idle(gt);
intel_gt_pm_get(gt);
wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
u32 max_act_freq;
@ -397,7 +398,7 @@ static int run_test(struct intel_gt *gt, int test_type)
if (igt_flush_test(gt->i915))
err = -EIO;
intel_gt_pm_put(gt);
intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
intel_gt_pm_wait_for_idle(gt);

@ -322,6 +322,7 @@ static int i915_gsc_proxy_component_bind(struct device *i915_kdev,
gsc->proxy.component = data;
gsc->proxy.component->mei_dev = mei_kdev;
mutex_unlock(&gsc->proxy.mutex);
gt_dbg(gt, "GSC proxy mei component bound\n");
return 0;
}
@ -342,6 +343,7 @@ static void i915_gsc_proxy_component_unbind(struct device *i915_kdev,
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_IE | HECI_H_CSR_RST, 0);
gt_dbg(gt, "GSC proxy mei component unbound\n");
}
static const struct component_ops i915_gsc_proxy_component_ops = {

@ -330,7 +330,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
static u32 guc_ctl_devid(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct drm_i915_private *i915 = guc_to_i915(guc);
return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}

@ -297,6 +297,10 @@ struct intel_guc {
* @number_guc_id_stolen: The number of guc_ids that have been stolen
*/
int number_guc_id_stolen;
/**
* @fast_response_selftest: Backdoor to CT handler for fast response selftest
*/
u32 fast_response_selftest;
#endif
};

@ -355,7 +355,7 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc,
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct drm_i915_private *i915 = guc_to_i915(guc);
const struct __guc_mmio_reg_descr_group *lists;
if (GRAPHICS_VER(i915) >= 12)

@ -265,7 +265,7 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
u32 *cmds;
int err;
err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
err = i915_inject_probe_error(guc_to_i915(guc), -ENXIO);
if (err)
return err;
@ -1076,6 +1076,15 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
found = true;
break;
}
#ifdef CONFIG_DRM_I915_SELFTEST
if (!found && ct_to_guc(ct)->fast_response_selftest) {
CT_DEBUG(ct, "Assuming unsolicited response due to FAST_REQUEST selftest\n");
ct_to_guc(ct)->fast_response_selftest++;
found = true;
}
#endif
if (!found) {
CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n",
len, hxg[0], fence, ct->requests.last_fence);

@ -520,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct drm_i915_private *i915 = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@ -573,7 +573,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@ -589,7 +589,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
static u32 __get_default_log_level(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct drm_i915_private *i915 = guc_to_i915(guc);
/* A negative value means "use platform/config default" */
if (i915->params.guc_log_level < 0) {
@ -664,7 +664,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
int ret = 0;
@ -796,7 +796,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct drm_i915_private *i915 = guc_to_i915(guc);
if (!log->relay.started)
return;

@ -14,7 +14,7 @@ static bool __guc_rc_supported(struct intel_guc *guc)
{
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)

@ -34,7 +34,7 @@ static bool __detect_slpc_supported(struct intel_guc *guc)
{
/* GuC SLPC is unavailable for pre-Gen12 */
return guc->submission_supported &&
GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_slpc_selected(struct intel_guc *guc)

@ -1107,7 +1107,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
if (deregister)
guc_signal_context_fence(ce);
if (destroyed) {
intel_gt_pm_put_async(guc_to_gt(guc));
intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@ -1303,6 +1303,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
unsigned long flags;
u32 reset_count;
bool in_reset;
intel_wakeref_t wakeref;
spin_lock_irqsave(&guc->timestamp.lock, flags);
@ -1325,7 +1326,8 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
/*
@ -1334,7 +1336,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
*/
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt);
intel_gt_pm_put_async(gt, wakeref);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
guc->timestamp.gt_stamp = gt_stamp_saved;
@ -3385,9 +3387,9 @@ static void destroyed_worker_func(struct work_struct *w)
struct intel_guc *guc = container_of(w, struct intel_guc,
submission_state.destroyed_worker);
struct intel_gt *gt = guc_to_gt(guc);
int tmp;
intel_wakeref_t wakeref;
with_intel_gt_pm(gt, tmp)
with_intel_gt_pm(gt, wakeref)
deregister_destroyed_contexts(guc);
}
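The busyness and destroyed-worker hunks above illustrate the new tracked GT PM convention: intel_gt_pm_get_if_awake() now returns an intel_wakeref_t cookie (0 when the GT is parked), intel_gt_pm_put_async() consumes that cookie, and the _untracked variants remain for callers that cannot carry one. A minimal usage sketch, assuming those signatures:

intel_wakeref_t wakeref;

wakeref = intel_gt_pm_get_if_awake(gt);
if (!wakeref)
        return; /* GT parked: report idle rather than waking it up */

/* ... sample GuC/hardware state while the GT is held awake ... */

intel_gt_pm_put_async(gt, wakeref); /* release with the matching cookie */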
@ -4624,12 +4626,12 @@ static bool __guc_submission_supported(struct intel_guc *guc)
{
/* GuC submission is unavailable for pre-Gen11 */
return intel_guc_is_supported(guc) &&
GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
GRAPHICS_VER(guc_to_i915(guc)) >= 11;
}
static bool __guc_submission_selected(struct intel_guc *guc)
{
struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct drm_i915_private *i915 = guc_to_i915(guc);
if (!intel_guc_submission_is_supported(guc))
return false;
@ -4894,7 +4896,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
intel_context_put(ce);
} else if (context_destroyed(ce)) {
/* Context has been destroyed */
intel_gt_pm_put_async(guc_to_gt(guc));
intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@ -5001,7 +5003,8 @@ static void capture_error_state(struct intel_guc *guc,
if (match) {
intel_engine_set_hung_context(e, ce);
engine_mask |= e->mask;
atomic_inc(&i915->gpu_error.reset_engine_count[e->uabi_class]);
i915_increase_reset_engine_count(&i915->gpu_error,
e);
}
}
@ -5013,7 +5016,7 @@ static void capture_error_state(struct intel_guc *guc,
} else {
intel_engine_set_hung_context(ce->engine, ce);
engine_mask = ce->engine->mask;
atomic_inc(&i915->gpu_error.reset_engine_count[ce->engine->uabi_class]);
i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
}
with_intel_runtime_pm(&i915->runtime_pm, wakeref)

View File

@ -106,11 +106,6 @@ static void __confirm_options(struct intel_uc *uc)
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
!intel_uc_supports_huc(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "HuC is not supported!");
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",

View File

@ -286,11 +286,126 @@ static int intel_guc_steal_guc_ids(void *arg)
return ret;
}
/*
* Send a context schedule H2G message with an invalid context id.
* This should generate a GUC_RESULT_INVALID_CONTEXT response.
*/
static int bad_h2g(struct intel_guc *guc)
{
u32 action[] = {
INTEL_GUC_ACTION_SCHED_CONTEXT,
0x12345678,
};
return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
}
/*
* Set a spinner running to make sure the system is alive and active,
* then send a bad but asynchronous H2G command and wait to see if an
* error response is returned. If no response is received or if the
* spinner dies then the test will fail.
*/
#define FAST_RESPONSE_TIMEOUT_MS 1000
static int intel_guc_fast_request(void *arg)
{
struct intel_gt *gt = arg;
struct intel_context *ce;
struct igt_spinner spin;
struct i915_request *rq;
intel_wakeref_t wakeref;
struct intel_engine_cs *engine = intel_selftest_find_any_engine(gt);
bool spinning = false;
int ret = 0;
if (!engine)
return 0;
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
ce = intel_context_create(engine);
if (IS_ERR(ce)) {
ret = PTR_ERR(ce);
gt_err(gt, "Failed to create spinner request: %pe\n", ce);
goto err_pm;
}
ret = igt_spinner_init(&spin, engine->gt);
if (ret) {
gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
goto err_pm;
}
spinning = true;
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
gt_err(gt, "Failed to create spinner request: %pe\n", rq);
goto err_spin;
}
ret = request_add_spin(rq, &spin);
if (ret) {
gt_err(gt, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
goto err_rq;
}
gt->uc.guc.fast_response_selftest = 1;
ret = bad_h2g(&gt->uc.guc);
if (ret) {
gt_err(gt, "Failed to send H2G: %pe\n", ERR_PTR(ret));
goto err_rq;
}
ret = wait_for(gt->uc.guc.fast_response_selftest != 1 || i915_request_completed(rq),
FAST_RESPONSE_TIMEOUT_MS);
if (ret) {
gt_err(gt, "Request wait failed: %pe\n", ERR_PTR(ret));
goto err_rq;
}
if (i915_request_completed(rq)) {
gt_err(gt, "Spinner died waiting for fast request error!\n");
ret = -EIO;
goto err_rq;
}
if (gt->uc.guc.fast_response_selftest != 2) {
gt_err(gt, "Unexpected fast response count: %d\n",
gt->uc.guc.fast_response_selftest);
ret = -EIO;
goto err_rq;
}
igt_spinner_end(&spin);
spinning = false;
ret = intel_selftest_wait_for_rq(rq);
if (ret) {
gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
goto err_rq;
}
err_rq:
i915_request_put(rq);
err_spin:
if (spinning)
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
err_pm:
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
return ret;
}
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_scrub_ctbs),
SUBTEST(intel_guc_steal_guc_ids),
SUBTEST(intel_guc_fast_request),
};
struct intel_gt *gt = to_gt(i915);

View File

@ -74,7 +74,7 @@ static int intel_hang_guc(void *arg)
goto err;
}
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);

View File

@ -51,6 +51,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_driver.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
@ -299,107 +300,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
return 0;
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
struct i915_gpu_coredump *error;
ssize_t ret;
void *buf;
error = file->private_data;
if (!error)
return 0;
/* Bounce buffer required because of kernfs __user API convenience. */
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
if (ret <= 0)
goto out;
if (!copy_to_user(ubuf, buf, ret))
*pos += ret;
else
ret = -EFAULT;
out:
kfree(buf);
return ret;
}
static int gpu_state_release(struct inode *inode, struct file *file)
{
i915_gpu_coredump_put(file->private_data);
return 0;
}
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
struct i915_gpu_coredump *gpu;
intel_wakeref_t wakeref;
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
file->private_data = gpu;
return 0;
}
static const struct file_operations i915_gpu_info_fops = {
.owner = THIS_MODULE,
.open = i915_gpu_info_open,
.read = gpu_state_read,
.llseek = default_llseek,
.release = gpu_state_release,
};
static ssize_t
i915_error_state_write(struct file *filp,
const char __user *ubuf,
size_t cnt,
loff_t *ppos)
{
struct i915_gpu_coredump *error = filp->private_data;
if (!error)
return 0;
drm_dbg(&error->i915->drm, "Resetting error state\n");
i915_reset_error_state(error->i915);
return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
struct i915_gpu_coredump *error;
error = i915_first_error_state(inode->i_private);
if (IS_ERR(error))
return PTR_ERR(error);
file->private_data = error;
return 0;
}
static const struct file_operations i915_error_state_fops = {
.owner = THIS_MODULE,
.open = i915_error_state_open,
.read = gpu_state_read,
.write = i915_error_state_write,
.llseek = default_llseek,
.release = gpu_state_release,
};
#endif
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@ -839,10 +739,6 @@ static const struct i915_debugfs_files {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
};
void i915_debugfs_register(struct drm_i915_private *dev_priv)
@ -865,4 +761,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv)
drm_debugfs_create_files(i915_debugfs_list,
ARRAY_SIZE(i915_debugfs_list),
minor->debugfs_root, minor);
i915_gpu_error_debugfs_register(dev_priv);
}

View File

@ -798,7 +798,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset2;
intel_pxp_init(i915);
ret = intel_pxp_init(i915);
if (ret && ret != -ENODEV)
drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
if (ret)
@ -1033,7 +1035,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
intel_runtime_pm_driver_release(&i915->runtime_pm);
intel_runtime_pm_driver_last_release(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)

View File

@ -28,6 +28,10 @@ struct i915_drm_client *i915_drm_client_alloc(void)
kref_init(&client->kref);
spin_lock_init(&client->ctx_lock);
INIT_LIST_HEAD(&client->ctx_list);
#ifdef CONFIG_PROC_FS
spin_lock_init(&client->objects_lock);
INIT_LIST_HEAD(&client->objects_list);
#endif
return client;
}
@ -41,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
}
#ifdef CONFIG_PROC_FS
static void
obj_meminfo(struct drm_i915_gem_object *obj,
struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
{
const enum intel_region_id id = obj->mm.region ?
obj->mm.region->id : INTEL_REGION_SMEM;
const u64 sz = obj->base.size;
if (obj->base.handle_count > 1)
stats[id].shared += sz;
else
stats[id].private += sz;
if (i915_gem_object_has_pages(obj)) {
stats[id].resident += sz;
if (!dma_resv_test_signaled(obj->base.resv,
DMA_RESV_USAGE_BOOKKEEP))
stats[id].active += sz;
else if (i915_gem_object_is_shrinkable(obj) &&
obj->mm.madv == I915_MADV_DONTNEED)
stats[id].purgeable += sz;
}
}
static void show_meminfo(struct drm_printer *p, struct drm_file *file)
{
struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
struct drm_i915_file_private *fpriv = file->driver_priv;
struct i915_drm_client *client = fpriv->client;
struct drm_i915_private *i915 = fpriv->i915;
struct drm_i915_gem_object *obj;
struct intel_memory_region *mr;
struct list_head __rcu *pos;
unsigned int id;
/* Public objects. */
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, obj, id)
obj_meminfo(obj, stats);
spin_unlock(&file->table_lock);
/* Internal objects. */
rcu_read_lock();
list_for_each_rcu(pos, &client->objects_list) {
obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
client_link));
if (!obj)
continue;
obj_meminfo(obj, stats);
i915_gem_object_put(obj);
}
rcu_read_unlock();
for_each_memory_region(mr, i915, id)
drm_print_memory_stats(p,
&stats[id],
DRM_GEM_OBJECT_RESIDENT |
DRM_GEM_OBJECT_PURGEABLE,
mr->uabi_name);
}
static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@ -102,10 +168,52 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
* ******************************************************************
*/
show_meminfo(p, file);
if (GRAPHICS_VER(i915) < 8)
return;
for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
show_client_class(p, i915, file_priv->client, i);
}
void i915_drm_client_add_object(struct i915_drm_client *client,
struct drm_i915_gem_object *obj)
{
unsigned long flags;
GEM_WARN_ON(obj->client);
GEM_WARN_ON(!list_empty(&obj->client_link));
spin_lock_irqsave(&client->objects_lock, flags);
obj->client = i915_drm_client_get(client);
list_add_tail_rcu(&obj->client_link, &client->objects_list);
spin_unlock_irqrestore(&client->objects_lock, flags);
}
void i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
{
struct i915_drm_client *client = fetch_and_zero(&obj->client);
unsigned long flags;
/* Object may not be associated with a client. */
if (!client)
return;
spin_lock_irqsave(&client->objects_lock, flags);
list_del_rcu(&obj->client_link);
spin_unlock_irqrestore(&client->objects_lock, flags);
i915_drm_client_put(client);
}
void i915_drm_client_add_context_objects(struct i915_drm_client *client,
struct intel_context *ce)
{
if (ce->state)
i915_drm_client_add_object(client, ce->state->obj);
if (ce->ring != ce->engine->legacy.ring && ce->ring->vma)
i915_drm_client_add_object(client, ce->ring->vma->obj);
}
#endif
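With the per-region totals accumulated above, drm_print_memory_stats() writes one drm-<stat>-<region> key per category into the client's fdinfo, with regions named by mr->uabi_name. Illustrative output only (hypothetical values; a discrete GPU would additionally report local0 lines):

drm-total-system0:      8192 KiB
drm-shared-system0:     0
drm-active-system0:     0
drm-resident-system0:   4096 KiB
drm-purgeable-system0:  1024 KiB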

View File

@ -12,6 +12,10 @@
#include <uapi/drm/i915_drm.h>
#include "i915_file_private.h"
#include "gem/i915_gem_object_types.h"
#include "gt/intel_context_types.h"
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
struct drm_file;
@ -25,6 +29,20 @@ struct i915_drm_client {
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */
#ifdef CONFIG_PROC_FS
/**
* @objects_lock: lock protecting @objects_list
*/
spinlock_t objects_lock;
/**
* @objects_list: list of objects created by this client
*
* Protected by @objects_lock.
*/
struct list_head objects_list;
#endif
/**
* @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
*/
@ -49,4 +67,28 @@ struct i915_drm_client *i915_drm_client_alloc(void);
void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
#ifdef CONFIG_PROC_FS
void i915_drm_client_add_object(struct i915_drm_client *client,
struct drm_i915_gem_object *obj);
void i915_drm_client_remove_object(struct drm_i915_gem_object *obj);
void i915_drm_client_add_context_objects(struct i915_drm_client *client,
struct intel_context *ce);
#else
static inline void i915_drm_client_add_object(struct i915_drm_client *client,
struct drm_i915_gem_object *obj)
{
}
static inline void
i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
{
}
static inline void
i915_drm_client_add_context_objects(struct i915_drm_client *client,
struct intel_context *ce)
{
}
#endif
#endif /* !__I915_DRM_CLIENT_H__ */

View File

@ -57,6 +57,7 @@
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
#include "i915_sysfs.h"
#include "i915_utils.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@ -520,7 +521,7 @@ __find_vma(struct i915_vma_coredump *vma, const char *name)
return NULL;
}
struct i915_vma_coredump *
static struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
return __find_vma(ee->vma, "batch");
@ -609,9 +610,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
const struct intel_engine_cs *engine,
const struct i915_vma_coredump *vma)
static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
const struct intel_engine_cs *engine,
const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
struct page *page;
@ -2140,7 +2141,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
return error;
}
struct i915_gpu_coredump *
static struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
static DEFINE_MUTEX(capture_mutex);
@ -2211,7 +2212,7 @@ void i915_capture_error_state(struct intel_gt *gt,
i915_gpu_coredump_put(error);
}
struct i915_gpu_coredump *
static struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
struct i915_gpu_coredump *error;
@ -2378,3 +2379,184 @@ void intel_klog_error_capture(struct intel_gt *gt,
drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
}
#endif
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
struct i915_gpu_coredump *error;
ssize_t ret;
void *buf;
error = file->private_data;
if (!error)
return 0;
/* Bounce buffer required because of kernfs __user API convenience. */
buf = kmalloc(count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
if (ret <= 0)
goto out;
if (!copy_to_user(ubuf, buf, ret))
*pos += ret;
else
ret = -EFAULT;
out:
kfree(buf);
return ret;
}
static int gpu_state_release(struct inode *inode, struct file *file)
{
i915_gpu_coredump_put(file->private_data);
return 0;
}
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
struct i915_gpu_coredump *gpu;
intel_wakeref_t wakeref;
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
file->private_data = gpu;
return 0;
}
static const struct file_operations i915_gpu_info_fops = {
.owner = THIS_MODULE,
.open = i915_gpu_info_open,
.read = gpu_state_read,
.llseek = default_llseek,
.release = gpu_state_release,
};
static ssize_t
i915_error_state_write(struct file *filp,
const char __user *ubuf,
size_t cnt,
loff_t *ppos)
{
struct i915_gpu_coredump *error = filp->private_data;
if (!error)
return 0;
drm_dbg(&error->i915->drm, "Resetting error state\n");
i915_reset_error_state(error->i915);
return cnt;
}
static int i915_error_state_open(struct inode *inode, struct file *file)
{
struct i915_gpu_coredump *error;
error = i915_first_error_state(inode->i_private);
if (IS_ERR(error))
return PTR_ERR(error);
file->private_data = error;
return 0;
}
static const struct file_operations i915_error_state_fops = {
.owner = THIS_MODULE,
.open = i915_error_state_open,
.read = gpu_state_read,
.write = i915_error_state_write,
.llseek = default_llseek,
.release = gpu_state_release,
};
void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
&i915_error_state_fops);
debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
&i915_gpu_info_fops);
}
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_coredump *gpu;
ssize_t ret = 0;
/*
* FIXME: Concurrent clients triggering resets and reading + clearing
* dumps can cause inconsistent sysfs reads when a user calls in with a
* non-zero offset to complete a prior partial read but the
* gpu_coredump has been cleared or replaced.
*/
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
ret = PTR_ERR(gpu);
} else if (gpu) {
ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
i915_gpu_coredump_put(gpu);
} else {
const char *str = "No error state collected\n";
size_t len = strlen(str);
if (off < len) {
ret = min_t(size_t, count, len - off);
memcpy(buf, str + off, ret);
}
}
return ret;
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
drm_dbg(&dev_priv->drm, "Resetting error state\n");
i915_reset_error_state(dev_priv);
return count;
}
static const struct bin_attribute error_state_attr = {
.attr.name = "error",
.attr.mode = S_IRUSR | S_IWUSR,
.size = 0,
.read = error_state_read,
.write = error_state_write,
};
void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
{
struct device *kdev = i915->drm.primary->kdev;
if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
drm_err(&i915->drm, "error_state sysfs setup failed\n");
}
void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
{
struct device *kdev = i915->drm.primary->kdev;
sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}

View File

@ -17,6 +17,7 @@
#include "display/intel_display_device.h"
#include "display/intel_display_params.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_gt_types.h"
#include "gt/uc/intel_uc_fw.h"
@ -234,7 +235,7 @@ struct i915_gpu_error {
atomic_t reset_count;
/** Number of times an engine has been reset */
atomic_t reset_engine_count[I915_NUM_ENGINES];
atomic_t reset_engine_count[MAX_ENGINE_CLASS];
};
struct drm_i915_error_state_buf {
@ -257,7 +258,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
const struct intel_engine_cs *engine)
{
return atomic_read(&error->reset_engine_count[engine->uabi_class]);
return atomic_read(&error->reset_engine_count[engine->class]);
}
static inline void
i915_increase_reset_engine_count(struct i915_gpu_error *error,
const struct intel_engine_cs *engine)
{
atomic_inc(&error->reset_engine_count[engine->class]);
}
#define CORE_DUMP_FLAG_NONE 0x0
@ -277,14 +285,7 @@ static inline void intel_klog_error_capture(struct intel_gt *gt,
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
const struct intel_engine_cs *engine,
const struct i915_vma_coredump *vma);
struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee);
struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt,
intel_engine_mask_t engine_mask, u32 dump_flags);
void i915_capture_error_state(struct intel_gt *gt,
intel_engine_mask_t engine_mask, u32 dump_flags);
@ -332,10 +333,13 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
kref_put(&gpu->ref, __i915_gpu_coredump_free);
}
struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
void i915_gpu_error_debugfs_register(struct drm_i915_private *i915);
void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915);
void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915);
#else
__printf(2, 3)
@ -403,12 +407,6 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
static inline struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
return ERR_PTR(-ENODEV);
}
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
@ -418,6 +416,18 @@ static inline void i915_disable_error_state(struct drm_i915_private *i915,
{
}
static inline void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
{
}
static inline void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
{
}
static inline void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
{
}
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */

View File

@ -31,6 +31,16 @@
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
static struct i915_pmu *event_to_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct i915_pmu, base);
}
static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
{
return container_of(pmu, struct drm_i915_private, pmu);
}
static u8 engine_config_sample(u64 config)
{
return config & I915_PMU_SAMPLE_MASK;
@ -141,7 +151,7 @@ static u32 frequency_enabled_mask(void)
static bool pmu_needs_timer(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
u32 enable;
/*
@ -213,19 +223,19 @@ static u64 get_rc6(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
intel_wakeref_t wakeref;
unsigned long flags;
bool awake = false;
u64 val;
if (intel_gt_pm_get_if_awake(gt)) {
wakeref = intel_gt_pm_get_if_awake(gt);
if (wakeref) {
val = __get_rc6(gt);
intel_gt_pm_put_async(gt);
awake = true;
intel_gt_pm_put_async(gt, wakeref);
}
spin_lock_irqsave(&pmu->lock, flags);
if (awake) {
if (wakeref) {
store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
} else {
/*
@ -251,7 +261,7 @@ static u64 get_rc6(struct intel_gt *gt)
static void init_rc6(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_gt *gt;
unsigned int i;
@ -429,12 +439,14 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
intel_wakeref_t wakeref;
if (!frequency_sampling_enabled(pmu, gt_id))
return;
/* Report 0/0 (actual/requested) frequency while parked. */
if (!intel_gt_pm_get_if_awake(gt))
wakeref = intel_gt_pm_get_if_awake(gt);
if (!wakeref)
return;
if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
@ -463,14 +475,13 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
period_ns / 1000);
}
intel_gt_pm_put_async(gt);
intel_gt_pm_put_async(gt, wakeref);
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
struct drm_i915_private *i915 =
container_of(hrtimer, struct drm_i915_private, pmu.timer);
struct i915_pmu *pmu = &i915->pmu;
struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
unsigned int period_ns;
struct intel_gt *gt;
unsigned int i;
@ -505,8 +516,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
static void i915_pmu_event_destroy(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = event_to_pmu(event);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
drm_WARN_ON(&i915->drm, event->parent);
@ -572,8 +583,8 @@ config_status(struct drm_i915_private *i915, u64 config)
static int engine_event_init(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = event_to_pmu(event);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_engine_cs *engine;
engine = intel_engine_lookup_user(i915, engine_event_class(event),
@ -586,9 +597,8 @@ static int engine_event_init(struct perf_event *event)
static int i915_pmu_event_init(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = &i915->pmu;
struct i915_pmu *pmu = event_to_pmu(event);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
int ret;
if (pmu->closed)
@ -628,9 +638,8 @@ static int i915_pmu_event_init(struct perf_event *event)
static u64 __i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = &i915->pmu;
struct i915_pmu *pmu = event_to_pmu(event);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
u64 val = 0;
if (is_engine_event(event)) {
@ -686,10 +695,8 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = event_to_pmu(event);
struct hw_perf_event *hwc = &event->hw;
struct i915_pmu *pmu = &i915->pmu;
u64 prev, new;
if (pmu->closed) {
@ -707,10 +714,9 @@ static void i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = event_to_pmu(event);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@ -771,10 +777,9 @@ static void i915_pmu_enable(struct perf_event *event)
static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = event_to_pmu(event);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@ -818,9 +823,7 @@ static void i915_pmu_disable(struct perf_event *event)
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = &i915->pmu;
struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return;
@ -848,9 +851,7 @@ static void i915_pmu_event_stop(struct perf_event *event, int flags)
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
struct i915_pmu *pmu = &i915->pmu;
struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return -ENODEV;
@ -982,7 +983,7 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
struct drm_i915_private *i915 = pmu_to_i915(pmu);
static const struct {
unsigned int counter;
const char *name;

View File

@ -155,81 +155,6 @@ static const struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_coredump *gpu;
ssize_t ret = 0;
/*
* FIXME: Concurrent clients triggering resets and reading + clearing
* dumps can cause inconsistent sysfs reads when a user calls in with a
* non-zero offset to complete a prior partial read but the
* gpu_coredump has been cleared or replaced.
*/
gpu = i915_first_error_state(i915);
if (IS_ERR(gpu)) {
ret = PTR_ERR(gpu);
} else if (gpu) {
ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
i915_gpu_coredump_put(gpu);
} else {
const char *str = "No error state collected\n";
size_t len = strlen(str);
if (off < len) {
ret = min_t(size_t, count, len - off);
memcpy(buf, str + off, ret);
}
}
return ret;
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
drm_dbg(&dev_priv->drm, "Resetting error state\n");
i915_reset_error_state(dev_priv);
return count;
}
static const struct bin_attribute error_state_attr = {
.attr.name = "error",
.attr.mode = S_IRUSR | S_IWUSR,
.size = 0,
.read = error_state_read,
.write = error_state_write,
};
static void i915_setup_error_capture(struct device *kdev)
{
if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
drm_err(&kdev_minor_to_i915(kdev)->drm,
"error_state sysfs setup failed\n");
}
static void i915_teardown_error_capture(struct device *kdev)
{
sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@ -255,7 +180,7 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
drm_warn(&dev_priv->drm,
"failed to register GT sysfs directory\n");
i915_setup_error_capture(kdev);
i915_gpu_error_sysfs_setup(dev_priv);
intel_engines_add_sysfs(dev_priv);
}
@ -264,7 +189,7 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
i915_teardown_error_capture(kdev);
i915_gpu_error_sysfs_teardown(dev_priv);
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);

View File

@ -216,6 +216,22 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
static const char *region_type_str(u16 type)
{
switch (type) {
case INTEL_MEMORY_SYSTEM:
return "system";
case INTEL_MEMORY_LOCAL:
return "local";
case INTEL_MEMORY_STOLEN_LOCAL:
return "stolen-local";
case INTEL_MEMORY_STOLEN_SYSTEM:
return "stolen-system";
default:
return "unknown";
}
}
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
@ -244,6 +260,9 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->type = type;
mem->instance = instance;
snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
region_type_str(type), instance);
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
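The uabi_name is simply the type string concatenated with the per-type instance number, yielding stable identifiers such as (assuming a single instance of each type):

INTEL_MEMORY_SYSTEM,       instance 0  ->  "system0"
INTEL_MEMORY_LOCAL,        instance 0  ->  "local0"
INTEL_MEMORY_STOLEN_LOCAL, instance 0  ->  "stolen-local0"

These are the same names that key the drm-*-<region> fdinfo lines shown earlier.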

View File

@ -80,6 +80,7 @@ struct intel_memory_region {
u16 instance;
enum intel_region_id id;
char name[16];
char uabi_name[16];
bool private; /* not for userspace */
struct {

View File

@ -57,182 +57,37 @@ static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
#include <linux/sort.h>
#define STACKDEPTH 8
static noinline depot_stack_handle_t __save_depot_stack(void)
{
unsigned long entries[STACKDEPTH];
unsigned int n;
n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
spin_lock_init(&rpm->debug.lock);
stack_depot_init();
ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
}
static noinline depot_stack_handle_t
static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
depot_stack_handle_t stack, *stacks;
unsigned long flags;
if (rpm->no_wakeref_tracking)
if (!rpm->available || rpm->no_wakeref_tracking)
return -1;
stack = __save_depot_stack();
if (!stack)
return -1;
spin_lock_irqsave(&rpm->debug.lock, flags);
if (!rpm->debug.count)
rpm->debug.last_acquire = stack;
stacks = krealloc(rpm->debug.owners,
(rpm->debug.count + 1) * sizeof(*stacks),
GFP_NOWAIT | __GFP_NOWARN);
if (stacks) {
stacks[rpm->debug.count++] = stack;
rpm->debug.owners = stacks;
} else {
stack = -1;
}
spin_unlock_irqrestore(&rpm->debug.lock, flags);
return stack;
return intel_ref_tracker_alloc(&rpm->debug);
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
depot_stack_handle_t stack)
intel_wakeref_t wakeref)
{
struct drm_i915_private *i915 = container_of(rpm,
struct drm_i915_private,
runtime_pm);
unsigned long flags, n;
bool found = false;
if (unlikely(stack == -1))
if (!rpm->available || rpm->no_wakeref_tracking)
return;
spin_lock_irqsave(&rpm->debug.lock, flags);
for (n = rpm->debug.count; n--; ) {
if (rpm->debug.owners[n] == stack) {
memmove(rpm->debug.owners + n,
rpm->debug.owners + n + 1,
(--rpm->debug.count - n) * sizeof(stack));
found = true;
break;
}
}
spin_unlock_irqrestore(&rpm->debug.lock, flags);
if (drm_WARN(&i915->drm, !found,
"Unmatched wakeref (tracking %lu), count %u\n",
rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
stack = READ_ONCE(rpm->debug.last_release);
if (stack) {
stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
}
kfree(buf);
}
intel_ref_tracker_free(&rpm->debug, wakeref);
}
static int cmphandle(const void *_a, const void *_b)
static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
const depot_stack_handle_t * const a = _a, * const b = _b;
if (*a < *b)
return -1;
else if (*a > *b)
return 1;
else
return 0;
}
static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
const struct intel_runtime_pm_debug *dbg)
{
unsigned long i;
char *buf;
buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
if (dbg->last_acquire) {
stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref last acquired:\n%s", buf);
}
if (dbg->last_release) {
stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref last released:\n%s", buf);
}
drm_printf(p, "Wakeref count: %lu\n", dbg->count);
sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
for (i = 0; i < dbg->count; i++) {
depot_stack_handle_t stack = dbg->owners[i];
unsigned long rep;
rep = 1;
while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
rep++, i++;
stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
}
kfree(buf);
}
static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
struct intel_runtime_pm_debug *saved)
{
*saved = *debug;
debug->owners = NULL;
debug->count = 0;
debug->last_release = __save_depot_stack();
}
static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
if (debug->count) {
struct drm_printer p = drm_debug_printer("i915");
__print_intel_runtime_pm_wakeref(&p, debug);
}
kfree(debug->owners);
ref_tracker_dir_exit(&rpm->debug);
}
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
struct intel_runtime_pm_debug dbg = {};
unsigned long flags;
if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
@ -240,60 +95,14 @@ __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
flags))
return;
__untrack_all_wakerefs(&rpm->debug, &dbg);
ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
spin_unlock_irqrestore(&rpm->debug.lock, flags);
dump_and_free_wakeref_tracking(&dbg);
}
static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
struct intel_runtime_pm_debug dbg = {};
unsigned long flags;
spin_lock_irqsave(&rpm->debug.lock, flags);
__untrack_all_wakerefs(&rpm->debug, &dbg);
spin_unlock_irqrestore(&rpm->debug.lock, flags);
dump_and_free_wakeref_tracking(&dbg);
}
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p)
{
struct intel_runtime_pm_debug dbg = {};
do {
unsigned long alloc = dbg.count;
depot_stack_handle_t *s;
spin_lock_irq(&rpm->debug.lock);
dbg.count = rpm->debug.count;
if (dbg.count <= alloc) {
memcpy(dbg.owners,
rpm->debug.owners,
dbg.count * sizeof(*s));
}
dbg.last_acquire = rpm->debug.last_acquire;
dbg.last_release = rpm->debug.last_release;
spin_unlock_irq(&rpm->debug.lock);
if (dbg.count <= alloc)
break;
s = krealloc(dbg.owners,
dbg.count * sizeof(*s),
GFP_NOWAIT | __GFP_NOWARN);
if (!s)
goto out;
dbg.owners = s;
} while (1);
__print_intel_runtime_pm_wakeref(p, &dbg);
out:
kfree(dbg.owners);
intel_ref_tracker_show(&rpm->debug, p);
}
#else
@ -302,14 +111,14 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
static depot_stack_handle_t
static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
return -1;
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
intel_wakeref_t wref)
intel_wakeref_t wakeref)
{
}
@ -636,7 +445,11 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
"i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
intel_rpm_raw_wakeref_count(count),
intel_rpm_wakelock_count(count));
}
void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
{
intel_runtime_pm_driver_release(rpm);
untrack_all_intel_runtime_pm_wakerefs(rpm);
}
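The conversion above replaces the hand-rolled stack-depot owner array with the generic ref_tracker library. A self-contained sketch of that library's lifecycle, using only the calls that appear in these hunks (ref_tracker_dir_init/exit, ref_tracker_alloc/free); the dir name and function here are illustrative:

#include <linux/ref_tracker.h>

static struct ref_tracker_dir debug_dir; /* one dir per tracked resource */

static void example_hold_and_release(void)
{
        struct ref_tracker *t = NULL;

        ref_tracker_dir_init(&debug_dir, 16, "example"); /* quarantine 16 dead entries */
        ref_tracker_alloc(&debug_dir, &t, GFP_NOWAIT);   /* take: records caller's stack */
        /* ... reference held; the tracker pointer doubles as the handle ... */
        ref_tracker_free(&debug_dir, &t);                /* put: pairs with the alloc */
        ref_tracker_dir_exit(&debug_dir);                /* warns about leaked references */
}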

View File

@ -75,15 +75,7 @@ struct intel_runtime_pm {
* paired rpm_put) we can remove corresponding pairs of and keep
* the array trimmed to active wakerefs.
*/
struct intel_runtime_pm_debug {
spinlock_t lock;
depot_stack_handle_t last_acquire;
depot_stack_handle_t last_release;
depot_stack_handle_t *owners;
unsigned long count;
} debug;
struct ref_tracker_dir debug;
#endif
};
@ -187,6 +179,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);

View File

@ -99,7 +99,8 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
struct intel_wakeref_lockclass *key)
struct intel_wakeref_lockclass *key,
const char *name)
{
wf->i915 = i915;
wf->ops = ops;
@ -111,6 +112,10 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
lockdep_init_map(&wf->work.work.lockdep_map,
"wakeref.work", &key->work, 0);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
#endif
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
@ -191,3 +196,31 @@ void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
intel_wakeref_auto(wf, 0);
INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
void intel_ref_tracker_show(struct ref_tracker_dir *dir,
struct drm_printer *p)
{
const size_t buf_size = PAGE_SIZE;
char *buf, *sb, *se;
size_t count;
buf = kmalloc(buf_size, GFP_NOWAIT);
if (!buf)
return;
count = ref_tracker_dir_snprint(dir, buf, buf_size);
if (!count)
goto free;
/* printk does not like big buffers, so we split it */
for (sb = buf; *sb; sb = se + 1) {
se = strchrnul(sb, '\n');
drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
if (!*se)
break;
}
if (count >= buf_size)
drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
count + 1 - buf_size);
free:
kfree(buf);
}

View File

@ -7,16 +7,25 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
#include <drm/drm_print.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/ref_tracker.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
typedef unsigned long intel_wakeref_t;
#define INTEL_REFTRACK_DEAD_COUNT 16
#define INTEL_REFTRACK_PRINT_LIMIT 16
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
@ -26,8 +35,6 @@
struct intel_runtime_pm;
struct intel_wakeref;
typedef depot_stack_handle_t intel_wakeref_t;
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
@ -43,6 +50,10 @@ struct intel_wakeref {
const struct intel_wakeref_ops *ops;
struct delayed_work work;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
struct ref_tracker_dir debug;
#endif
};
struct intel_wakeref_lockclass {
@ -53,11 +64,12 @@ struct intel_wakeref_lockclass {
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
struct intel_wakeref_lockclass *key);
#define intel_wakeref_init(wf, i915, ops) do { \
struct intel_wakeref_lockclass *key,
const char *name);
#define intel_wakeref_init(wf, i915, ops, name) do { \
static struct intel_wakeref_lockclass __key; \
\
__intel_wakeref_init((wf), (i915), (ops), &__key); \
__intel_wakeref_init((wf), (i915), (ops), &__key, name); \
} while (0)
int __intel_wakeref_get_first(struct intel_wakeref *wf);
@ -261,6 +273,57 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf)
*/
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1))
static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
{
struct ref_tracker *user = NULL;
ref_tracker_alloc(dir, &user, GFP_NOWAIT);
return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF;
}
static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
intel_wakeref_t handle)
{
struct ref_tracker *user;
user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle;
ref_tracker_free(dir, &user);
}
void intel_ref_tracker_show(struct ref_tracker_dir *dir,
struct drm_printer *p);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
{
return intel_ref_tracker_alloc(&wf->debug);
}
static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
intel_wakeref_t handle)
{
intel_ref_tracker_free(&wf->debug, handle);
}
#else
static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
{
return -1;
}
static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
intel_wakeref_t handle)
{
}
#endif
struct intel_wakeref_auto {
struct drm_i915_private *i915;
struct timer_list timer;

View File

@ -199,6 +199,9 @@ int intel_pxp_init(struct drm_i915_private *i915)
struct intel_gt *gt;
bool is_full_feature = false;
if (intel_gt_is_wedged(to_gt(i915)))
return -ENOTCONN;
/*
* NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
* we still need it if PXP's backend tee transport is needed.
@ -303,6 +306,8 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp)
if (!pxp->arb_is_valid)
return 0;
drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");
/*
* To ensure synchronous and coherent session teardown completion
* in response to suspend or shutdown triggers, don't use a worker.
@ -324,6 +329,8 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
if (pxp->arb_is_valid)
return 0;
drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");
/*
* The arb-session is currently inactive and we are doing a reset and restart
* due to a runtime event. Use the worker that was designed for this.
@ -332,8 +339,11 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
timeout = intel_pxp_get_backend_timeout_ms(pxp);
if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
timeout);
return -ETIMEDOUT;
}
return 0;
}
@ -414,10 +424,12 @@ int intel_pxp_start(struct intel_pxp *pxp)
int ret = 0;
ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
if (ret < 0)
if (ret < 0) {
drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
return ret;
else if (ret > 1)
} else if (ret > 1) {
return -EIO; /* per UAPI spec, user may retry later */
}
mutex_lock(&pxp->arb_mutex);

View File

@ -40,11 +40,12 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
/* immediately mark PXP as inactive on termination */
intel_pxp_mark_termination_in_progress(pxp);
pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED |
PXP_EVENT_TYPE_IRQ;
}
if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
pxp->session_events |= PXP_TERMINATION_COMPLETE;
pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
if (pxp->session_events)
queue_work(system_unbound_wq, &pxp->session_work);

View File

@ -137,8 +137,10 @@ void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_res
static void pxp_terminate_complete(struct intel_pxp *pxp)
{
/* Re-create the arb session after teardown handle complete */
if (fetch_and_zero(&pxp->hw_state_invalidated))
if (fetch_and_zero(&pxp->hw_state_invalidated)) {
drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: creating arb_session after invalidation");
pxp_create_arb_session(pxp);
}
complete_all(&pxp->termination);
}
@ -157,6 +159,8 @@ static void pxp_session_work(struct work_struct *work)
if (!events)
return;
drm_dbg(&gt->i915->drm, "PXP: processing event-flags 0x%08x", events);
if (events & PXP_INVAL_REQUIRED)
intel_pxp_invalidate(pxp);

View File

@ -124,6 +124,7 @@ struct intel_pxp {
#define PXP_TERMINATION_REQUEST BIT(0)
#define PXP_TERMINATION_COMPLETE BIT(1)
#define PXP_INVAL_REQUIRED BIT(2)
#define PXP_EVENT_TYPE_IRQ BIT(3)
};
#endif /* __INTEL_PXP_TYPES_H__ */

View File

@ -37,8 +37,9 @@ int igt_live_test_begin(struct igt_live_test *t,
}
for_each_engine(engine, gt, id)
t->reset_engine[id] =
i915_reset_engine_count(&i915->gpu_error, engine);
t->reset_engine[i][id] =
i915_reset_engine_count(&i915->gpu_error,
engine);
}
t->reset_global = i915_reset_count(&i915->gpu_error);
@ -66,14 +67,14 @@ int igt_live_test_end(struct igt_live_test *t)
for_each_gt(gt, i915, i) {
for_each_engine(engine, gt, id) {
if (t->reset_engine[id] ==
if (t->reset_engine[i][id] ==
i915_reset_engine_count(&i915->gpu_error, engine))
continue;
gt_err(gt, "%s(%s): engine '%s' was reset %d times!\n",
t->func, t->name, engine->name,
i915_reset_engine_count(&i915->gpu_error, engine) -
t->reset_engine[id]);
t->reset_engine[i][id]);
return -EIO;
}
}

View File

@ -7,6 +7,7 @@
#ifndef IGT_LIVE_TEST_H
#define IGT_LIVE_TEST_H
#include "gt/intel_gt_defines.h" /* for I915_MAX_GT */
#include "gt/intel_engine.h" /* for I915_NUM_ENGINES */
struct drm_i915_private;
@ -17,7 +18,7 @@ struct igt_live_test {
const char *name;
unsigned int reset_global;
unsigned int reset_engine[I915_NUM_ENGINES];
unsigned int reset_engine[I915_MAX_GT][I915_NUM_ENGINES];
};
/*

View File

@ -738,7 +738,8 @@
INTEL_DG2_G12_IDS(info)
#define INTEL_ATS_M150_IDS(info) \
INTEL_VGA_DEVICE(0x56C0, info)
INTEL_VGA_DEVICE(0x56C0, info), \
INTEL_VGA_DEVICE(0x56C2, info)
#define INTEL_ATS_M75_IDS(info) \
INTEL_VGA_DEVICE(0x56C1, info)