drm/xe: Use xe_pm_runtime_get in xe_bo_move() if reclaim-safe.
xe_bo_move() might be called in the TTM swapout path from validation by
another TTM device. If so, we are not likely to have an RPM reference.

So iff xe_pm_runtime_get() is safe to call from reclaim, use it instead
of xe_pm_runtime_get_noresume(). Strictly this is currently needed only
if handle_system_ccs is true, but use xe_pm_runtime_get() if possible
anyway to increase test coverage.

At the same time, warn if handle_system_ccs is true and we can't call
xe_pm_runtime_get() from reclaim context. This will likely trip if
someone tries to enable SRIOV on LNL without fixing Xe SRIOV runtime
resume / suspend.

Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240903094232.166342-1-thomas.hellstrom@linux.intel.com
commit 34bb7b813a
parent 8da19441d0
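For quick reference, this is the runtime-PM acquisition logic xe_bo_move()
ends up with, condensed from the xe_bo.c hunk below. The wrapper function
name xe_bo_move_rpm_get is purely illustrative (the patch open-codes this
inside xe_bo_move()); the calls, the warning, and the condition are taken
from the diff:

	#include <drm/drm_print.h>	/* drm_WARN_ON() */
	#include "xe_device_types.h"	/* struct xe_device */
	#include "xe_pm.h"		/* xe_pm_runtime_get*(), xe_rpm_reclaim_safe() */

	/* Illustrative wrapper only; the body mirrors the xe_bo.c hunk below. */
	static void xe_bo_move_rpm_get(struct xe_device *xe, bool handle_system_ccs)
	{
		if (xe_rpm_reclaim_safe(xe)) {
			/*
			 * Runtime resume cannot recurse into reclaim here, so a
			 * full get is safe even when entered via another TTM
			 * device's swapout path.
			 */
			xe_pm_runtime_get(xe);
		} else {
			/*
			 * Resuming from reclaim context would be unsafe; system-CCS
			 * moves need the device awake, hence the warning.
			 */
			drm_WARN_ON(&xe->drm, handle_system_ccs);
			xe_pm_runtime_get_noresume(xe);
		}
	}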
drivers/gpu/drm/xe/xe_bo.c
@@ -758,7 +758,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	xe_assert(xe, migrate);
 
 	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
-	xe_pm_runtime_get_noresume(xe);
+	if (xe_rpm_reclaim_safe(xe)) {
+		/*
+		 * We might be called through swapout in the validation path of
+		 * another TTM device, so unconditionally acquire rpm here.
+		 */
+		xe_pm_runtime_get(xe);
+	} else {
+		drm_WARN_ON(&xe->drm, handle_system_ccs);
+		xe_pm_runtime_get_noresume(xe);
+	}
 
 	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
 		/*
drivers/gpu/drm/xe/xe_pm.c
@@ -79,7 +79,14 @@ static struct lockdep_map xe_pm_runtime_nod3cold_map = {
 };
 #endif
 
-static bool __maybe_unused xe_rpm_reclaim_safe(const struct xe_device *xe)
+/**
+ * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
+ * @xe: The xe device.
+ *
+ * Return: true if it is safe to runtime resume from reclaim context.
+ * false otherwise.
+ */
+bool xe_rpm_reclaim_safe(const struct xe_device *xe)
 {
 	return !xe->d3cold.capable && !xe->info.has_sriov;
 }
drivers/gpu/drm/xe/xe_pm.h
@@ -31,6 +31,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
 void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+bool xe_rpm_reclaim_safe(const struct xe_device *xe);
 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
 int xe_pm_module_init(void);
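Note that the xe_pm.c hunk also drops the static __maybe_unused qualifier
and the xe_pm.h hunk exports the declaration, precisely so that xe_bo.c can
reach xe_rpm_reclaim_safe(). Any other path that may run under reclaim can
then pick the right helper the same way. A minimal hypothetical caller (the
function name is invented for illustration; the calls are the real API from
this patch):

	#include "xe_pm.h"

	/* Hypothetical helper, not part of the patch. */
	static void get_rpm_reclaim_tainted(struct xe_device *xe)
	{
		if (xe_rpm_reclaim_safe(xe))
			xe_pm_runtime_get(xe);		/* full get, may resume the device */
		else
			xe_pm_runtime_get_noresume(xe);	/* reference only, never resumes */
	}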