Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
amd-drm-fixes-6.13-2025-01-09:

amdgpu:
- Display interrupt fixes
- Fix display max surface mismatches
- Fix divide error in DM plane scale calcs
- Display divide by 0 checks in dml helpers
- SMU 13 AC/DC interrupt handling fix
- Fix locking around buddy trim handling

amdkfd:
- Fix page fault with shader debugger enabled
- Fix eviction fence wq handling
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQQgO5Idg2tXNTSZAr293/aFa7yZ2AUCZ3/7hAAKCRC93/aFa7yZ
2B9TAP9dR33yUXRVLCfpk9SsuS9Vv33j6u21dfqI2nJm13yc+QEAt4yCdYcJc4PX
SOVFKcetIHB3H/q+mGnD1Y6QNlIbeQE=
=HjFg
-----END PGP SIGNATURE-----

Merge tag 'amd-drm-fixes-6.13-2025-01-09' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.13-2025-01-09:

amdgpu:
- Display interrupt fixes
- Fix display max surface mismatches
- Fix divide error in DM plane scale calcs
- Display divide by 0 checks in dml helpers
- SMU 13 AC/DC interrupt handling fix
- Fix locking around buddy trim handling

amdkfd:
- Fix page fault with shader debugger enabled
- Fix eviction fence wq handling

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250109164236.477295-1-alexander.deucher@amd.com
commit 66d4709abc
@@ -567,7 +567,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 		else
 			remaining_size -= size;
 	}
-	mutex_unlock(&mgr->lock);
 
 	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
 		struct drm_buddy_block *dcc_block;
@@ -584,6 +583,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 					     (u64)vres->base.size,
 					     &vres->blocks);
 	}
+	mutex_unlock(&mgr->lock);
 
 	vres->base.start = 0;
 	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
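
The two hunks above move the final mutex_unlock() so that the buddy trim of a contiguous allocation still runs with the manager lock held. A minimal sketch of that locking pattern, using hypothetical names (vram_mgr_example, trim_blocks) rather than the driver's real helpers:

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/types.h>

struct vram_mgr_example {		/* hypothetical stand-in for the manager */
	struct mutex lock;
	struct list_head blocks;
};

static void trim_blocks(struct vram_mgr_example *mgr, struct list_head *blocks,
			u64 size)
{
	/* placeholder for the actual trim of the allocated block list */
}

static void alloc_and_trim(struct vram_mgr_example *mgr, struct list_head *blocks,
			   u64 size, bool contiguous)
{
	mutex_lock(&mgr->lock);
	/* ... carve blocks out of the pool ... */
	if (contiguous)
		trim_blocks(mgr, blocks, size);	/* still under mgr->lock */
	mutex_unlock(&mgr->lock);		/* single unlock, after the trim */
}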
@@ -350,10 +350,27 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en)
 {
 	uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
 	uint32_t flags = pdd->process->dbg_flags;
+	struct amdgpu_device *adev = pdd->dev->adev;
+	int r;
 
 	if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
 		return 0;
 
+	if (!pdd->proc_ctx_cpu_ptr) {
+		r = amdgpu_amdkfd_alloc_gtt_mem(adev,
+						AMDGPU_MES_PROC_CTX_SIZE,
+						&pdd->proc_ctx_bo,
+						&pdd->proc_ctx_gpu_addr,
+						&pdd->proc_ctx_cpu_ptr,
+						false);
+		if (r) {
+			dev_err(adev->dev,
+				"failed to allocate process context bo\n");
+			return r;
+		}
+		memset(pdd->proc_ctx_cpu_ptr, 0, AMDGPU_MES_PROC_CTX_SIZE);
+	}
+
 	return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl,
 					      pdd->watch_points, flags, sq_trap_en);
 }
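
The added block allocates the per-process MES context buffer on first use instead of assuming an earlier path already created it, which is what avoids the page fault with the shader debugger enabled. A rough sketch of that lazy-allocation pattern, with kzalloc() standing in for the real GTT allocation and all names hypothetical:

#include <linux/slab.h>
#include <linux/device.h>
#include <linux/types.h>

struct proc_ctx_example {		/* hypothetical per-process context */
	void *cpu_ptr;
	u64 gpu_addr;
};

static int ensure_proc_ctx(struct device *dev, struct proc_ctx_example *ctx,
			   size_t size)
{
	if (ctx->cpu_ptr)		/* already allocated: nothing to do */
		return 0;

	ctx->cpu_ptr = kzalloc(size, GFP_KERNEL);	/* stand-in for the GTT alloc */
	if (!ctx->cpu_ptr) {
		dev_err(dev, "failed to allocate process context\n");
		return -ENOMEM;
	}
	return 0;
}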
@@ -1160,7 +1160,8 @@ static void kfd_process_wq_release(struct work_struct *work)
 	 */
 	synchronize_rcu();
 	ef = rcu_access_pointer(p->ef);
-	dma_fence_signal(ef);
+	if (ef)
+		dma_fence_signal(ef);
 
 	kfd_process_remove_sysfs(p);
 
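
The eviction-fence fix is a plain NULL guard: by the time the release work runs, the RCU-managed fence pointer may already have been cleared, so it is only signalled when still present. A minimal sketch of the same guard, written against the generic dma-fence and RCU accessors rather than the kfd structures:

#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

/* Sketch only: wait for readers to finish, then signal the fence if any. */
static void release_eviction_fence(struct dma_fence __rcu **efp)
{
	struct dma_fence *ef;

	synchronize_rcu();
	ef = rcu_access_pointer(*efp);
	if (ef)
		dma_fence_signal(ef);
}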
@@ -8400,16 +8400,6 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
 				 struct amdgpu_crtc *acrtc,
 				 struct dm_crtc_state *acrtc_state)
 {
-	/*
-	 * We have no guarantee that the frontend index maps to the same
-	 * backend index - some even map to more than one.
-	 *
-	 * TODO: Use a different interrupt or check DC itself for the mapping.
-	 */
-	int irq_type =
-		amdgpu_display_crtc_idx_to_irq_type(
-			adev,
-			acrtc->crtc_id);
 	struct drm_vblank_crtc_config config = {0};
 	struct dc_crtc_timing *timing;
 	int offdelay;
@@ -8435,28 +8425,7 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
 
 		drm_crtc_vblank_on_config(&acrtc->base,
 					  &config);
-
-		amdgpu_irq_get(
-			adev,
-			&adev->pageflip_irq,
-			irq_type);
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-		amdgpu_irq_get(
-			adev,
-			&adev->vline0_irq,
-			irq_type);
-#endif
 	} else {
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-		amdgpu_irq_put(
-			adev,
-			&adev->vline0_irq,
-			irq_type);
-#endif
-		amdgpu_irq_put(
-			adev,
-			&adev->pageflip_irq,
-			irq_type);
 		drm_crtc_vblank_off(&acrtc->base);
 	}
 }
@@ -11155,8 +11124,8 @@ dm_get_plane_scale(struct drm_plane_state *plane_state,
 	int plane_src_w, plane_src_h;
 
 	dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
-	*out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
-	*out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+	*out_plane_scale_w = plane_src_w ? plane_state->crtc_w * 1000 / plane_src_w : 0;
+	*out_plane_scale_h = plane_src_h ? plane_state->crtc_h * 1000 / plane_src_h : 0;
 }
 
 /*
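
The plane-scale fix replaces unconditional divisions with guarded ones, returning a scale of 0 (in thousandths) when a source dimension is 0 instead of faulting. A standalone sketch of the same computation, with illustrative parameter names:

/* Sketch: scale factors in thousandths, guarded against zero source size. */
static void get_plane_scale_example(int crtc_w, int crtc_h, int src_w, int src_h,
				    int *scale_w, int *scale_h)
{
	*scale_w = src_w ? crtc_w * 1000 / src_w : 0;
	*scale_h = src_h ? crtc_h * 1000 / src_h : 0;
}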
@@ -4510,7 +4510,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
 	struct pipe_split_policy_backup policy;
 	struct dc_state *intermediate_context;
 	struct dc_state *old_current_state = dc->current_state;
-	struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+	struct dc_surface_update srf_updates[MAX_SURFACES] = {0};
 	int surface_count;
 
 	/*
@@ -483,9 +483,9 @@ bool dc_state_add_plane(
 	if (stream_status == NULL) {
 		dm_error("Existing stream not found; failed to attach surface!\n");
 		goto out;
-	} else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+	} else if (stream_status->plane_count == MAX_SURFACES) {
 		dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
-			plane_state, MAX_SURFACE_NUM);
+			plane_state, MAX_SURFACES);
 		goto out;
 	} else if (!otg_master_pipe) {
 		goto out;
@@ -600,7 +600,7 @@ bool dc_state_rem_all_planes_for_stream(
 {
 	int i, old_plane_count;
 	struct dc_stream_status *stream_status = NULL;
-	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+	struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
 
 	for (i = 0; i < state->stream_count; i++)
 		if (state->streams[i] == stream) {
@@ -875,7 +875,7 @@ bool dc_state_rem_all_phantom_planes_for_stream(
 {
 	int i, old_plane_count;
 	struct dc_stream_status *stream_status = NULL;
-	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+	struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
 
 	for (i = 0; i < state->stream_count; i++)
 		if (state->streams[i] == phantom_stream) {
@@ -57,7 +57,7 @@ struct dmub_notification;
 
 #define DC_VER "3.2.310"
 
-#define MAX_SURFACES 3
+#define MAX_SURFACES 4
 #define MAX_PLANES 6
 #define MAX_STREAMS 6
 #define MIN_VIEWPORT_SIZE 12
@@ -1398,7 +1398,7 @@ struct dc_scratch_space {
 	 * store current value in plane states so we can still recover
 	 * a valid current state during dc update.
 	 */
-	struct dc_plane_state plane_states[MAX_SURFACE_NUM];
+	struct dc_plane_state plane_states[MAX_SURFACES];
 
 	struct dc_stream_state stream_state;
 };
@@ -56,7 +56,7 @@ struct dc_stream_status {
 	int plane_count;
 	int audio_inst;
 	struct timing_sync_info timing_sync_info;
-	struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
+	struct dc_plane_state *plane_states[MAX_SURFACES];
 	bool is_abm_supported;
 	struct mall_stream_config mall_stream_config;
 	bool fpo_in_use;
@@ -76,7 +76,6 @@ struct dc_perf_trace {
 	unsigned long last_entry_write;
 };
 
-#define MAX_SURFACE_NUM 6
 #define NUM_PIXEL_FORMATS 10
 
 enum tiling_mode {
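
Taken together, the MAX_SURFACES/MAX_SURFACE_NUM hunks collapse two different capacity macros into a single one, so scratch copies and live arrays that are indexed with the same plane_count can no longer disagree on their bounds. An illustrative sketch of the mismatch being removed, with made-up struct names and sizes:

/* Sketch: one shared bound for every array walked with plane_count.
 * If the scratch array were sized by a smaller macro than the source,
 * copying plane_count entries could write past its end.
 */
#define MAX_SURFACES_EXAMPLE 4			/* single source of truth */

struct stream_status_example {
	int plane_count;
	void *plane_states[MAX_SURFACES_EXAMPLE];
};

struct scratch_space_example {
	void *plane_states[MAX_SURFACES_EXAMPLE];	/* same bound as the source */
};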
@@ -66,11 +66,15 @@ static inline double dml_max5(double a, double b, double c, double d, double e)
 
 static inline double dml_ceil(double a, double granularity)
 {
+	if (granularity == 0)
+		return 0;
 	return (double) dcn_bw_ceil2(a, granularity);
 }
 
 static inline double dml_floor(double a, double granularity)
 {
+	if (granularity == 0)
+		return 0;
 	return (double) dcn_bw_floor2(a, granularity);
 }
 
@@ -114,11 +118,15 @@ static inline double dml_ceil_2(double f)
 
 static inline double dml_ceil_ex(double x, double granularity)
 {
+	if (granularity == 0)
+		return 0;
 	return (double) dcn_bw_ceil2(x, granularity);
 }
 
 static inline double dml_floor_ex(double x, double granularity)
 {
+	if (granularity == 0)
+		return 0;
 	return (double) dcn_bw_floor2(x, granularity);
 }
 
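
The dml helper hunks short-circuit a zero granularity before it can reach the rounding math. A userspace-flavoured sketch of the guarded helpers, with ceil()/floor() from math.h standing in for dcn_bw_ceil2()/dcn_bw_floor2(), which are assumed here to round to a multiple of the granularity:

#include <math.h>

/* Sketch: round to a multiple of granularity, treating 0 granularity as 0. */
static inline double ceil_to_granularity(double a, double granularity)
{
	if (granularity == 0)
		return 0;
	return ceil(a / granularity) * granularity;
}

static inline double floor_to_granularity(double a, double granularity)
{
	if (granularity == 0)
		return 0;
	return floor(a / granularity) * granularity;
}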
@@ -813,7 +813,7 @@ static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struc
 {
 	int i, old_plane_count;
 	struct dc_stream_status *stream_status = NULL;
-	struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+	struct dc_plane_state *del_planes[MAX_SURFACES] = { 0 };
 
 	for (i = 0; i < context->stream_count; i++)
 		if (context->streams[i] == stream) {
@@ -303,5 +303,7 @@ int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
 int smu_v13_0_get_boot_freq_by_index(struct smu_context *smu,
 				     enum smu_clk_type clk_type,
 				     uint32_t *value);
+
+void smu_v13_0_interrupt_work(struct smu_context *smu);
 #endif
 #endif
@@ -1320,11 +1320,11 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
+void smu_v13_0_interrupt_work(struct smu_context *smu)
 {
-	return smu_cmn_send_smc_msg(smu,
-				    SMU_MSG_ReenableAcDcInterrupt,
-				    NULL);
+	smu_cmn_send_smc_msg(smu,
+			     SMU_MSG_ReenableAcDcInterrupt,
+			     NULL);
 }
 
 #define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
@@ -1377,12 +1377,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
 			switch (ctxid) {
 			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
 				dev_dbg(adev->dev, "Switched to AC mode!\n");
-				smu_v13_0_ack_ac_dc_interrupt(smu);
+				schedule_work(&smu->interrupt_work);
 				adev->pm.ac_power = true;
 				break;
 			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
 				dev_dbg(adev->dev, "Switched to DC mode!\n");
-				smu_v13_0_ack_ac_dc_interrupt(smu);
+				schedule_work(&smu->interrupt_work);
 				adev->pm.ac_power = false;
 				break;
 			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
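
The SMU 13 change stops acknowledging the AC/DC interrupt directly in the interrupt path and instead schedules smu->interrupt_work, so the firmware message is sent later from process context. A small self-contained sketch of that deferral pattern; the struct and function names here are hypothetical, while INIT_WORK()/schedule_work()/container_of() are the real kernel APIs:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct smu_like_example {		/* hypothetical stand-in for the SMU context */
	struct work_struct interrupt_work;
};

static void interrupt_work_fn(struct work_struct *work)
{
	struct smu_like_example *smu =
		container_of(work, struct smu_like_example, interrupt_work);

	/* send the re-enable message here, outside interrupt context */
	(void)smu;
}

static void init_example(struct smu_like_example *smu)
{
	INIT_WORK(&smu->interrupt_work, interrupt_work_fn);
}

static void irq_path_example(struct smu_like_example *smu)
{
	schedule_work(&smu->interrupt_work);	/* cheap, non-sleeping */
}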
@@ -3219,6 +3219,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
 	.is_asic_wbrf_supported = smu_v13_0_0_wbrf_support_check,
 	.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
 	.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+	.interrupt_work = smu_v13_0_interrupt_work,
 };
 
 void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
@@ -2797,6 +2797,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
 	.is_asic_wbrf_supported = smu_v13_0_7_wbrf_support_check,
 	.enable_uclk_shadow = smu_v13_0_enable_uclk_shadow,
 	.set_wbrf_exclusion_ranges = smu_v13_0_set_wbrf_exclusion_ranges,
+	.interrupt_work = smu_v13_0_interrupt_work,
 };
 
 void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)