Merge tag 'amd-drm-fixes-5.7-2020-05-13' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
amd-drm-fixes-5.7-2020-05-13:

amdgpu:
- Clockgating fixes
- Fix fbdev with scatter/gather display
- S4 fix for navi
- Soft recovery for gfx10
- Freesync fixes
- Atomic check cursor fix
- Add a gfxoff quirk
- MST fix

amdkfd:
- Fix GEM reference counting

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200514034046.3988-1-alexander.deucher@amd.com
commit f59bcda883
@@ -945,6 +945,7 @@ struct amdgpu_device {
 
     /* s3/s4 mask */
     bool in_suspend;
+    bool in_hibernate;
 
     /* record last mm index being written through WREG32*/
     unsigned long last_mm_index;
@@ -1343,7 +1343,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
     }
 
     /* Free the BO*/
-    amdgpu_bo_unref(&mem->bo);
+    drm_gem_object_put_unlocked(&mem->bo->tbo.base);
     mutex_destroy(&mem->lock);
     kfree(mem);
 
@@ -1688,7 +1688,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
             | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
             | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
 
-    (*mem)->bo = amdgpu_bo_ref(bo);
+    drm_gem_object_get(&bo->tbo.base);
+    (*mem)->bo = bo;
     (*mem)->va = va;
     (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
         AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
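The two amdkfd hunks above are the GEM reference-counting fix called out in the merge summary: the import path now takes a reference on the BO's embedded GEM object with drm_gem_object_get(), and the free path drops it with drm_gem_object_put_unlocked() instead of the amdgpu_bo_ref()/amdgpu_bo_unref() pair. As a minimal standalone sketch of that acquire/release discipline (plain C with a toy obj type and hypothetical obj_get/obj_put helpers, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Toy reference-counted object; stands in for a GEM object. */
struct obj {
    int refcount;
};

static struct obj *obj_create(void)
{
    struct obj *o = malloc(sizeof(*o));
    o->refcount = 1;            /* creator holds the first reference */
    return o;
}

static void obj_get(struct obj *o)
{
    o->refcount++;              /* importer takes its own reference */
}

static void obj_put(struct obj *o)
{
    if (--o->refcount == 0) {   /* last reference frees the object */
        free(o);
        printf("object freed\n");
    }
}

int main(void)
{
    struct obj *bo = obj_create();

    obj_get(bo);    /* import path: keep the object alive while exported */
    obj_put(bo);    /* free path: drop the import reference */
    obj_put(bo);    /* creator drops its reference; object is freed */
    return 0;
}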
@@ -1181,7 +1181,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
     struct amdgpu_device *adev = drm_dev->dev_private;
     int r;
 
+    adev->in_hibernate = true;
     r = amdgpu_device_suspend(drm_dev, true);
+    adev->in_hibernate = false;
     if (r)
         return r;
     return amdgpu_asic_reset(adev);
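The freeze hunk brackets the suspend call with adev->in_hibernate so code running underneath can tell an S4 freeze apart from other suspend paths; the smu_disable_dpm hunk further down in this series consumes the same flag when deciding whether BACO applies. A minimal standalone sketch of that set/clear-around-a-call pattern, using hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device context; only the flag matters here. */
struct dev_ctx {
    bool in_hibernate;
};

/* A consumer deep in the suspend path branches on the flag. */
static void disable_dpm(struct dev_ctx *dev)
{
    if (dev->in_hibernate)
        printf("hibernate path: treat like the BACO-capable case\n");
    else
        printf("regular suspend path\n");
}

static void device_suspend(struct dev_ctx *dev)
{
    disable_dpm(dev);       /* runs while the flag is set (or not) */
}

/* freeze() mirrors the amdgpu_pmops_freeze change: set, suspend, clear. */
static void freeze(struct dev_ctx *dev)
{
    dev->in_hibernate = true;
    device_suspend(dev);
    dev->in_hibernate = false;
}

int main(void)
{
    struct dev_ctx dev = { .in_hibernate = false };

    device_suspend(&dev);   /* ordinary suspend: flag stays false */
    freeze(&dev);           /* S4 freeze: flag is true during suspend */
    return 0;
}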
@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
     u32 cpp;
     u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                 AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-                AMDGPU_GEM_CREATE_VRAM_CLEARED |
-                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+                AMDGPU_GEM_CREATE_VRAM_CLEARED;
 
     info = drm_get_format_info(adev->ddev, mode_cmd);
     cpp = info->cpp[0];
@@ -4273,7 +4273,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
         /* === CGCG /CGLS for GFX 3D Only === */
         gfx_v10_0_update_3d_clock_gating(adev, enable);
         /* === MGCG + MGLS === */
-        /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
+        gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
     }
 
     if (adev->cg_flags &
@@ -4353,11 +4353,7 @@ static int gfx_v10_0_set_powergating_state(void *handle,
     switch (adev->asic_type) {
     case CHIP_NAVI10:
     case CHIP_NAVI14:
-        if (!enable) {
-            amdgpu_gfx_off_ctrl(adev, false);
-            cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-        } else
-            amdgpu_gfx_off_ctrl(adev, true);
+        amdgpu_gfx_off_ctrl(adev, enable);
         break;
     default:
         break;
@@ -4918,6 +4914,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
                    ref, mask);
 }
 
+static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
+                                         unsigned vmid)
+{
+    struct amdgpu_device *adev = ring->adev;
+    uint32_t value = 0;
+
+    value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+    value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+    value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+    value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+    WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+}
+
 static void
 gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
                                       uint32_t me, uint32_t pipe,
@@ -5309,6 +5318,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
     .emit_wreg = gfx_v10_0_ring_emit_wreg,
     .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
     .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+    .soft_recovery = gfx_v10_0_ring_soft_recovery,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
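The soft-recovery helper added above packs four fields into one SQ_CMD value with REG_SET_FIELD() before a single register write, and the ring-funcs hunk wires it up as the gfx ring's .soft_recovery hook. The packing itself is ordinary mask-and-shift work; a standalone sketch with made-up field offsets (not the kernel's REG_SET_FIELD macro or the real SQ_CMD layout):

#include <stdint.h>
#include <stdio.h>

/* Made-up field definitions in the kernel's <REG>__<FIELD> naming style. */
#define SQ_CMD__CMD__SHIFT        0
#define SQ_CMD__CMD_MASK          0x00000007u
#define SQ_CMD__MODE__SHIFT       4
#define SQ_CMD__MODE_MASK         0x00000070u
#define SQ_CMD__CHECK_VMID__SHIFT 8
#define SQ_CMD__CHECK_VMID_MASK   0x00000100u
#define SQ_CMD__VM_ID__SHIFT      12
#define SQ_CMD__VM_ID_MASK        0x0000f000u

/* Clear the field in 'orig' and insert 'val', mask-and-shift style. */
#define SET_FIELD(orig, reg, field, val) \
    (((orig) & ~reg##__##field##_MASK) | \
     (((uint32_t)(val) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
    uint32_t value = 0;
    unsigned vmid = 5;

    value = SET_FIELD(value, SQ_CMD, CMD, 0x03);
    value = SET_FIELD(value, SQ_CMD, MODE, 0x01);
    value = SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
    value = SET_FIELD(value, SQ_CMD, VM_ID, vmid);

    /* In the driver the packed value would be written to mmSQ_CMD. */
    printf("packed SQ_CMD = 0x%08x\n", value);
    return 0;
}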
@@ -1236,6 +1236,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
     { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
     /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
     { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+    /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+    { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
     { 0, 0, 0, 0, 0 },
 };
 
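Each gfxoff quirk entry above is a {chip vendor, chip device, subsystem vendor, subsystem device, revision} tuple, with an all-zero sentinel closing the table. A hedged sketch of how such a table can be walked against a device's IDs (standalone C with hypothetical struct and function names, not the driver's actual matching code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gfxoff_quirk {
    uint16_t chip_vendor, chip_device;
    uint16_t subsys_vendor, subsys_device;
    uint8_t  revision;
};

/* Same entries as the table in the hunk above, with a zero sentinel. */
static const struct gfxoff_quirk quirk_list[] = {
    { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
    { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
    { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
    { 0, 0, 0, 0, 0 },
};

static bool gfxoff_quirked(uint16_t vendor, uint16_t device,
                           uint16_t sub_vendor, uint16_t sub_device,
                           uint8_t rev)
{
    const struct gfxoff_quirk *q = quirk_list;

    while (q->chip_device) {    /* the sentinel has device == 0 */
        if (q->chip_vendor == vendor && q->chip_device == device &&
            q->subsys_vendor == sub_vendor &&
            q->subsys_device == sub_device && q->revision == rev)
            return true;
        q++;
    }
    return false;
}

int main(void)
{
    /* Raven APU, C6 part: matches the entry added by this fix. */
    printf("quirked: %d\n", gfxoff_quirked(0x1002, 0x15dd,
                                           0x1002, 0x15dd, 0xc6));
    return 0;
}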
@@ -5025,10 +5027,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
     switch (adev->asic_type) {
     case CHIP_RAVEN:
     case CHIP_RENOIR:
-        if (!enable) {
+        if (!enable)
             amdgpu_gfx_off_ctrl(adev, false);
-            cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-        }
+
         if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
             gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
             gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@@ -5052,12 +5053,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
             amdgpu_gfx_off_ctrl(adev, true);
         break;
     case CHIP_VEGA12:
-        if (!enable) {
-            amdgpu_gfx_off_ctrl(adev, false);
-            cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
-        } else {
-            amdgpu_gfx_off_ctrl(adev, true);
-        }
+        amdgpu_gfx_off_ctrl(adev, enable);
         break;
     default:
         break;
@@ -441,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
 
 /**
  * dm_crtc_high_irq() - Handles CRTC interrupt
- * @interrupt_params: ignored
+ * @interrupt_params: used for determining the CRTC instance
  *
  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
  * event handler.
@@ -455,70 +455,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
     unsigned long flags;
 
     acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
 
-    if (acrtc) {
-        acrtc_state = to_dm_crtc_state(acrtc->base.state);
-
-        DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
-                      acrtc->crtc_id,
-                      amdgpu_dm_vrr_active(acrtc_state));
-
-        /* Core vblank handling at start of front-porch is only possible
-         * in non-vrr mode, as only there vblank timestamping will give
-         * valid results while done in front-porch. Otherwise defer it
-         * to dm_vupdate_high_irq after end of front-porch.
-         */
-        if (!amdgpu_dm_vrr_active(acrtc_state))
-            drm_crtc_handle_vblank(&acrtc->base);
-
-        /* Following stuff must happen at start of vblank, for crc
-         * computation and below-the-range btr support in vrr mode.
-         */
-        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
-        if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
-            acrtc_state->vrr_params.supported &&
-            acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
-            spin_lock_irqsave(&adev->ddev->event_lock, flags);
-            mod_freesync_handle_v_update(
-                adev->dm.freesync_module,
-                acrtc_state->stream,
-                &acrtc_state->vrr_params);
-
-            dc_stream_adjust_vmin_vmax(
-                adev->dm.dc,
-                acrtc_state->stream,
-                &acrtc_state->vrr_params.adjust);
-            spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-        }
-    }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-/**
- * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
- * @interrupt params - interrupt parameters
- *
- * Notify DRM's vblank event handler at VSTARTUP
- *
- * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
- * * We are close enough to VUPDATE - the point of no return for hw
- * * We are in the fixed portion of variable front porch when vrr is enabled
- * * We are before VUPDATE, where double-buffered vrr registers are swapped
- *
- * It is therefore the correct place to signal vblank, send user flip events,
- * and update VRR.
- */
-static void dm_dcn_crtc_high_irq(void *interrupt_params)
-{
-    struct common_irq_params *irq_params = interrupt_params;
-    struct amdgpu_device *adev = irq_params->adev;
-    struct amdgpu_crtc *acrtc;
-    struct dm_crtc_state *acrtc_state;
-    unsigned long flags;
-
-    acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
     if (!acrtc)
         return;
-
@@ -528,22 +464,35 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
              amdgpu_dm_vrr_active(acrtc_state),
              acrtc_state->active_planes);
 
+    /**
+     * Core vblank handling at start of front-porch is only possible
+     * in non-vrr mode, as only there vblank timestamping will give
+     * valid results while done in front-porch. Otherwise defer it
+     * to dm_vupdate_high_irq after end of front-porch.
+     */
+    if (!amdgpu_dm_vrr_active(acrtc_state))
+        drm_crtc_handle_vblank(&acrtc->base);
+
+    /**
+     * Following stuff must happen at start of vblank, for crc
+     * computation and below-the-range btr support in vrr mode.
+     */
     amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-    drm_crtc_handle_vblank(&acrtc->base);
+
+    /* BTR updates need to happen before VUPDATE on Vega and above. */
+    if (adev->family < AMDGPU_FAMILY_AI)
+        return;
 
     spin_lock_irqsave(&adev->ddev->event_lock, flags);
 
-    if (acrtc_state->vrr_params.supported &&
+    if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
         acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
-        mod_freesync_handle_v_update(
-            adev->dm.freesync_module,
-            acrtc_state->stream,
-            &acrtc_state->vrr_params);
+        mod_freesync_handle_v_update(adev->dm.freesync_module,
+                                     acrtc_state->stream,
+                                     &acrtc_state->vrr_params);
 
-        dc_stream_adjust_vmin_vmax(
-            adev->dm.dc,
-            acrtc_state->stream,
-            &acrtc_state->vrr_params.adjust);
+        dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
+                                   &acrtc_state->vrr_params.adjust);
     }
 
     /*
@@ -556,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
      * avoid race conditions between flip programming and completion,
      * which could cause too early flip completion events.
      */
-    if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+    if (adev->family >= AMDGPU_FAMILY_RV &&
+        acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
         acrtc_state->active_planes == 0) {
         if (acrtc->event) {
             drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
@@ -568,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
     spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 }
-#endif
 
 static int dm_set_clockgating_state(void *handle,
               enum amd_clockgating_state state)
@@ -2445,8 +2394,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
         c_irq_params->adev = adev;
         c_irq_params->irq_src = int_params.irq_source;
 
+        amdgpu_dm_irq_register_interrupt(
+            adev, &int_params, dm_crtc_high_irq, c_irq_params);
+    }
+
+    /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+     * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+     * to trigger at end of each vblank, regardless of state of the lock,
+     * matching DCE behaviour.
+     */
+    for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+         i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+         i++) {
+        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+        if (r) {
+            DRM_ERROR("Failed to add vupdate irq id!\n");
+            return r;
+        }
+
+        int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+        int_params.irq_source =
+            dc_interrupt_to_irq_source(dc, i, 0);
+
+        c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+        c_irq_params->adev = adev;
+        c_irq_params->irq_src = int_params.irq_source;
+
         amdgpu_dm_irq_register_interrupt(adev, &int_params,
-                dm_dcn_crtc_high_irq, c_irq_params);
+                dm_vupdate_high_irq, c_irq_params);
     }
 
     /* Use GRPH_PFLIP interrupt */
@@ -4453,10 +4430,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
     struct amdgpu_device *adev = crtc->dev->dev_private;
     int rc;
 
-    /* Do not set vupdate for DCN hardware */
-    if (adev->family > AMDGPU_FAMILY_AI)
-        return 0;
-
     irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
 
     rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -7882,6 +7855,7 @@ static int dm_update_plane_state(struct dc *dc,
     struct drm_crtc_state *old_crtc_state, *new_crtc_state;
     struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
     struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+    struct amdgpu_crtc *new_acrtc;
     bool needs_reset;
     int ret = 0;
 
@@ -7891,9 +7865,30 @@ static int dm_update_plane_state(struct dc *dc,
     dm_new_plane_state = to_dm_plane_state(new_plane_state);
     dm_old_plane_state = to_dm_plane_state(old_plane_state);
 
-    /*TODO Implement atomic check for cursor plane */
-    if (plane->type == DRM_PLANE_TYPE_CURSOR)
+    /*TODO Implement better atomic check for cursor plane */
+    if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+        if (!enable || !new_plane_crtc ||
+            drm_atomic_plane_disabling(plane->state, new_plane_state))
+            return 0;
+
+        new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+        if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
+            (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
+            DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
+                             new_plane_state->crtc_w, new_plane_state->crtc_h);
+            return -EINVAL;
+        }
+
+        if (new_plane_state->crtc_x <= -new_acrtc->max_cursor_width ||
+            new_plane_state->crtc_y <= -new_acrtc->max_cursor_height) {
+            DRM_DEBUG_ATOMIC("Bad cursor position %d, %d\n",
+                             new_plane_state->crtc_x, new_plane_state->crtc_y);
+            return -EINVAL;
+        }
+
         return 0;
+    }
 
     needs_reset = should_reset_plane(state, plane, old_plane_state,
                                      new_plane_state);
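The cursor branch added above rejects two cases before skipping the remaining plane checks: a cursor larger than the hardware maximum, and a cursor placed so far into negative coordinates that it cannot intersect the CRTC. Distilled into a standalone sketch with hypothetical types and limits:

#include <stdbool.h>
#include <stdio.h>

struct cursor_state {
    int x, y;       /* position relative to the CRTC */
    int w, h;       /* cursor plane size */
};

/* Returns false for the same two cases the atomic check rejects. */
static bool cursor_ok(const struct cursor_state *s,
                      int max_width, int max_height)
{
    if (s->w > max_width || s->h > max_height)
        return false;           /* bad cursor size */
    if (s->x <= -max_width || s->y <= -max_height)
        return false;           /* bad cursor position */
    return true;
}

int main(void)
{
    struct cursor_state huge = { .x = 0, .y = 0, .w = 512, .h = 512 };
    struct cursor_state offscreen = { .x = -300, .y = 0, .w = 64, .h = 64 };

    printf("huge ok: %d\n", cursor_ok(&huge, 256, 256));
    printf("offscreen ok: %d\n", cursor_ok(&offscreen, 256, 256));
    return 0;
}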
@@ -398,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
     struct mod_hdcp_display *display = &hdcp_work[link_index].display;
     struct mod_hdcp_link *link = &hdcp_work[link_index].link;
 
-    memset(display, 0, sizeof(*display));
-    memset(link, 0, sizeof(*link));
-
-    display->index = aconnector->base.index;
-
     if (config->dpms_off) {
         hdcp_remove_display(hdcp_work, link_index, aconnector);
         return;
     }
+
+    memset(display, 0, sizeof(*display));
+    memset(link, 0, sizeof(*link));
+
+    display->index = aconnector->base.index;
     display->state = MOD_HDCP_DISPLAY_ACTIVE;
 
     if (aconnector->dc_sink != NULL)
@@ -319,12 +319,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
         if (*level & profile_mode_mask) {
             hwmgr->saved_dpm_level = hwmgr->dpm_level;
             hwmgr->en_umd_pstate = true;
-            amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
-                        AMD_IP_BLOCK_TYPE_GFX,
-                        AMD_CG_STATE_UNGATE);
             amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                         AMD_IP_BLOCK_TYPE_GFX,
                         AMD_PG_STATE_UNGATE);
+            amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+                        AMD_IP_BLOCK_TYPE_GFX,
+                        AMD_CG_STATE_UNGATE);
         }
     } else {
         /* exit umd pstate, restore level, enable gfx cg*/
@@ -1476,7 +1476,7 @@ static int smu_disable_dpm(struct smu_context *smu)
     bool use_baco = !smu->is_apu &&
         ((adev->in_gpu_reset &&
           (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-         (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+         ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
 
     ret = smu_get_smc_version(smu, NULL, &smu_version);
     if (ret) {
@@ -1744,12 +1744,12 @@ static int smu_enable_umd_pstate(void *handle,
         if (*level & profile_mode_mask) {
             smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
             smu_dpm_ctx->enable_umd_pstate = true;
-            amdgpu_device_ip_set_clockgating_state(smu->adev,
-                                                   AMD_IP_BLOCK_TYPE_GFX,
-                                                   AMD_CG_STATE_UNGATE);
             amdgpu_device_ip_set_powergating_state(smu->adev,
                                                    AMD_IP_BLOCK_TYPE_GFX,
                                                    AMD_PG_STATE_UNGATE);
+            amdgpu_device_ip_set_clockgating_state(smu->adev,
+                                                   AMD_IP_BLOCK_TYPE_GFX,
+                                                   AMD_CG_STATE_UNGATE);
         }
     } else {
         /* exit umd pstate, restore level, enable gfx cg*/