drm fixes part 2 for 6.4-rc1
Merge tag 'drm-next-2023-05-05' of git://anongit.freedesktop.org/drm/drm

Pull more drm fixes from Dave Airlie:
 "This is the fixes for the last couple of weeks for i915 and last 3
  weeks for amdgpu, lots of them but pretty scattered around and all
  pretty small.

  amdgpu:
  - SR-IOV fixes
  - DCN 3.2 fixes
  - DC mclk handling fixes
  - eDP fixes
  - SubVP fixes
  - HDCP regression fix
  - DSC fixes
  - DC FP fixes
  - DCN 3.x fixes
  - Display flickering fix when switching between vram and gtt
  - Z8 power saving fix
  - Fix hang when skipping modeset
  - GPU reset fixes
  - Doorbell fix when resizing BARs
  - Fix spurious warnings in gmc
  - Locking fix for AMDGPU_SCHED IOCTL
  - SR-IOV fix
  - DCN 3.1.4 fix
  - DCN 3.2 fix
  - Fix job cleanup when CS is aborted

  i915:
  - skl pipe source size check
  - mtl transcoder mask fix
  - DSI power on sequence fix
  - GuC versioning corner case fix"

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmRUcWYACgkQDHTzWXnE
hr7uhg/+LIIt5kgdCA95HjUENcve+RVgQX6fOnDlROUi75t80gbB08RQr5e+/6/8
PgT4RkhB8KRZ7uMAFTgb0qiIvIh2Xffdn0no44xV47r+0cj6uZRjeo1WtIkLCbZZ
ratZ1WOxzDdJRWZiKMdtdk34Par3++CfIPKze/wcUIUyoTN0VDm7qycBNBa/Yoes
VCCGDEazUfnom4xALO96HrzdozNiB5D/xUU5/ZMfULcNzXvj6S0HFrqwKS3XxHej
79XbFuZerUn6aT+yJRzpF16HSLXd2mKpFRRqS/sRlLYFRFru1YdffkJJfJZQBRko
mluKu1g21zDR4Z1Qe2RZK++bTK2q9Azhv4EJmXCJ5p/t8vFBnmTnrzD2xSQOYz5I
kxKNliT23ro4eHU5/CQd3xpe8pv4h/J5wm7JU5sbQLe2ypbOqC/Z6SZ4xMqjj0bZ
JW7Yjcprvy/l9Rze6kKSLkdjNkpSFlZJS5dh1N/k0G7MuVr4LfHBN8TtYGrFc6G/
qDZzVrgKHZywyYu9Vox4lt1ZMbd47xYYEXAJTjVewAjUKvqo9BIo5zuOMcyaw6zi
KVFqS5ygcekbtnQM8oZSicATpNNNnEjEMMoW6gDQJRqdcocCtYKQugEZi/YOVxZs
aMX/HsldfR9srYj1tTz4c5sgyURK8NgB0FY5g7Ant6Kzjp4lRY8=
=W1N0
-----END PGP SIGNATURE-----

* tag 'drm-next-2023-05-05' of git://anongit.freedesktop.org/drm/drm: (48 commits)
  drm/amdgpu: drop redundant sched job cleanup when cs is aborted
  drm/amd/display: filter out invalid bits in pipe_fuses
  drm/amd/display: Change default Z8 watermark values
  drm/amdgpu: disable SDMA WPTR_POLL_ENABLE for SR-IOV
  drm/amdgpu: add a missing lock for AMDGPU_SCHED
  drm/amdgpu: fix an amdgpu_irq_put() issue in gmc_v9_0_hw_fini()
  drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v10_0_hw_fini
  drm/amdgpu: fix amdgpu_irq_put call trace in gmc_v11_0_hw_fini
  drm/amdgpu: Enable doorbell selfring after resize FB BAR
  drm/amdgpu: Use the default reset when loading or reloading the driver
  drm/amdgpu: Fix mode2 reset for sienna cichlid
  drm/i915/dsi: Use unconditional msleep() instead of intel_dsi_msleep()
  drm/i915/mtl: Add the missing CPU transcoder mask in intel_device_info
  drm/i915/guc: Actually return an error if GuC version range check fails
  drm/amd/display: Lowering min Z8 residency time
  drm/amd/display: fix flickering caused by S/G mode
  drm/amd/display: Set min_width and min_height capability for DCN30
  drm/amd/display: Isolate remaining FPU code in DCN32
  drm/amd/display: Update bounding box values for DCN321
  drm/amd/display: Do not clear GPINT register when releasing DMUB from reset
  ...
commit 084f51d473
@@ -1276,7 +1276,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 		r = drm_sched_job_add_dependency(&leader->base, fence);
 		if (r) {
 			dma_fence_put(fence);
-			goto error_cleanup;
+			return r;
 		}
 	}

@@ -1303,7 +1303,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	}
 	if (r) {
 		r = -EAGAIN;
-		goto error_unlock;
+		mutex_unlock(&p->adev->notifier_lock);
+		return r;
 	}

 	p->fence = dma_fence_get(&leader->base.s_fence->finished);
@@ -1350,14 +1351,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	mutex_unlock(&p->adev->notifier_lock);
 	mutex_unlock(&p->bo_list->bo_list_mutex);
 	return 0;
-
-error_unlock:
-	mutex_unlock(&p->adev->notifier_lock);
-
-error_cleanup:
-	for (i = 0; i < p->gang_size; ++i)
-		drm_sched_job_cleanup(&p->jobs[i]->base);
-	return r;
 }

 /* Cleanup the parser structure */
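The three amdgpu_cs.c hunks above are one logical fix: with scheduler-job cleanup folded into the job-free path, the error labels that cleaned the same jobs a second time had become a double free. A minimal sketch of the single-owner pattern the fix restores — names and types are illustrative, not the driver's code:

    #include <stdlib.h>

    struct job { void *payload; };

    /* Submission may fail, but it releases nothing: the resources
     * still belong to whoever frees the job. */
    static int submit(struct job *j)
    {
            (void)j;
            return -1;              /* pretend the submission was aborted */
    }

    /* Single owner of the cleanup: runs exactly once per job, on every path. */
    static void job_free(struct job *j)
    {
            free(j->payload);
            free(j);
    }

    int main(void)
    {
            struct job *j = calloc(1, sizeof(*j));

            if (!j)
                    return 1;
            j->payload = malloc(16);
            (void)submit(j);        /* error or not ... */
            job_free(j);            /* ... teardown happens in one place only */
            return 0;
    }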
@@ -2539,8 +2539,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	amdgpu_fru_get_product_info(adev);

 init_failed:
-	if (amdgpu_sriov_vf(adev))
-		amdgpu_virt_release_full_gpu(adev, true);

 	return r;
 }
@@ -3580,6 +3578,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	int r, i;
 	bool px = false;
 	u32 max_MBps;
+	int tmp;

 	adev->shutdown = false;
 	adev->flags = flags;
@@ -3801,7 +3800,13 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			}
 		}
 	} else {
+		tmp = amdgpu_reset_method;
+		/* It should do a default reset when loading or reloading the driver,
+		 * regardless of the module parameter reset_method.
+		 */
+		amdgpu_reset_method = AMD_RESET_METHOD_NONE;
 		r = amdgpu_asic_reset(adev);
+		amdgpu_reset_method = tmp;
 		if (r) {
 			dev_err(adev->dev, "asic reset on init failed\n");
 			goto failed;
@@ -3859,18 +3864,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,

 	r = amdgpu_device_ip_init(adev);
 	if (r) {
-		/* failed in exclusive mode due to timeout */
-		if (amdgpu_sriov_vf(adev) &&
-		    !amdgpu_sriov_runtime(adev) &&
-		    amdgpu_virt_mmio_blocked(adev) &&
-		    !amdgpu_virt_wait_reset(adev)) {
-			dev_err(adev->dev, "VF exclusive mode timeout\n");
-			/* Don't send request since VF is inactive. */
-			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
-			adev->virt.ops = NULL;
-			r = -EAGAIN;
-			goto release_ras_con;
-		}
 		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
 		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
 		goto release_ras_con;
@@ -3939,8 +3932,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 					msecs_to_jiffies(AMDGPU_RESUME_MS));
 	}

-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
 		amdgpu_virt_release_full_gpu(adev, true);
+		flush_delayed_work(&adev->delayed_init_work);
+	}

 	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
 	if (r)
@@ -3980,6 +3975,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	return 0;

 release_ras_con:
+	if (amdgpu_sriov_vf(adev))
+		amdgpu_virt_release_full_gpu(adev, true);
+
+	/* failed in exclusive mode due to timeout */
+	if (amdgpu_sriov_vf(adev) &&
+	    !amdgpu_sriov_runtime(adev) &&
+	    amdgpu_virt_mmio_blocked(adev) &&
+	    !amdgpu_virt_wait_reset(adev)) {
+		dev_err(adev->dev, "VF exclusive mode timeout\n");
+		/* Don't send request since VF is inactive. */
+		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+		adev->virt.ops = NULL;
+		r = -EAGAIN;
+	}
 	amdgpu_release_ras_context(adev);

 failed:
@@ -38,6 +38,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
 {
 	struct fd f = fdget(fd);
 	struct amdgpu_fpriv *fpriv;
+	struct amdgpu_ctx_mgr *mgr;
 	struct amdgpu_ctx *ctx;
 	uint32_t id;
 	int r;
@@ -51,8 +52,11 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
 		return r;
 	}

-	idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+	mgr = &fpriv->ctx_mgr;
+	mutex_lock(&mgr->lock);
+	idr_for_each_entry(&mgr->ctx_handles, ctx, id)
 		amdgpu_ctx_priority_override(ctx, priority);
+	mutex_unlock(&mgr->lock);

 	fdput(f);
 	return 0;
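The AMDGPU_SCHED locking fix above wraps the idr walk in the context manager's mutex. A small userspace sketch of the same rule, with a plain array standing in for the kernel idr and a pthread mutex for mgr->lock (all names hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_CTX 8

    struct ctx { int priority; };

    struct ctx_mgr {
            pthread_mutex_t lock;
            struct ctx *handles[MAX_CTX];   /* stand-in for the kernel's idr */
    };

    /* The walk over shared handles runs entirely under mgr->lock, so a
     * concurrent context create/destroy cannot race the iteration. */
    static void override_all_priorities(struct ctx_mgr *mgr, int priority)
    {
            pthread_mutex_lock(&mgr->lock);
            for (int id = 0; id < MAX_CTX; id++)
                    if (mgr->handles[id])
                            mgr->handles[id]->priority = priority;
            pthread_mutex_unlock(&mgr->lock);
    }

    int main(void)
    {
            struct ctx c = { .priority = 0 };
            struct ctx_mgr mgr = { .lock = PTHREAD_MUTEX_INITIALIZER };

            mgr.handles[3] = &c;
            override_all_priorities(&mgr, 2);
            printf("ctx priority: %d\n", c.priority);
            return 0;
    }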
@@ -1143,7 +1143,6 @@ static int gmc_v10_0_hw_fini(void *handle)
 		return 0;
 	}

-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

 	return 0;
@@ -951,7 +951,6 @@ static int gmc_v11_0_hw_fini(void *handle)
 		return 0;
 	}

-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 	gmc_v11_0_gart_disable(adev);

@@ -1999,7 +1999,6 @@ static int gmc_v9_0_hw_fini(void *handle)
 	if (adev->mmhub.funcs->update_power_gating)
 		adev->mmhub.funcs->update_power_gating(adev, false);

-	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

 	return 0;
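All three gmc hunks drop an amdgpu_irq_put() for an ecc_irq reference that the corresponding hw_init never took, which left the interrupt's enable refcount unbalanced and produced the call traces named in the commit titles. A toy sketch of why every put must pair with a get (illustrative only, not the amdgpu implementation):

    #include <assert.h>
    #include <stdio.h>

    struct irq_src { int enable_count; };

    static void irq_get(struct irq_src *src)
    {
            if (src->enable_count++ == 0)
                    puts("hw: interrupt enabled");
    }

    static void irq_put(struct irq_src *src)
    {
            /* An unbalanced put (no earlier get) trips this check — the
             * moral equivalent of the WARN these fixes silence. */
            assert(src->enable_count > 0);
            if (--src->enable_count == 0)
                    puts("hw: interrupt disabled");
    }

    int main(void)
    {
            struct irq_src vm_fault = { 0 };

            irq_get(&vm_fault);     /* hw_init takes a reference */
            irq_put(&vm_fault);     /* hw_fini releases the same one */
            return 0;
    }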
@@ -430,7 +430,7 @@ static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
 		MMSCH_COMMAND__END;

 	header.version = MMSCH_VERSION;
-	header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
+	header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE);

 	header.jpegdec.init_status = 0;
 	header.jpegdec.table_offset = 0;
@@ -531,13 +531,6 @@ static void nv_program_aspm(struct amdgpu_device *adev)

 }

-static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
-					bool enable)
-{
-	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
-	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
-}
-
 const struct amdgpu_ip_block_version nv_common_ip_block =
 {
 	.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -999,6 +992,11 @@ static int nv_common_late_init(void *handle)
 		}
 	}

+	/* Enable selfring doorbell aperture late because doorbell BAR
+	 * aperture will change if resize BAR successfully in gmc sw_init.
+	 */
+	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
+
 	return 0;
 }

@@ -1038,7 +1036,7 @@ static int nv_common_hw_init(void *handle)
 	if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
 		adev->nbio.funcs->remap_hdp_registers(adev);
 	/* enable the doorbell aperture */
-	nv_enable_doorbell_aperture(adev, true);
+	adev->nbio.funcs->enable_doorbell_aperture(adev, true);

 	return 0;
 }
@@ -1047,8 +1045,13 @@ static int nv_common_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	/* disable the doorbell aperture */
-	nv_enable_doorbell_aperture(adev, false);
+	/* Disable the doorbell aperture and selfring doorbell aperture
+	 * separately in hw_fini because nv_enable_doorbell_aperture
+	 * has been removed and there is no need to delay disabling
+	 * selfring doorbell.
+	 */
+	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);

 	return 0;
 }
@@ -510,10 +510,7 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev)
 			lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);

 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
-		if (amdgpu_sriov_vf(adev))
-			rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
-		else
-			rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
+		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, F32_WPTR_POLL_ENABLE, 1);

 		WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
@@ -40,7 +40,7 @@ static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_c
 	    adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev))
 		return true;
 #endif
-	return false;
+	return amdgpu_reset_method == AMD_RESET_METHOD_MODE2;
 }

 static struct amdgpu_reset_handler *
@@ -619,13 +619,6 @@ static void soc15_program_aspm(struct amdgpu_device *adev)
 		adev->nbio.funcs->program_aspm(adev);
 }

-static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
-					   bool enable)
-{
-	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
-	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
-}
-
 const struct amdgpu_ip_block_version vega10_common_ip_block =
 {
 	.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -1125,6 +1118,11 @@ static int soc15_common_late_init(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		xgpu_ai_mailbox_get_irq(adev);

+	/* Enable selfring doorbell aperture late because doorbell BAR
+	 * aperture will change if resize BAR successfully in gmc sw_init.
+	 */
+	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
+
 	return 0;
 }

@@ -1182,7 +1180,8 @@ static int soc15_common_hw_init(void *handle)
 		adev->nbio.funcs->remap_hdp_registers(adev);

 	/* enable the doorbell aperture */
-	soc15_enable_doorbell_aperture(adev, true);
+	adev->nbio.funcs->enable_doorbell_aperture(adev, true);
+
 	/* HW doorbell routing policy: doorbell writing not
 	 * in SDMA/IH/MM/ACV range will be routed to CP. So
 	 * we need to init SDMA doorbell range prior
@@ -1198,8 +1197,14 @@ static int soc15_common_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	/* disable the doorbell aperture */
-	soc15_enable_doorbell_aperture(adev, false);
+	/* Disable the doorbell aperture and selfring doorbell aperture
+	 * separately in hw_fini because soc15_enable_doorbell_aperture
+	 * has been removed and there is no need to delay disabling
+	 * selfring doorbell.
+	 */
+	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);

 	if (amdgpu_sriov_vf(adev))
 		xgpu_ai_mailbox_put_irq(adev);
@@ -450,13 +450,6 @@ static void soc21_program_aspm(struct amdgpu_device *adev)
 		adev->nbio.funcs->program_aspm(adev);
 }

-static void soc21_enable_doorbell_aperture(struct amdgpu_device *adev,
-					   bool enable)
-{
-	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
-	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
-}
-
 const struct amdgpu_ip_block_version soc21_common_ip_block =
 {
 	.type = AMD_IP_BLOCK_TYPE_COMMON,
@@ -764,6 +757,11 @@ static int soc21_common_late_init(void *handle)
 		amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
 	}

+	/* Enable selfring doorbell aperture late because doorbell BAR
+	 * aperture will change if resize BAR successfully in gmc sw_init.
+	 */
+	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true);
+
 	return 0;
 }

@@ -797,7 +795,7 @@ static int soc21_common_hw_init(void *handle)
 	if (adev->nbio.funcs->remap_hdp_registers)
 		adev->nbio.funcs->remap_hdp_registers(adev);
 	/* enable the doorbell aperture */
-	soc21_enable_doorbell_aperture(adev, true);
+	adev->nbio.funcs->enable_doorbell_aperture(adev, true);

 	return 0;
 }
@@ -806,8 +804,13 @@ static int soc21_common_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

-	/* disable the doorbell aperture */
-	soc21_enable_doorbell_aperture(adev, false);
+	/* Disable the doorbell aperture and selfring doorbell aperture
+	 * separately in hw_fini because soc21_enable_doorbell_aperture
+	 * has been removed and there is no need to delay disabling
+	 * selfring doorbell.
+	 */
+	adev->nbio.funcs->enable_doorbell_aperture(adev, false);
+	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false);

 	if (amdgpu_sriov_vf(adev)) {
 		xgpu_nv_mailbox_put_irq(adev);
@@ -3128,9 +3128,12 @@ void amdgpu_dm_update_connector_after_detect(
 					    aconnector->edid);
 		}

-		aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
-		if (!aconnector->timing_requested)
-			dm_error("%s: failed to create aconnector->requested_timing\n", __func__);
+		if (!aconnector->timing_requested) {
+			aconnector->timing_requested =
+				kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+			if (!aconnector->timing_requested)
+				dm_error("failed to create aconnector->requested_timing\n");
+		}

 		drm_connector_update_edid_property(connector, aconnector->edid);
 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
@@ -7894,6 +7897,13 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
 			amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
 }

+static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
+{
+	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
+
+	return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
+}
+
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 				    struct dc_state *dc_state,
 				    struct drm_device *dev,
@@ -7968,6 +7978,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			continue;

 		dc_plane = dm_new_plane_state->dc_state;
+		if (!dc_plane)
+			continue;

 		bundle->surface_updates[planes_count].surface = dc_plane;
 		if (new_pcrtc_state->color_mgmt_changed) {
@@ -8034,11 +8046,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,

 		/*
 		 * Only allow immediate flips for fast updates that don't
-		 * change FB pitch, DCC state, rotation or mirroing.
+		 * change memory domain, FB pitch, DCC state, rotation or
+		 * mirroring.
 		 */
 		bundle->flip_addrs[planes_count].flip_immediate =
 			crtc->state->async_flip &&
-			acrtc_state->update_type == UPDATE_TYPE_FAST;
+			acrtc_state->update_type == UPDATE_TYPE_FAST &&
+			get_mem_type(old_plane_state->fb) == get_mem_type(fb);

 		timestamp_ns = ktime_get_ns();
 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
@@ -8550,6 +8564,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

+		if (!adev->dm.hdcp_workqueue)
+			continue;
+
 		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);

 		if (!connector)
@@ -8598,6 +8615,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

+		if (!adev->dm.hdcp_workqueue)
+			continue;
+
 		new_crtc_state = NULL;
 		old_crtc_state = NULL;

@@ -9616,8 +9636,9 @@ static int dm_update_plane_state(struct dc *dc,
 			return -EINVAL;
 		}

+		if (dm_old_plane_state->dc_state)
+			dc_plane_state_release(dm_old_plane_state->dc_state);

-		dc_plane_state_release(dm_old_plane_state->dc_state);
 		dm_new_plane_state->dc_state = NULL;

 		*lock_and_validation_needed = true;
@@ -10154,6 +10175,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
 		if (ret) {
 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+			ret = -EINVAL;
 			goto fail;
 		}
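The S/G-mode flicker fix above hinges on get_mem_type(): an async flip is only taken as "immediate" when the framebuffer stayed in the same TTM memory domain across the update. A standalone sketch of that guard with simplified types (hypothetical names, not the DM code):

    #include <stdbool.h>
    #include <stdio.h>

    enum mem_domain { DOMAIN_NONE, DOMAIN_VRAM, DOMAIN_GTT };

    struct fb { enum mem_domain domain; };

    /* Immediate (async) flips may only bypass the full update path when
     * the new framebuffer lives in the same domain as the old one. */
    static bool allow_immediate_flip(bool async_requested, bool fast_update,
                                     const struct fb *old_fb,
                                     const struct fb *new_fb)
    {
            return async_requested && fast_update &&
                   old_fb->domain == new_fb->domain;
    }

    int main(void)
    {
            struct fb in_vram = { DOMAIN_VRAM }, in_gtt = { DOMAIN_GTT };

            printf("vram->vram: %d\n",
                   allow_immediate_flip(true, true, &in_vram, &in_vram));
            printf("vram->gtt:  %d\n",
                   allow_immediate_flip(true, true, &in_vram, &in_gtt));
            return 0;
    }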
@@ -687,7 +687,6 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
 		return;

 	data[0] |= (1 << 1); // set bit 1 to 1
-	return;

 	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
 		return;
@@ -379,13 +379,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
 		if (aconnector->dc_sink && connector->state) {
 			struct drm_device *dev = connector->dev;
 			struct amdgpu_device *adev = drm_to_adev(dev);
-			struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
-			struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index];
-
-			connector->state->hdcp_content_type =
-				hdcp_w->hdcp_content_type[connector->index];
-			connector->state->content_protection =
-				hdcp_w->content_protection[connector->index];
+
+			if (adev->dm.hdcp_workqueue) {
+				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+				struct hdcp_workqueue *hdcp_w =
+					&hdcp_work[aconnector->dc_link->link_index];
+
+				connector->state->hdcp_content_type =
+					hdcp_w->hdcp_content_type[connector->index];
+				connector->state->content_protection =
+					hdcp_w->content_protection[connector->index];
+			}
 		}

 		if (aconnector->dc_sink) {
@@ -1406,6 +1410,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
 	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
 	if (ret != 0) {
 		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
+		ret = -EINVAL;
 		goto clean_exit;
 	}
@@ -89,6 +89,7 @@ void dc_fpu_begin(const char *function_name, const int line)

 	if (*pcpu == 1) {
 #if defined(CONFIG_X86)
+		migrate_disable();
 		kernel_fpu_begin();
 #elif defined(CONFIG_PPC64)
 		if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
@@ -129,6 +130,7 @@ void dc_fpu_end(const char *function_name, const int line)
 	if (*pcpu <= 0) {
 #if defined(CONFIG_X86)
 		kernel_fpu_end();
+		migrate_enable();
 #elif defined(CONFIG_PPC64)
 		if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
 			disable_kernel_vsx();
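Both dc_fpu hunks pair migrate_disable()/migrate_enable() around the x86 kernel-FPU region, so the task cannot move to another CPU between incrementing and decrementing the per-CPU usage counter. A thread-local analog of that nesting guard, as an illustration only (the kernel version is per-CPU, not per-thread):

    #include <assert.h>

    static _Thread_local int fpu_recursion_depth;

    static void fpu_begin(void)
    {
            /* kernel ordering: migrate_disable(), then kernel_fpu_begin() */
            if (fpu_recursion_depth++ == 0) {
                    /* acquire the expensive FPU state here */
            }
    }

    static void fpu_end(void)
    {
            assert(fpu_recursion_depth > 0);
            if (--fpu_recursion_depth == 0) {
                    /* release it here; kernel ordering: kernel_fpu_end(),
                     * then migrate_enable() */
            }
    }

    int main(void)
    {
            fpu_begin();
            fpu_begin();    /* nested use: the inner pair is a no-op */
            fpu_end();
            fpu_end();
            return 0;
    }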
@@ -878,6 +878,8 @@ void dcn32_clk_mgr_construct(
 		struct pp_smu_funcs *pp_smu,
 		struct dccg *dccg)
 {
+	struct clk_log_info log_info = {0};
+
 	clk_mgr->base.ctx = ctx;
 	clk_mgr->base.funcs = &dcn32_funcs;
 	if (ASICREV_IS_GC_11_0_2(clk_mgr->base.ctx->asic_id.hw_internal_rev)) {
@@ -911,6 +913,7 @@ void dcn32_clk_mgr_construct(
 		clk_mgr->base.clks.ref_dtbclk_khz = 268750;
 	}

+
 	/* integer part is now VCO frequency in kHz */
 	clk_mgr->base.dentist_vco_freq_khz = dcn32_get_vco_frequency_from_reg(clk_mgr);

@@ -918,6 +921,8 @@ void dcn32_clk_mgr_construct(
 	if (clk_mgr->base.dentist_vco_freq_khz == 0)
 		clk_mgr->base.dentist_vco_freq_khz = 4300000; /* Updated as per HW docs */

+	dcn32_dump_clk_registers(&clk_mgr->base.boot_snapshot, &clk_mgr->base, &log_info);
+
 	if (ctx->dc->debug.disable_dtb_ref_clk_switch &&
 	    clk_mgr->base.clks.ref_dtbclk_khz != clk_mgr->base.boot_snapshot.dtbclk) {
 		clk_mgr->base.clks.ref_dtbclk_khz = clk_mgr->base.boot_snapshot.dtbclk;
@@ -1707,6 +1707,9 @@ bool dc_remove_plane_from_context(
 	struct dc_stream_status *stream_status = NULL;
 	struct resource_pool *pool = dc->res_pool;

+	if (!plane_state)
+		return true;
+
 	for (i = 0; i < context->stream_count; i++)
 		if (context->streams[i] == stream) {
 			stream_status = &context->stream_status[i];
@@ -1454,6 +1454,7 @@ struct dc_link {

 	struct ddc_service *ddc;

+	enum dp_panel_mode panel_mode;
 	bool aux_mode;

 	/* Private to DC core */
@@ -144,7 +144,7 @@ struct test_pattern {
 	unsigned int cust_pattern_size;
 };

-#define SUBVP_DRR_MARGIN_US 600 // 600us for DRR margin (SubVP + DRR)
+#define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)

 enum mall_stream_type {
 	SUBVP_NONE, // subvp not in use
@@ -3031,10 +3031,12 @@ void dce110_enable_dp_link_output(
 	const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
 	unsigned int i;

+	/*
+	 * Add the logic to extract BOTH power up and power down sequences
+	 * from enable/disable link output and only call edp panel control
+	 * in enable_link_dp and disable_link_dp once.
+	 */
 	if (link->connector_signal == SIGNAL_TYPE_EDP) {
 		if (!link->dc->config.edp_no_power_sequencing)
 			link->dc->hwss.edp_power_control(link, true);
 		link->dc->hwss.edp_wait_for_hpd_ready(link, true);
 	}

@@ -3096,11 +3098,12 @@ void dce110_disable_link_output(struct dc_link *link,

 	link_hwss->disable_link_output(link, link_res, signal);
 	link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF;

-	if (signal == SIGNAL_TYPE_EDP &&
-	    link->dc->hwss.edp_backlight_control)
-		link->dc->hwss.edp_power_control(link, false);
-	else if (dmcu != NULL && dmcu->funcs->lock_phy)
+	/*
+	 * Add the logic to extract BOTH power up and power down sequences
+	 * from enable/disable link output and only call edp panel control
+	 * in enable_link_dp and disable_link_dp once.
+	 */
+	if (dmcu != NULL && dmcu->funcs->lock_phy)
 		dmcu->funcs->unlock_phy(dmcu);
 	dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
 }
@@ -2113,6 +2113,15 @@ void dcn20_optimize_bandwidth(
 	if (hubbub->funcs->program_compbuf_size)
 		hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);

+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+		dc_dmub_srv_p_state_delegate(dc,
+			true, context);
+		context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+		dc->clk_mgr->clks.fw_based_mclk_switching = true;
+	} else {
+		dc->clk_mgr->clks.fw_based_mclk_switching = false;
+	}
+
 	dc->clk_mgr->funcs->update_clocks(
 			dc->clk_mgr,
 			context,
@@ -983,13 +983,36 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
 }

 void dcn30_prepare_bandwidth(struct dc *dc,
 			     struct dc_state *context)
 {
+	bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
+	/* Any transition into an FPO config should disable MCLK switching first to avoid
+	 * driver and FW P-State synchronization issues.
+	 */
+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+		dc->optimized_required = true;
+		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
+	}
+
 	if (dc->clk_mgr->dc_mode_softmax_enabled)
 		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
 		    context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
 			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

 	dcn20_prepare_bandwidth(dc, context);
+	/*
+	 * enabled -> enabled: do not disable
+	 * enabled -> disabled: disable
+	 * disabled -> enabled: don't care
+	 * disabled -> disabled: don't care
+	 */
+	if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
+		dc_dmub_srv_p_state_delegate(dc, false, context);
+
+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
+		/* After disabling P-State, restore the original value to ensure we get the correct P-State
+		 * on the next optimize. */
+		context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
+	}
 }
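dcn30_prepare_bandwidth() above temporarily forces P-state switching off for any transition into or out of firmware-driven MCLK switching, then restores the caller's value so the next optimize pass still sees it. The save/clamp/restore shape, sketched with made-up types rather than the DC structures:

    #include <stdbool.h>

    struct clk_state {
            bool p_state_change_support;
            bool fw_based_mclk_switching;
    };

    /* Save/clamp/restore: force the conservative setting across the
     * risky transition, then hand the caller's original value back. */
    static void prepare_bandwidth(struct clk_state *clk, bool entering_fpo)
    {
            bool saved = clk->p_state_change_support;

            if (clk->fw_based_mclk_switching || entering_fpo)
                    clk->p_state_change_support = false;

            /* ... reprogram clocks while switching is safely disabled ... */

            if (clk->fw_based_mclk_switching || entering_fpo)
                    clk->p_state_change_support = saved;
    }

    int main(void)
    {
            struct clk_state clk = { true, true };

            prepare_bandwidth(&clk, false);
            return clk.p_state_change_support ? 0 : 1; /* restored: exits 0 */
    }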
@@ -701,7 +701,9 @@ static const struct dc_plane_cap plane_cap = {
 			.argb8888 = 167,
 			.nv12 = 167,
 			.fp16 = 167
 		}
 	},
+	16,
+	16
 };

 static const struct dc_debug_options debug_defaults_drv = {
@@ -295,6 +295,10 @@ void dcn31_init_hw(struct dc *dc)
 	if (dc->res_pool->hubbub->funcs->init_crb)
 		dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
 #endif
+
+	// Get DMCUB capabilities
+	dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
+	dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
 }

 void dcn31_dsc_pg_control(
@@ -274,7 +274,7 @@ static void dccg314_set_dpstreamclk(
 	}
 }

-void dccg314_init(struct dccg *dccg)
+static void dccg314_init(struct dccg *dccg)
 {
 	int otg_inst;

@@ -885,7 +885,7 @@ static const struct dc_plane_cap plane_cap = {
 static const struct dc_debug_options debug_defaults_drv = {
 	.disable_z10 = false,
 	.enable_z9_disable_interface = true,
-	.minimum_z8_residency_time = 3080,
+	.minimum_z8_residency_time = 2000,
 	.psr_skip_crtc_disable = true,
 	.disable_dmcu = true,
 	.force_abm_enable = false,
@@ -948,6 +948,7 @@ void dcn32_init_hw(struct dc *dc)
 	if (dc->ctx->dmub_srv) {
 		dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv->dmub);
 		dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr;
+		dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
 	}
 }

@@ -324,7 +324,6 @@ static const struct dcn10_link_enc_shift le_shift = {

 static const struct dcn10_link_enc_mask le_mask = {
 	LINK_ENCODER_MASK_SH_LIST_DCN31(_MASK), \
-
 	//DPCS_DCN31_MASK_SH_LIST(_MASK)
 };

@@ -2024,7 +2023,7 @@ int dcn32_populate_dml_pipes_from_context(
 	// In general cases we want to keep the dram clock change requirement
 	// (prefer configs that support MCLK switch). Only override to false
 	// for SubVP
-	if (subvp_in_use)
+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || subvp_in_use)
 		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = false;
 	else
 		context->bw_ctx.dml.soc.dram_clock_change_requirement_final = true;
@@ -2080,6 +2079,14 @@ static struct resource_funcs dcn32_res_pool_funcs = {
 	.restore_mall_state = dcn32_restore_mall_state,
 };

+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+	/* DCN32 support max 4 pipes */
+	value = value & 0xf;
+	return value;
+}
+

 static bool dcn32_resource_construct(
 	uint8_t num_virtual_links,
@@ -2093,27 +2100,28 @@ static bool dcn32_resource_construct(
 	uint32_t pipe_fuses = 0;
 	uint32_t num_pipes = 4;

 #undef REG_STRUCT
 #define REG_STRUCT bios_regs
 	bios_regs_init();

 #undef REG_STRUCT
 #define REG_STRUCT clk_src_regs
 	clk_src_regs_init(0, A),
 	clk_src_regs_init(1, B),
 	clk_src_regs_init(2, C),
 	clk_src_regs_init(3, D),
 	clk_src_regs_init(4, E);

 #undef REG_STRUCT
 #define REG_STRUCT abm_regs
 	abm_regs_init(0),
 	abm_regs_init(1),
 	abm_regs_init(2),
 	abm_regs_init(3);

 #undef REG_STRUCT
 #define REG_STRUCT dccg_regs
 	dccg_regs_init();

 	DC_FP_START();

@@ -2122,7 +2130,7 @@ static bool dcn32_resource_construct(
 	pool->base.res_cap = &res_cap_dcn32;
 	/* max number of pipes for ASIC before checking for pipe fuses */
 	num_pipes = pool->base.res_cap->num_timing_generator;
-	pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
+	pipe_fuses = read_pipe_fuses(ctx);

 	for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
 		if (pipe_fuses & 1 << i)
|
||||
.restore_mall_state = dcn32_restore_mall_state,
|
||||
};
|
||||
|
||||
static uint32_t read_pipe_fuses(struct dc_context *ctx)
|
||||
{
|
||||
uint32_t value = REG_READ(CC_DC_PIPE_DIS);
|
||||
/* DCN321 support max 4 pipes */
|
||||
value = value & 0xf;
|
||||
return value;
|
||||
}
|
||||
|
||||
|
||||
static bool dcn321_resource_construct(
|
||||
uint8_t num_virtual_links,
|
||||
@ -1674,7 +1682,7 @@ static bool dcn321_resource_construct(
|
||||
pool->base.res_cap = &res_cap_dcn321;
|
||||
/* max number of pipes for ASIC before checking for pipe fuses */
|
||||
num_pipes = pool->base.res_cap->num_timing_generator;
|
||||
pipe_fuses = REG_READ(CC_DC_PIPE_DIS);
|
||||
pipe_fuses = read_pipe_fuses(ctx);
|
||||
|
||||
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++)
|
||||
if (pipe_fuses & 1 << i)
|
||||
|
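Both resource files gain the same read_pipe_fuses() helper: the fuse register is masked to the four pipe bits these ASICs actually implement, so stray high bits can no longer be miscounted as harvested pipes. A compilable sketch of the masking (hypothetical wrapper around a raw register value, real 0xf mask):

    #include <stdio.h>

    #define MAX_PIPES 4

    static unsigned int read_pipe_fuses(unsigned int raw_reg)
    {
            return raw_reg & 0xf;   /* drop invalid bits above the pipe mask */
    }

    int main(void)
    {
            unsigned int fuses = read_pipe_fuses(0xffffff0u); /* noisy register */
            int usable = 0;

            for (int i = 0; i < MAX_PIPES; i++)
                    if (!(fuses & (1u << i)))
                            usable++;       /* bit clear: pipe present */
            printf("usable pipes: %d\n", usable);
            return 0;
    }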
@@ -917,19 +917,19 @@ void dcn20_populate_dml_writeback_from_context(struct dc *dc,
 }

 void dcn20_fpu_set_wb_arb_params(struct mcif_arb_params *wb_arb_params,
 				 struct dc_state *context,
 				 display_e2e_pipe_params_st *pipes,
 				 int pipe_cnt, int i)
 {
 	int k;

 	dc_assert_fp_enabled();

 	for (k = 0; k < sizeof(wb_arb_params->cli_watermark)/sizeof(wb_arb_params->cli_watermark[0]); k++) {
 		wb_arb_params->cli_watermark[k] = get_wm_writeback_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 		wb_arb_params->pstate_watermark[k] = get_wm_writeback_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	}
 	wb_arb_params->time_per_pixel = 16.0 * 1000 / (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk / 1000); /* 4 bit fraction, ms */
 }

 static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
@@ -1037,11 +1037,11 @@ static void dcn20_adjust_freesync_v_startup(
 	*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
 }

-void dcn20_calculate_dlg_params(
-		struct dc *dc, struct dc_state *context,
-		display_e2e_pipe_params_st *pipes,
-		int pipe_cnt,
-		int vlevel)
+void dcn20_calculate_dlg_params(struct dc *dc,
+				struct dc_state *context,
+				display_e2e_pipe_params_st *pipes,
+				int pipe_cnt,
+				int vlevel)
 {
 	int i, pipe_idx;

@@ -1083,6 +1083,7 @@ void dcn20_calculate_dlg_params(
 		pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 		pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
 		pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
 		if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
 			// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
@@ -1091,6 +1092,7 @@ void dcn20_calculate_dlg_params(
 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
 			context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
 		}
+
 		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
 			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
 		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
@@ -1118,6 +1120,7 @@ void dcn20_calculate_dlg_params(
 		if (!context->res_ctx.pipe_ctx[i].stream)
 			continue;

+		/* cstate disabled on 201 */
 		if (dc->ctx->dce_version == DCN_VERSION_2_01)
 			cstate_en = false;

@@ -1201,11 +1204,10 @@ static void swizzle_to_dml_params(
 	}
 }

-int dcn20_populate_dml_pipes_from_context(
-		struct dc *dc,
-		struct dc_state *context,
-		display_e2e_pipe_params_st *pipes,
-		bool fast_validate)
+int dcn20_populate_dml_pipes_from_context(struct dc *dc,
+					  struct dc_state *context,
+					  display_e2e_pipe_params_st *pipes,
+					  bool fast_validate)
 {
 	int pipe_cnt, i;
 	bool synchronized_vblank = true;
@@ -1257,6 +1259,8 @@ int dcn20_populate_dml_pipes_from_context(

 		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;

+		pipes[pipe_cnt].pipe.dest.use_maximum_vstartup = dc->ctx->dce_version == DCN_VERSION_2_01;
+
 		pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
 		/* todo: rotation?*/
 		pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
@@ -1296,8 +1300,7 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].pipe.dest.pixel_rate_mhz *= 2;
 		pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;
 		pipes[pipe_cnt].dout.dp_lanes = 4;
-		if (res_ctx->pipe_ctx[i].stream->link)
-			pipes[pipe_cnt].dout.dp_rate = dm_dp_rate_na;
+		pipes[pipe_cnt].dout.dp_rate = dm_dp_rate_na;
 		pipes[pipe_cnt].dout.is_virtual = 0;
 		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
 		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
@@ -1357,7 +1360,6 @@ int dcn20_populate_dml_pipes_from_context(
 			pipes[pipe_cnt].dout.is_virtual = 1;
 			pipes[pipe_cnt].dout.output_type = dm_dp;
 			pipes[pipe_cnt].dout.dp_lanes = 4;
-			pipes[pipe_cnt].dout.dp_rate = dm_dp_rate_hbr2;
 		}

 		switch (res_ctx->pipe_ctx[i].stream->timing.display_color_depth) {
@@ -1507,6 +1509,7 @@ int dcn20_populate_dml_pipes_from_context(
 		default:
 			break;
 		}
+
 		pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
 		pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
 		pipes[pipe_cnt].pipe.src.viewport_x_y = scl->viewport.x;
@@ -1615,13 +1618,12 @@ int dcn20_populate_dml_pipes_from_context(
 	return pipe_cnt;
 }

-void dcn20_calculate_wm(
-		struct dc *dc, struct dc_state *context,
-		display_e2e_pipe_params_st *pipes,
-		int *out_pipe_cnt,
-		int *pipe_split_from,
-		int vlevel,
-		bool fast_validate)
+void dcn20_calculate_wm(struct dc *dc, struct dc_state *context,
+			display_e2e_pipe_params_st *pipes,
+			int *out_pipe_cnt,
+			int *pipe_split_from,
+			int vlevel,
+			bool fast_validate)
 {
 	int pipe_cnt, i, pipe_idx;

@@ -1733,8 +1735,11 @@ void dcn20_calculate_wm(
 	context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 }

-void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
-		struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
+void dcn20_update_bounding_box(struct dc *dc,
+			       struct _vcs_dpi_soc_bounding_box_st *bb,
+			       struct pp_smu_nv_clock_table *max_clocks,
+			       unsigned int *uclk_states,
+			       unsigned int num_states)
 {
 	int num_calculated_states = 0;
 	int min_dcfclk = 0;
@@ -1796,9 +1801,8 @@ void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
 	bb->clock_limits[num_calculated_states].state = bb->num_states;
 }

-void dcn20_cap_soc_clocks(
-		struct _vcs_dpi_soc_bounding_box_st *bb,
-		struct pp_smu_nv_clock_table max_clocks)
+void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb,
+			  struct pp_smu_nv_clock_table max_clocks)
 {
 	int i;

@@ -1954,80 +1958,80 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
 }

 bool dcn20_validate_bandwidth_fp(struct dc *dc,
 				 struct dc_state *context,
 				 bool fast_validate)
 {
 	bool voltage_supported = false;
 	bool full_pstate_supported = false;
 	bool dummy_pstate_supported = false;
 	double p_state_latency_us;

 	dc_assert_fp_enabled();

 	p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
 	context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
 		dc->debug.disable_dram_clock_change_vactive_support;
 	context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
 		dc->debug.enable_dram_clock_change_one_display_vactive;

 	/*Unsafe due to current pipe merge and split logic*/
 	ASSERT(context != dc->current_state);

 	if (fast_validate) {
 		return dcn20_validate_bandwidth_internal(dc, context, true);
 	}

 	// Best case, we support full UCLK switch latency
 	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
 	full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;

 	if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 ||
 	    (voltage_supported && full_pstate_supported)) {
 		context->bw_ctx.bw.dcn.clk.p_state_change_support = full_pstate_supported;
 		goto restore_dml_state;
 	}

 	// Fallback: Try to only support G6 temperature read latency
 	context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us;

 	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
 	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;

 	if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
 		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
 		goto restore_dml_state;
 	}

 	// ERROR: fallback is supposed to always work.
 	ASSERT(false);

 restore_dml_state:
 	context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
 	return voltage_supported;
 }

 void dcn20_fpu_set_wm_ranges(int i,
 			     struct pp_smu_wm_range_sets *ranges,
 			     struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
 {
 	dc_assert_fp_enabled();

 	ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
 	ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
 }

 void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v,
 			     int vlevel,
 			     int max_mpc_comb,
 			     int pipe_idx,
 			     bool is_validating_bw)
 {
 	dc_assert_fp_enabled();

 	if (is_validating_bw)
 		v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] *= 2;
 	else
 		v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
 }

 int dcn21_populate_dml_pipes_from_context(struct dc *dc,
@@ -2329,7 +2333,7 @@ void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
 		k++;
 	}

-	memcpy(dcn2_1_soc.clock_limits, s, sizeof(dcn2_1_soc.clock_limits));
+	memcpy(&dcn2_1_soc.clock_limits, s, sizeof(dcn2_1_soc.clock_limits));

 	if (clk_table->num_entries) {
 		dcn2_1_soc.num_states = clk_table->num_entries + 1;
@@ -368,7 +368,9 @@ void dcn30_fpu_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
 	dc_assert_fp_enabled();

 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
-		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+		if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching ||
+		    context->bw_ctx.dml.soc.dram_clock_change_latency_us == 0)
+			context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
 	}
@@ -563,6 +565,20 @@ void dcn30_fpu_calculate_wm_and_dlg(
 		pipe_idx++;
 	}

+	// WA: restrict FPO to use first non-strobe mode (NV24 BW issue)
+	if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching &&
+	    dc->dml.soc.num_chans <= 4 &&
+	    context->bw_ctx.dml.vba.DRAMSpeed <= 1700 &&
+	    context->bw_ctx.dml.vba.DRAMSpeed >= 1500) {
+
+		for (i = 0; i < dc->dml.soc.num_states; i++) {
+			if (dc->dml.soc.clock_limits[i].dram_speed_mts > 1700) {
+				context->bw_ctx.dml.vba.DRAMSpeed = dc->dml.soc.clock_limits[i].dram_speed_mts;
+				break;
+			}
+		}
+	}
+
 	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);

 	if (!pstate_en)
@@ -149,8 +149,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
 	.num_states = 5,
 	.sr_exit_time_us = 16.5,
 	.sr_enter_plus_exit_time_us = 18.5,
-	.sr_exit_z8_time_us = 210.0,
-	.sr_enter_plus_exit_z8_time_us = 310.0,
+	.sr_exit_z8_time_us = 268.0,
+	.sr_enter_plus_exit_z8_time_us = 393.0,
 	.writeback_latency_us = 12.0,
 	.dram_channel_width_bytes = 4,
 	.round_trip_ping_latency_dcfclk_cycles = 106,
@@ -109,7 +109,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
 		{
 			.state = 0,
 			.dcfclk_mhz = 1564.0,
-			.fabricclk_mhz = 400.0,
+			.fabricclk_mhz = 2500.0,
 			.dispclk_mhz = 2150.0,
 			.dppclk_mhz = 2150.0,
 			.phyclk_mhz = 810.0,
@@ -117,7 +117,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
 			.phyclk_d32_mhz = 625.0,
 			.socclk_mhz = 1200.0,
 			.dscclk_mhz = 716.667,
-			.dram_speed_mts = 16000.0,
+			.dram_speed_mts = 18000.0,
 			.dtbclk_mhz = 1564.0,
 		},
 	},
@@ -148,7 +148,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
 	.max_avg_fabric_bw_use_normal_percent = 60.0,
 	.max_avg_dram_bw_use_normal_strobe_percent = 50.0,
 	.max_avg_dram_bw_use_normal_percent = 15.0,
-	.num_chans = 8,
+	.num_chans = 24,
 	.dram_channel_width_bytes = 2,
 	.fabric_datapath_to_dcn_data_return_bytes = 64,
 	.return_bus_width_bytes = 64,
@@ -1331,6 +1331,11 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
 		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
 			!= dm_dram_clock_change_unsupported;

+	/* Pstate change might not be supported by hardware, but it might be
+	 * possible with firmware driven vertical blank stretching.
+	 */
+	context->bw_ctx.bw.dcn.clk.p_state_change_support |= context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
+
 	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 	context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
 	context->bw_ctx.bw.dcn.clk.ref_dtbclk_khz = context->bw_ctx.dml.vba.DTBCLKPerState[vlevel] * 1000;
@@ -2871,3 +2876,9 @@ bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint
 	}
 	return vactive_found;
 }
+
+void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
+{
+	dc_assert_fp_enabled();
+	dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0;
+}
@@ -80,4 +80,6 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co

 bool dcn32_find_vactive_pipe(struct dc *dc, const struct dc_state *context, uint32_t vactive_margin_req);

+void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb);
+
 #endif
@@ -106,16 +106,16 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
 	.clock_limits = {
 		{
 			.state = 0,
-			.dcfclk_mhz = 1564.0,
-			.fabricclk_mhz = 400.0,
-			.dispclk_mhz = 2150.0,
-			.dppclk_mhz = 2150.0,
+			.dcfclk_mhz = 1434.0,
+			.fabricclk_mhz = 2250.0,
+			.dispclk_mhz = 1720.0,
+			.dppclk_mhz = 1720.0,
 			.phyclk_mhz = 810.0,
 			.phyclk_d18_mhz = 667.0,
-			.phyclk_d32_mhz = 625.0,
+			.phyclk_d32_mhz = 313.0,
 			.socclk_mhz = 1200.0,
-			.dscclk_mhz = 716.667,
-			.dram_speed_mts = 1600.0,
+			.dscclk_mhz = 573.333,
+			.dram_speed_mts = 16000.0,
 			.dtbclk_mhz = 1564.0,
 		},
 	},
@@ -125,14 +125,14 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
 	.sr_exit_z8_time_us = 285.0,
 	.sr_enter_plus_exit_z8_time_us = 320,
 	.writeback_latency_us = 12.0,
-	.round_trip_ping_latency_dcfclk_cycles = 263,
+	.round_trip_ping_latency_dcfclk_cycles = 207,
 	.urgent_latency_pixel_data_only_us = 4,
 	.urgent_latency_pixel_mixed_with_vm_data_us = 4,
 	.urgent_latency_vm_data_only_us = 4,
-	.fclk_change_latency_us = 20,
-	.usr_retraining_latency_us = 2,
-	.smn_latency_us = 2,
-	.mall_allocated_for_dcn_mbytes = 64,
+	.fclk_change_latency_us = 7,
+	.usr_retraining_latency_us = 0,
+	.smn_latency_us = 0,
+	.mall_allocated_for_dcn_mbytes = 32,
 	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
 	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
 	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
@@ -1927,6 +1927,11 @@ static void disable_link_dp(struct dc_link *link,

 	dp_disable_link_phy(link, link_res, signal);

+	if (link->connector_signal == SIGNAL_TYPE_EDP) {
+		if (!link->dc->config.edp_no_power_sequencing)
+			link->dc->hwss.edp_power_control(link, false);
+	}
+
 	if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		/* set the sink to SST mode after disabling the link */
 		enable_mst_on_sink(link, false);
@@ -1596,7 +1596,10 @@ bool perform_link_training_with_retries(
 				 * Report and continue with eDP panel mode to
 				 * perform eDP link training with right settings
 				 */
-				cp_psp->funcs.enable_assr(cp_psp->handle, link);
+				bool result;
+				result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
+				if (!result && link->panel_mode != DP_PANEL_MODE_EDP)
+					panel_mode = DP_PANEL_MODE_DEFAULT;
 			}
 		}

@@ -83,6 +83,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
 			ASSERT(result == DC_OK);
 		}
 	}
+	link->panel_mode = panel_mode;
 	DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
 			"eDP panel mode enabled: %d \n",
 			link->link_index,
@@ -130,12 +130,13 @@ void dmub_dcn32_reset(struct dmub_srv *dmub)
 	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
 	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
 	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
+	REG_WRITE(DMCUB_OUTBOX0_RPTR, 0);
+	REG_WRITE(DMCUB_OUTBOX0_WPTR, 0);
 	REG_WRITE(DMCUB_SCRATCH0, 0);
 }

 void dmub_dcn32_reset_release(struct dmub_srv *dmub)
 {
-	REG_WRITE(DMCUB_GPINT_DATAIN1, 0);
 	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0);
 	REG_WRITE(DMCUB_SCRATCH15, dmub->psp_version & 0x001100FF);
 	REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1);
@@ -104,6 +104,7 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal)
 {
 	return (signal == SIGNAL_TYPE_DISPLAY_PORT ||
 		signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+		signal == SIGNAL_TYPE_VIRTUAL ||
 		dc_is_hdmi_signal(signal));
 }

@@ -62,8 +62,8 @@
 #define CTF_OFFSET_HOTSPOT 5
 #define CTF_OFFSET_MEM 5
 
-static const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
-static const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+extern const int pmfw_decoded_link_speed[5];
+extern const int pmfw_decoded_link_width[7];
 
 #define DECODE_GEN_SPEED(gen_speed_idx) (pmfw_decoded_link_speed[gen_speed_idx])
 #define DECODE_LANE_WIDTH(lane_width_idx) (pmfw_decoded_link_width[lane_width_idx])
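The header change above is the usual cure for `static const` arrays defined in a shared header: every translation unit including the header gets its own private copy of the data (plus defined-but-not-used warnings wherever the macros go unused). Declaring the arrays `extern` in the header and defining them once in smu_v13_0.c (next hunk) leaves a single shared object. A sketch of the split, with both halves shown in one file for brevity:

#include <stdio.h>

/* --- header half: declaration only, no storage --- */
extern const int pmfw_decoded_link_width[7];
#define DECODE_LANE_WIDTH(idx) (pmfw_decoded_link_width[idx])

/* --- single .c half: the one definition --- */
const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};

int main(void)
{
	printf("lane width index 3 decodes to x%d\n", DECODE_LANE_WIDTH(3));
	return 0;
}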
@@ -85,6 +85,9 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin");
 static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
 static const int link_speed[] = {25, 50, 80, 160};
 
+const int pmfw_decoded_link_speed[5] = {1, 2, 3, 4, 5};
+const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16};
+
 int smu_v13_0_init_microcode(struct smu_context *smu)
 {
 	struct amdgpu_device *adev = smu->adev;
@@ -1140,7 +1140,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
 
 	/* panel power on related mipi dsi vbt sequences */
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+	msleep(intel_dsi->panel_on_delay);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
@@ -763,17 +763,6 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 		gpiod_set_value_cansleep(intel_dsi->gpio_backlight, 0);
 }
 
-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
-{
-	struct intel_connector *connector = intel_dsi->attached_connector;
-
-	/* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
-	if (is_vid_mode(intel_dsi) && connector->panel.vbt.dsi.seq_version >= 3)
-		return;
-
-	msleep(msec);
-}
-
 void intel_dsi_log_params(struct intel_dsi *intel_dsi)
 {
 	struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
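intel_dsi_msleep(), deleted above, skipped the delay entirely for v3+ VBTs in video mode on the assumption that those sequences embed their own delays; in practice some panels then lost their power-on/off delay, hence the switch to a plain msleep() at every call site. A compressed userspace sketch of the behavioral difference (the predicates here are stub globals, not the i915 code):

#include <stdbool.h>
#include <stdio.h>

static bool vid_mode = true;	/* stands in for is_vid_mode() */
static int vbt_seq_version = 3;

static void sleep_ms(int ms) { printf("sleep %d ms\n", ms); }

/* old helper: conditionally skipped the wait */
static void old_dsi_msleep(int ms)
{
	if (vid_mode && vbt_seq_version >= 3)
		return;		/* panel_on_delay silently dropped */
	sleep_ms(ms);
}

int main(void)
{
	old_dsi_msleep(100);	/* prints nothing on v3 vid-mode */
	sleep_ms(100);		/* the fix: always honor the delay */
	return 0;
}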
@@ -16,7 +16,6 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
 void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
 void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
 				 enum mipi_seq seq_id);
-void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
 void intel_dsi_log_params(struct intel_dsi *intel_dsi);
 
 #endif /* __INTEL_DSI_VBT_H__ */
@@ -111,6 +111,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
+	int pipe_src_w = drm_rect_width(&crtc_state->pipe_src);
+	int pipe_src_h = drm_rect_height(&crtc_state->pipe_src);
 	int min_src_w, min_src_h, min_dst_w, min_dst_h;
 	int max_src_w, max_src_h, max_dst_w, max_dst_h;
 
@@ -207,6 +209,21 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
 		return -EINVAL;
 	}
 
+	/*
+	 * The pipe scaler does not use all the bits of PIPESRC, at least
+	 * on the earlier platforms. So even when we're scaling a plane
+	 * the *pipe* source size must not be too large. For simplicity
+	 * we assume the limits match the scaler source size limits. Might
+	 * not be 100% accurate on all platforms, but good enough for now.
+	 */
+	if (pipe_src_w > max_src_w || pipe_src_h > max_src_h) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "scaler_user index %u.%u: pipe src size %ux%u "
+			    "is out of scaler range\n",
+			    crtc->pipe, scaler_user, pipe_src_w, pipe_src_h);
+		return -EINVAL;
+	}
+
 	/* mark this plane as a scaler user in crtc_state */
 	scaler_state->scaler_users |= (1 << scaler_user);
 	drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
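The added check rejects a configuration whose pipe source size exceeds what the scaler can sample, failing the atomic check phase instead of programming the hardware and misrendering. A standalone sketch of the same validation shape (the limit values below are invented for illustration, not real platform limits):

#include <stdio.h>

#define MAX_SRC_W 5120	/* illustrative limit only */
#define MAX_SRC_H 4096

static int check_pipe_src(int w, int h)
{
	if (w > MAX_SRC_W || h > MAX_SRC_H) {
		fprintf(stderr, "pipe src %dx%d out of scaler range\n", w, h);
		return -1;	/* caller fails the atomic check phase */
	}
	return 0;
}

int main(void)
{
	return check_pipe_src(7680, 4320) ? 1 : 0;	/* rejected */
}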
@@ -737,7 +737,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
-	struct intel_connector *connector = to_intel_connector(conn_state->connector);
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;
 	enum port port;
@@ -779,21 +778,10 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
 	if (!IS_GEMINILAKE(dev_priv))
 		intel_dsi_prepare(encoder, pipe_config);
 
+	/* Give the panel time to power-on and then deassert its reset */
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
-
-	/*
-	 * Give the panel time to power-on and then deassert its reset.
-	 * Depending on the VBT MIPI sequences version the deassert-seq
-	 * may contain the necessary delay, intel_dsi_msleep() will skip
-	 * the delay in that case. If there is no deassert-seq, then an
-	 * unconditional msleep is used to give the panel time to power-on.
-	 */
-	if (connector->panel.vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
-		intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
-		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
-	} else {
-		msleep(intel_dsi->panel_on_delay);
-	}
+	msleep(intel_dsi->panel_on_delay);
+	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
 
 	if (IS_GEMINILAKE(dev_priv)) {
 		glk_cold_boot = glk_dsi_enable_io(encoder);
@@ -827,7 +815,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
 		msleep(20); /* XXX */
 		for_each_dsi_port(port, intel_dsi->ports)
 			dpi_send_cmd(intel_dsi, TURN_ON, false, port);
-		intel_dsi_msleep(intel_dsi, 100);
+		msleep(100);
 
 		intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 
@@ -949,7 +937,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
 	/* Assert reset */
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
 
-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
+	msleep(intel_dsi->panel_off_delay);
 	intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
 
 	intel_dsi->panel_power_off_time = ktime_get_boottime();
@@ -635,9 +635,10 @@ static bool is_ver_8bit(struct intel_uc_fw_ver *ver)
 	return ver->major < 0xFF && ver->minor < 0xFF && ver->patch < 0xFF;
 }
 
-static bool guc_check_version_range(struct intel_uc_fw *uc_fw)
+static int guc_check_version_range(struct intel_uc_fw *uc_fw)
 {
 	struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
+	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
 
 	/*
 	 * GuC version number components are defined as being 8-bits.
@@ -646,24 +647,24 @@ static bool guc_check_version_range(struct intel_uc_fw *uc_fw)
 	 */
 
 	if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
-		gt_warn(__uc_fw_to_gt(uc_fw), "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
+		gt_warn(gt, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
 			intel_uc_fw_type_repr(uc_fw->type),
 			uc_fw->file_selected.ver.major,
 			uc_fw->file_selected.ver.minor,
 			uc_fw->file_selected.ver.patch);
-		return false;
+		return -EINVAL;
 	}
 
 	if (!is_ver_8bit(&guc->submission_version)) {
-		gt_warn(__uc_fw_to_gt(uc_fw), "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
+		gt_warn(gt, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
 			intel_uc_fw_type_repr(uc_fw->type),
 			guc->submission_version.major,
 			guc->submission_version.minor,
 			guc->submission_version.patch);
-		return false;
+		return -EINVAL;
 	}
 
-	return true;
+	return i915_inject_probe_error(gt->i915, -EINVAL);
 }
 
 static int check_fw_header(struct intel_gt *gt,
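Note the success path above: instead of a bare `return 0`, the function returns i915_inject_probe_error(), which yields 0 in normal operation but can be made to return the given error under i915's fault-injection testing, so the new error paths in the caller also get exercised. A toy version of the idiom (the injection switch here is a plain global, unlike the real helper):

#include <stdio.h>

static int injection_armed;	/* stands in for i915's fault injection */

static int inject_probe_error(int err)
{
	return injection_armed ? err : 0;
}

static int check_version_range(void)
{
	/* ... the real range checks would return -EINVAL here ... */
	return inject_probe_error(-22);	/* 0 unless injection is armed */
}

int main(void)
{
	printf("normal: %d\n", check_version_range());	/* 0 */
	injection_armed = 1;
	printf("injected: %d\n", check_version_range());	/* -22 */
	return 0;
}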
@@ -772,8 +773,11 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
 	if (err)
 		goto fail;
 
-	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC && !guc_check_version_range(uc_fw))
-		goto fail;
+	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC) {
+		err = guc_check_version_range(uc_fw);
+		if (err)
+			goto fail;
+	}
 
 	if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
 		/* Check the file's major version was as it claimed */
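This is the caller-side half of the "actually return an error" fix: the old code jumped to the fail label with err still 0, so the fetch path could take the failure branch while reporting success. Converting the check to return an errno and assigning it to err before the goto closes that hole. A distilled sketch of the bug and the fix:

#include <stdbool.h>
#include <stdio.h>

static bool check_old(void) { return false; }	/* bool: false == failure */
static int check_new(void) { return -22; }	/* errno style: -EINVAL-ish */

static int fetch_old(void)
{
	int err = 0;

	if (!check_old())
		goto fail;
	return 0;
fail:
	return err;	/* BUG: err is still 0, failure goes unnoticed */
}

static int fetch_new(void)
{
	int err = check_new();

	if (err)
		goto fail;
	return 0;
fail:
	return err;	/* correctly propagates -22 */
}

int main(void)
{
	printf("old: %d, new: %d\n", fetch_old(), fetch_new());
	return 0;
}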
@@ -1134,6 +1134,8 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
 static const struct intel_device_info mtl_info = {
 	XE_HP_FEATURES,
 	XE_LPDP_FEATURES,
+	.__runtime.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+					 BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
 	/*
 	 * Real graphics IP version will be obtained from hardware GMD_ID
 	 * register. Value provided here is just for sanity checking.
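cpu_transcoder_mask is a bitmask of the CPU transcoders the device actually has, and display code iterates only over bits set in it, so a missing mask on MTL left the driver with no transcoders to drive. A small sketch of the mask idiom (the enum values and loop are invented for illustration):

#include <stdio.h>

#define BIT(n) (1u << (n))

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_D };

static const unsigned int cpu_transcoder_mask =
	BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
	BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

int main(void)
{
	for (int t = TRANSCODER_A; t <= TRANSCODER_D; t++)
		if (cpu_transcoder_mask & BIT(t))
			printf("init transcoder %c\n", 'A' + t);
	return 0;
}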