mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Merge tag 'amd-drm-next-5.20-2022-07-26' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amdgpu:
- VCN4 fixes
- RAS support for UMC 8.10
- ACP support for jadeite platforms
- NBIO HDP flush fixes
- Misc spelling and grammar fixes
- Runtime PM fixes
- Non-DC HPD fix
- Clean up amdgpu DM code
- DSC fixes
- Expose some additional GFXOFF data via debugfs
- More FP clean up for new DCN blocks
- PPC DC FP fixes
- DCN 3.1.4 fixes
- DC DML stack usage fixes
- GMC fixes
- SPM fixes for RDNA2

amdkfd:
- MMU notifier fix
- Mutex fix

UAPI:
- Add a comment about VCN4 unified queues
- IP version information for UMDs
  Proposed mesa change:
  https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/17411/diffs?commit_id=c8a63590dfd0d64e6e6a634dcfed993f135dd075

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220726181536.5759-1-alexander.deucher@amd.com
commit ee8b1ef9a6
@@ -63,3 +63,44 @@ gpu_metrics

.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
   :doc: gpu_metrics

GFXOFF
======

GFXOFF is a feature found in most recent GPUs that saves power at runtime. The
card's RLC (RunList Controller) firmware powers off the gfx engine
dynamically when there is no workload on gfx or compute pipes. GFXOFF is on by
default on supported GPUs.

Userspace can interact with GFXOFF through a debugfs interface:

``amdgpu_gfxoff``
-----------------

Use it to enable/disable GFXOFF, and to check whether it is currently
enabled/disabled::

  $ xxd -l1 -p /sys/kernel/debug/dri/0/amdgpu_gfxoff
  01

- Write 0 to disable it, and 1 to enable it.
- Read 0 means it's disabled, 1 means it's enabled.

If it's enabled, the GPU is free to enter GFXOFF mode as needed. Disabled
means that it will never enter GFXOFF mode.
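
The read example above exposes raw bytes rather than ASCII digits, so a plain
``echo 1`` will not work if the write side matches. A minimal sketch, assuming
the write path consumes raw 32-bit little-endian values and the first DRM
device sits at ``dri/0``::

  # disable GFXOFF (write a zero u32), then re-enable it and read it back
  $ printf '\x00\x00\x00\x00' > /sys/kernel/debug/dri/0/amdgpu_gfxoff
  $ printf '\x01\x00\x00\x00' > /sys/kernel/debug/dri/0/amdgpu_gfxoff
  $ xxd -l1 -p /sys/kernel/debug/dri/0/amdgpu_gfxoff
  01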

``amdgpu_gfxoff_status``
------------------------

Read it to check the current GFXOFF status of a GPU::

  $ xxd -l1 -p /sys/kernel/debug/dri/0/amdgpu_gfxoff_status
  02

- 0: GPU is in GFXOFF state, the gfx engine is powered down.
- 1: Transition out of GFXOFF state
- 2: Not in GFXOFF state
- 3: Transition into GFXOFF state

If GFXOFF is enabled, the value will transition between 0 and 3, settling at
0 whenever possible. When it's disabled, the value is always 2. Reads return
``-EINVAL`` if it's not supported.
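
To watch the transitions live while a workload starts and stops, polling the
file is enough (a sketch; assumes the same debugfs path as above and that
``watch`` is available)::

  $ watch -n 0.5 "xxd -l1 -p /sys/kernel/debug/dri/0/amdgpu_gfxoff_status"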
@@ -4,7 +4,7 @@ config DRM_AMDGPU_SI
	depends on DRM_AMDGPU
	help
	  Choose this option if you want to enable experimental support
	  for SI asics.
	  for SI (Southern Islands) asics.

	  SI is already supported in radeon. Experimental support for SI
	  in amdgpu will be disabled by default and is still provided by
@@ -16,7 +16,8 @@ config DRM_AMDGPU_CIK
	bool "Enable amdgpu support for CIK parts"
	depends on DRM_AMDGPU
	help
	  Choose this option if you want to enable support for CIK asics.
	  Choose this option if you want to enable support for CIK (Sea
	  Islands) asics.

	  CIK is already supported in radeon. Support for CIK in amdgpu
	  will be disabled by default and is still provided by radeon.
@@ -93,7 +93,7 @@ amdgpu-y += \

# add UMC block
amdgpu-y += \
	umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o
	umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o

# add IH block
amdgpu-y += \
@@ -197,6 +197,7 @@ extern uint amdgpu_smu_memory_pool_size;
extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dc_visual_confirm;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
extern struct amdgpu_mgpu_info mgpu_info;
@@ -1011,7 +1012,6 @@ struct amdgpu_device {
	uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];

	/* enable runtime pm on the device */
	bool runpm;
	bool in_runpm;
	bool has_pr3;

@@ -29,6 +29,8 @@
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>
#include <linux/acpi.h>
#include <linux/dmi.h>

#include "amdgpu.h"
#include "atom.h"
@@ -36,6 +38,7 @@

#include "acp_gfx_if.h"

#define ST_JADEITE 1
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
@@ -85,6 +88,8 @@
#define ACP_DEVS 4
#define ACP_SRC_ID 162

static unsigned long acp_machine_id;

enum {
	ACP_TILE_P1 = 0,
	ACP_TILE_P2,
@@ -128,16 +133,14 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
	adev = apd->adev;
	/* call smu to POWER GATE ACP block
	 * smu will
	 * 1. turn off the acp clock
	 * 2. power off the acp tiles
	 * 3. check and enter ulv state
	 */
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	}
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
	return 0;
}

@@ -147,16 +150,14 @@ static int acp_poweron(struct generic_pm_domain *genpd)
	struct amdgpu_device *adev;

	apd = container_of(genpd, struct acp_pm_domain, gpd);
	if (apd != NULL) {
		adev = apd->adev;
	adev = apd->adev;
	/* call smu to UNGATE ACP block
	 * smu will
	 * 1. exit ulv
	 * 2. turn on acp clock
	 * 3. power on acp tiles
	 */
		amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	}
	amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false);
	return 0;
}

@@ -184,6 +185,37 @@ static int acp_genpd_remove_device(struct device *dev, void *data)
	return 0;
}

static int acp_quirk_cb(const struct dmi_system_id *id)
{
	acp_machine_id = ST_JADEITE;
	return 1;
}

static const struct dmi_system_id acp_quirk_table[] = {
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMD"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jadeite"),
		}
	},
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "IP3 Technology CO.,Ltd."),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN1D"),
		},
	},
	{
		.callback = acp_quirk_cb,
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Standard"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN10"),
		},
	},
	{}
};

/**
 * acp_hw_init - start and test ACP block
 *
@@ -193,7 +225,7 @@ static int acp_genpd_remove_device(struct device *dev, void *data)
static int acp_hw_init(void *handle)
{
	int r;
	uint64_t acp_base;
	u64 acp_base;
	u32 val = 0;
	u32 count = 0;
	struct i2s_platform_data *i2s_pdata = NULL;
@@ -220,141 +252,210 @@ static int acp_hw_init(void *handle)
		return -EINVAL;

	acp_base = adev->rmmio_base;


	adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
	if (adev->acp.acp_genpd == NULL)
	if (!adev->acp.acp_genpd)
		return -ENOMEM;

	adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
	adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
	adev->acp.acp_genpd->gpd.power_on = acp_poweron;


	adev->acp.acp_genpd->adev = adev;

	pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
	dmi_check_system(acp_quirk_table);
	switch (acp_machine_id) {
	case ST_JADEITE:
	{
		adev->acp.acp_cell = kcalloc(2, sizeof(struct mfd_cell),
					     GFP_KERNEL);
		if (!adev->acp.acp_cell) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
					     GFP_KERNEL);
		adev->acp.acp_res = kcalloc(3, sizeof(struct resource), GFP_KERNEL);
		if (!adev->acp.acp_res) {
			r = -ENOMEM;
			goto failure;
		}

		if (adev->acp.acp_cell == NULL) {
			r = -ENOMEM;
			goto failure;
		}
		i2s_pdata = kcalloc(1, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (!i2s_pdata) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
		if (adev->acp.acp_res == NULL) {
			r = -ENOMEM;
			goto failure;
		}

		i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (i2s_pdata == NULL) {
			r = -ENOMEM;
			goto failure;
		}

		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		}
		i2s_pdata[0].cap = DWC_I2S_PLAY;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_COMP_PARAM1 |
				DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_COMP_PARAM1;
		}
			DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
		i2s_pdata[0].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

		i2s_pdata[1].cap = DWC_I2S_RECORD;
		i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

		i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play_cap";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_CAP_REGS_END;

		adev->acp.acp_res[2].name = "acp2x_dma_irq";
		adev->acp.acp_res[2].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[2].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 3;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
		if (r)
			goto failure;
		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
		break;
	}
	default:
		adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
					     GFP_KERNEL);

		i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
		i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
		if (!adev->acp.acp_cell) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
		adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
		if (!adev->acp.acp_res) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
		i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
		if (!i2s_pdata) {
			r = -ENOMEM;
			goto failure;
		}

		adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
		adev->acp.acp_res[2].flags = IORESOURCE_MEM;
		adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		}
		i2s_pdata[0].cap = DWC_I2S_PLAY;
		i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
		i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_COMP_PARAM1 |
				DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
				DW_I2S_QUIRK_COMP_PARAM1;
		}

		adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
		adev->acp.acp_res[3].flags = IORESOURCE_MEM;
		adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
		adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
		i2s_pdata[1].cap = DWC_I2S_RECORD;
		i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
		i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;

		adev->acp.acp_res[4].name = "acp2x_dma_irq";
		adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
		i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			break;
		}

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 5;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
		i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
		i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
		i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
		i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
		i2s_pdata[3].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
		switch (adev->asic_type) {
		case CHIP_STONEY:
			i2s_pdata[3].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
			break;
		default:
			break;
		}
		adev->acp.acp_res[0].name = "acp2x_dma";
		adev->acp.acp_res[0].flags = IORESOURCE_MEM;
		adev->acp.acp_res[0].start = acp_base;
		adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;

		adev->acp.acp_cell[2].name = "designware-i2s";
		adev->acp.acp_cell[2].num_resources = 1;
		adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
		adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
		adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
		adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
		adev->acp.acp_res[1].flags = IORESOURCE_MEM;
		adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
		adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;

		adev->acp.acp_cell[3].name = "designware-i2s";
		adev->acp.acp_cell[3].num_resources = 1;
		adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
		adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
		adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
		adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
		adev->acp.acp_res[2].flags = IORESOURCE_MEM;
		adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
		adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;

		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
					    ACP_DEVS);
		if (r)
			goto failure;
		adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
		adev->acp.acp_res[3].flags = IORESOURCE_MEM;
		adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
		adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;

		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
		adev->acp.acp_res[4].name = "acp2x_dma_irq";
		adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
		adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
		adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;

		adev->acp.acp_cell[0].name = "acp_audio_dma";
		adev->acp.acp_cell[0].num_resources = 5;
		adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
		adev->acp.acp_cell[0].platform_data = &adev->asic_type;
		adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);

		adev->acp.acp_cell[1].name = "designware-i2s";
		adev->acp.acp_cell[1].num_resources = 1;
		adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
		adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
		adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);

		adev->acp.acp_cell[2].name = "designware-i2s";
		adev->acp.acp_cell[2].num_resources = 1;
		adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
		adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
		adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);

		adev->acp.acp_cell[3].name = "designware-i2s";
		adev->acp.acp_cell[3].num_resources = 1;
		adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
		adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
		adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);

		r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
		if (r)
			goto failure;

		r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
					  acp_genpd_add_device);
		if (r)
			goto failure;
	}

	/* Assert Soft reset of ACP */
	val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
@@ -546,8 +647,7 @@ static const struct amd_ip_funcs acp_ip_funcs = {
	.set_powergating_state = acp_set_powergating_state,
};

const struct amdgpu_ip_block_version acp_ip_block =
{
const struct amdgpu_ip_block_version acp_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_ACP,
	.major = 2,
	.minor = 2,
@@ -115,21 +115,12 @@ void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
 * compromise that should work in most cases without reserving too
 * much memory for page tables unnecessarily (factor 16K, >> 14).
 */

#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)

static size_t amdgpu_amdkfd_acc_size(uint64_t size)
{
	size >>= PAGE_SHIFT;
	size *= sizeof(dma_addr_t) + sizeof(void *);

	return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
		__roundup_pow_of_two(sizeof(struct ttm_tt)) +
		PAGE_ALIGN(size);
}

/**
 * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
 * of buffer including any reserved for control structures
 * of buffer.
 *
 * @adev: Device to which allocated BO belongs to
 * @size: Size of buffer, in bytes, encapsulated by B0. This should be
@@ -143,19 +134,16 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
{
	uint64_t reserved_for_pt =
		ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
	size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
	size_t system_mem_needed, ttm_mem_needed, vram_needed;
	int ret = 0;

	acc_size = amdgpu_amdkfd_acc_size(size);

	system_mem_needed = 0;
	ttm_mem_needed = 0;
	vram_needed = 0;
	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size + size;
		system_mem_needed = size;
		ttm_mem_needed = size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;

		/*
		 * Conservatively round up the allocation requirement to 2 MB
		 * to avoid fragmentation caused by 4K allocations in the tail
@@ -163,14 +151,10 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		 */
		vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		system_mem_needed = acc_size + size;
		ttm_mem_needed = acc_size;
	} else if (alloc_flag &
		   (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
		    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		system_mem_needed = acc_size;
		ttm_mem_needed = acc_size;
	} else {
		system_mem_needed = size;
	} else if (!(alloc_flag &
		     (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
		      KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		return -ENOMEM;
	}
@@ -208,28 +192,18 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
static void unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag)
{
	size_t acc_size;

	acc_size = amdgpu_amdkfd_acc_size(size);

	spin_lock(&kfd_mem_limit.mem_limit_lock);

	if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= (acc_size + size);
		kfd_mem_limit.system_mem_used -= size;
		kfd_mem_limit.ttm_mem_used -= size;
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
		adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
	} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
		kfd_mem_limit.system_mem_used -= (acc_size + size);
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else if (alloc_flag &
		   (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
		    KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
		kfd_mem_limit.system_mem_used -= acc_size;
		kfd_mem_limit.ttm_mem_used -= acc_size;
	} else {
		kfd_mem_limit.system_mem_used -= size;
	} else if (!(alloc_flag &
		     (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
		      KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
		pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
		goto release;
	}
@@ -436,45 +410,42 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev)
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			else
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case CHIP_ALDEBARAN:
		if (coherent && uncached) {
			if (adev->gmc.xgmi.connected_to_cpu ||
			    !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
				snoop = true;
			mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
		if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
			if (bo_adev == adev) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (adev->gmc.xgmi.connected_to_cpu)
				if (uncached)
					mapping_flags |= AMDGPU_VM_MTYPE_UC;
				else if (coherent)
					mapping_flags |= AMDGPU_VM_MTYPE_CC;
				else
					mapping_flags |= AMDGPU_VM_MTYPE_RW;
				if (adev->asic_type == CHIP_ALDEBARAN &&
				    adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (uncached || coherent)
					mapping_flags |= AMDGPU_VM_MTYPE_UC;
				else
					mapping_flags |= AMDGPU_VM_MTYPE_NC;
				if (amdgpu_xgmi_same_hive(adev, bo_adev))
					snoop = true;
			}
		} else {
			if (uncached || coherent)
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
			else
				mapping_flags |= AMDGPU_VM_MTYPE_NC;
			snoop = true;
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		if (uncached || coherent)
			mapping_flags |= AMDGPU_VM_MTYPE_UC;
		else
			mapping_flags |= AMDGPU_VM_MTYPE_NC;

		if (!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
			snoop = true;
	}

	pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
@@ -40,7 +40,7 @@ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
						   rhead);

	mutex_destroy(&list->bo_list_mutex);
	kvfree(list);
}

@@ -136,6 +136,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);

	mutex_init(&list->bo_list_mutex);
	*result = list;
	return 0;

@@ -47,6 +47,10 @@ struct amdgpu_bo_list {
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;

	/* Protect access during command submission.
	 */
	struct mutex bo_list_mutex;
};

int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
@@ -519,6 +519,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
		return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;
@@ -651,6 +653,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
			kvfree(e->user_pages);
			e->user_pages = NULL;
		}
		mutex_unlock(&p->bo_list->bo_list_mutex);
	}
	return r;
}
@@ -690,9 +693,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
	unsigned i;

	if (error && backoff)
	if (error && backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
		mutex_unlock(&parser->bo_list->bo_list_mutex);
	}

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -832,12 +837,16 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
		if (r) {
			mutex_unlock(&p->bo_list->bo_list_mutex);
			return r;
		}

		r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
		if (r)
		if (r) {
			mutex_unlock(&p->bo_list->bo_list_mutex);
			return r;
		}
	}

	r = amdgpu_vm_handle_moved(adev, vm);
@@ -1278,6 +1287,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);

	return 0;

@@ -272,32 +272,6 @@ static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
	return res;
}

static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
	ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;

	return 0;
}

static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
@@ -326,6 +300,38 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
	return 0;
}

static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	u32 current_stable_pstate;
	int r;

	r = amdgpu_ctx_priority_permit(filp, priority);
	if (r)
		return r;

	memset(ctx, 0, sizeof(*ctx));

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;

	r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
	if (r)
		return r;

	ctx->stable_pstate = current_stable_pstate;

	return 0;
}

static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
@@ -397,7 +403,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
	}

	if (drm_dev_enter(&adev->ddev, &idx)) {
		amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE);
		amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
		drm_dev_exit(idx);
	}

@@ -1117,13 +1117,50 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
	}

	while (size) {
		uint32_t value;
		u32 value = adev->gfx.gfx_off_state;

		r = put_user(value, (u32 *)buf);
		if (r)
			goto out;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	r = result;
out:
	pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
	pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);

	return r;
}

static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf,
						 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
		return r;
	}

	while (size) {
		u32 value;

		r = amdgpu_get_gfx_off_status(adev, &value);
		if (r)
			goto out;

		r = put_user(value, (uint32_t *)buf);
		r = put_user(value, (u32 *)buf);
		if (r)
			goto out;

@@ -1206,6 +1243,12 @@ static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
	.llseek = default_llseek
};

static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_gfxoff_status_read,
	.llseek = default_llseek
};

static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_regs_fops,
	&amdgpu_debugfs_regs2_fops,
@@ -1217,6 +1260,7 @@ static const struct file_operations *debugfs_regs[] = {
	&amdgpu_debugfs_wave_fops,
	&amdgpu_debugfs_gpr_fops,
	&amdgpu_debugfs_gfxoff_fops,
	&amdgpu_debugfs_gfxoff_status_fops,
};

static const char *debugfs_regs_names[] = {
@@ -1230,6 +1274,7 @@ static const char *debugfs_regs_names[] = {
	"amdgpu_wave",
	"amdgpu_gpr",
	"amdgpu_gfxoff",
	"amdgpu_gfxoff_status",
};

/**
@@ -5230,8 +5230,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && (job->hw_fence.ops != NULL) &&
	    dma_fence_is_signaled(&job->hw_fence)) {
	if (job && dma_fence_is_signaled(&job->hw_fence)) {
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
@@ -1716,6 +1716,7 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 4):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
		case IP_VERSION(3, 2, 0):
@@ -2206,12 +2207,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
		break;
	case IP_VERSION(7, 4, 0):
	case IP_VERSION(7, 4, 1):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 4, 4):
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
		break;
	case IP_VERSION(7, 2, 0):
	case IP_VERSION(7, 2, 1):
@@ -2225,15 +2223,12 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
	case IP_VERSION(2, 3, 0):
	case IP_VERSION(2, 3, 1):
	case IP_VERSION(2, 3, 2):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(3, 3, 0):
	case IP_VERSION(3, 3, 1):
	case IP_VERSION(3, 3, 2):
	case IP_VERSION(3, 3, 3):
		adev->nbio.funcs = &nbio_v2_3_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc;
		adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
		break;
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
@@ -80,7 +80,7 @@
 * - 3.24.0 - Add high priority compute support for gfx9
 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
 * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
 * - 3.27.0 - Add new chunk to to AMDGPU_CS to enable BO_LIST creation.
 * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
 * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
 * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
 * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
@@ -100,10 +100,11 @@
 * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
 * - 3.45.0 - Add context ioctl stable pstate interface
 * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
 * * 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
 * - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
 * - 3.48.0 - Add IP discovery version info to HW INFO
 */
#define KMS_DRIVER_MAJOR	3
#define KMS_DRIVER_MINOR	47
#define KMS_DRIVER_MINOR	48
#define KMS_DRIVER_PATCHLEVEL	0

int amdgpu_vram_limit;
@@ -167,6 +168,7 @@ int amdgpu_smu_pptable_id = -1;
 */
uint amdgpu_dc_feature_mask = 2;
uint amdgpu_dc_debug_mask;
uint amdgpu_dc_visual_confirm;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp;
int amdgpu_discovery = -1;
@@ -827,6 +829,9 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);

MODULE_PARM_DESC(visualconfirm, "Visual confirm (0 = off (default), 1 = MPO, 5 = PSR)");
module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);

/**
 * DOC: abmlevel (uint)
 * Override the default ABM (Adaptive Backlight Management) level used for DC
@@ -2121,7 +2126,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
	if (ret)
		DRM_ERROR("Creating debugfs files failed (%d).\n", ret);

	if (adev->runpm) {
	if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
		/* only need to skip on ATPX */
		if (amdgpu_device_supports_px(ddev))
			dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
@@ -2178,7 +2183,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)

	drm_dev_unplug(dev);

	if (adev->runpm) {
	if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}
@@ -2461,7 +2466,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int ret, i;

	if (!adev->runpm) {
	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}
@@ -2530,7 +2535,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int ret;

	if (!adev->runpm)
	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
		return -EINVAL;

	/* Avoids registers access if device is physically gone */
@@ -2574,7 +2579,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
	int ret = 1;

	if (!adev->runpm) {
	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}
@@ -47,7 +47,7 @@
 * for GPU/CPU synchronization. When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the the relevant GPU caches have been flushed.
 * that the relevant GPU caches have been flushed.
 */

struct amdgpu_fence {
@@ -133,16 +133,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
	struct dma_fence *f;
	struct dma_fence *hw_fence;
	unsigned i;

	if (job->hw_fence.ops == NULL)
		hw_fence = job->external_hw_fence;
	else
		hw_fence = &job->hw_fence;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
	f = job->base.s_fence ? &job->base.s_fence->finished : &job->hw_fence;
	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
@@ -156,11 +150,7 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if has embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
	dma_fence_put(&job->hw_fence);
}

void amdgpu_job_free(struct amdgpu_job *job)
@@ -169,11 +159,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);

	/* only put the hw fence if has embedded fence */
	if (job->hw_fence.ops != NULL)
		dma_fence_put(&job->hw_fence);
	else
		kfree(job);
	dma_fence_put(&job->hw_fence);
}

int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -203,15 +189,12 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
	int r;

	job->base.sched = &ring->sched;
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
	/* record external_hw_fence for direct submit */
	job->external_hw_fence = dma_fence_get(*fence);
	r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);

	if (r)
		return r;

	amdgpu_job_free(job);
	dma_fence_put(*fence);

	return 0;
}

@@ -50,7 +50,6 @@ struct amdgpu_job {
	struct amdgpu_sync sync;
	struct amdgpu_sync sched_sync;
	struct dma_fence hw_fence;
	struct dma_fence *external_hw_fence;
	uint32_t preamble_status;
	uint32_t preemption_status;
	bool vm_needs_flush;
@@ -43,17 +43,6 @@
#include "amdgpu_display.h"
#include "amdgpu_ras.h"

static void amdgpu_runtime_pm_quirk(struct amdgpu_device *adev)
{
	/*
	 * Add below quirk on several sienna_cichlid cards to disable
	 * runtime pm to fix EMI failures.
	 */
	if (((adev->pdev->device == 0x73A1) && (adev->pdev->revision == 0x00)) ||
	    ((adev->pdev->device == 0x73BF) && (adev->pdev->revision == 0xCF)))
		adev->runpm = false;
}

void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
@@ -158,37 +147,36 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
		goto out;
	}

	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
	if (amdgpu_device_supports_px(dev) &&
	    (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
		adev->runpm = true;
	    (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */
		adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
		dev_info(adev->dev, "Using ATPX for runtime pm\n");
	} else if (amdgpu_device_supports_boco(dev) &&
		   (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
		adev->runpm = true;
		   (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */
		adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
		dev_info(adev->dev, "Using BOCO for runtime pm\n");
	} else if (amdgpu_device_supports_baco(dev) &&
		   (amdgpu_runtime_pm != 0)) {
		switch (adev->asic_type) {
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			/* enable runpm if runpm=1 */
			/* enable BACO as runpm mode if runpm=1 */
			if (amdgpu_runtime_pm > 0)
				adev->runpm = true;
				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			break;
		case CHIP_VEGA10:
			/* turn runpm on if noretry=0 */
			/* enable BACO as runpm mode if noretry=0 */
			if (!adev->gmc.noretry)
				adev->runpm = true;
				adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			break;
		default:
			/* enable runpm on CI+ */
			adev->runpm = true;
			/* enable BACO as runpm mode on CI+ */
			adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
			break;
		}

		amdgpu_runtime_pm_quirk(adev);

		if (adev->runpm)
		if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)
			dev_info(adev->dev, "Using BACO for runtime pm\n");
	}

@@ -473,6 +461,30 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;

	if (adev->asic_type >= CHIP_VEGA10) {
		switch (type) {
		case AMD_IP_BLOCK_TYPE_GFX:
			result->ip_discovery_version = adev->ip_versions[GC_HWIP][0];
			break;
		case AMD_IP_BLOCK_TYPE_SDMA:
			result->ip_discovery_version = adev->ip_versions[SDMA0_HWIP][0];
			break;
		case AMD_IP_BLOCK_TYPE_UVD:
		case AMD_IP_BLOCK_TYPE_VCN:
		case AMD_IP_BLOCK_TYPE_JPEG:
			result->ip_discovery_version = adev->ip_versions[UVD_HWIP][0];
			break;
		case AMD_IP_BLOCK_TYPE_VCE:
			result->ip_discovery_version = adev->ip_versions[VCE_HWIP][0];
			break;
		default:
			result->ip_discovery_version = 0;
			break;
		}
	} else {
		result->ip_discovery_version = 0;
	}
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
@@ -2168,6 +2168,21 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
	case AMDGPU_UCODE_ID_RLC_DRAM:
		*type = GFX_FW_TYPE_RLC_DRAM_BOOT;
		break;
	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
		*type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
		*type = GFX_FW_TYPE_SE0_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
		*type = GFX_FW_TYPE_SE1_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
		*type = GFX_FW_TYPE_SE2_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
		*type = GFX_FW_TYPE_SE3_TAP_DELAYS;
		break;
	case AMDGPU_UCODE_ID_SMC:
		*type = GFX_FW_TYPE_SMU;
		break;
@@ -2348,6 +2363,13 @@ static int psp_load_smu_fw(struct psp_context *psp)
			&adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
	struct amdgpu_ras *ras = psp->ras_context.ras;

	/*
	 * Skip SMU FW reloading in case of using BACO for runpm only,
	 * as SMU is always alive.
	 */
	if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO))
		return 0;

	if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
		return 0;

@@ -69,8 +69,8 @@ enum psp_bootloader_cmd {
	PSP_BL__LOAD_SOSDRV = 0x20000,
	PSP_BL__LOAD_KEY_DATABASE = 0x80000,
	PSP_BL__LOAD_SOCDRV = 0xB0000,
	PSP_BL__LOAD_INTFDRV = 0xC0000,
	PSP_BL__LOAD_DBGDRV = 0xD0000,
	PSP_BL__LOAD_DBGDRV = 0xC0000,
	PSP_BL__LOAD_INTFDRV = 0xD0000,
	PSP_BL__DRAM_LONG_TRAIN = 0x100000,
	PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
	PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
@@ -222,6 +222,11 @@ struct amdgpu_rlc {
	u32 rlc_dram_ucode_size_bytes;
	u32 rlcp_ucode_size_bytes;
	u32 rlcv_ucode_size_bytes;
	u32 global_tap_delays_ucode_size_bytes;
	u32 se0_tap_delays_ucode_size_bytes;
	u32 se1_tap_delays_ucode_size_bytes;
	u32 se2_tap_delays_ucode_size_bytes;
	u32 se3_tap_delays_ucode_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
@@ -232,6 +237,11 @@ struct amdgpu_rlc {
	u8 *rlc_dram_ucode;
	u8 *rlcp_ucode;
	u8 *rlcv_ucode;
	u8 *global_tap_delays_ucode;
	u8 *se0_tap_delays_ucode;
	u8 *se1_tap_delays_ucode;
	u8 *se2_tap_delays_ucode;
	u8 *se3_tap_delays_ucode;

	bool is_rlc_v2_1;

@@ -561,6 +561,16 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
		return "RLC_P";
	case AMDGPU_UCODE_ID_RLC_V:
		return "RLC_V";
	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
		return "GLOBAL_TAP_DELAYS";
	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
		return "SE0_TAP_DELAYS";
	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
		return "SE1_TAP_DELAYS";
	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
		return "SE2_TAP_DELAYS";
	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
		return "SE3_TAP_DELAYS";
	case AMDGPU_UCODE_ID_IMU_I:
		return "IMU_I";
	case AMDGPU_UCODE_ID_IMU_D:
@@ -745,6 +755,26 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
		ucode->ucode_size = adev->gfx.rlc.rlcv_ucode_size_bytes;
		ucode_addr = adev->gfx.rlc.rlcv_ucode;
		break;
	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
		ucode->ucode_size = adev->gfx.rlc.global_tap_delays_ucode_size_bytes;
		ucode_addr = adev->gfx.rlc.global_tap_delays_ucode;
		break;
	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
		ucode->ucode_size = adev->gfx.rlc.se0_tap_delays_ucode_size_bytes;
		ucode_addr = adev->gfx.rlc.se0_tap_delays_ucode;
		break;
	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
		ucode->ucode_size = adev->gfx.rlc.se1_tap_delays_ucode_size_bytes;
		ucode_addr = adev->gfx.rlc.se1_tap_delays_ucode;
		break;
	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
		ucode->ucode_size = adev->gfx.rlc.se2_tap_delays_ucode_size_bytes;
		ucode_addr = adev->gfx.rlc.se2_tap_delays_ucode;
		break;
	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
		ucode->ucode_size = adev->gfx.rlc.se3_tap_delays_ucode_size_bytes;
		ucode_addr = adev->gfx.rlc.se3_tap_delays_ucode;
		break;
	case AMDGPU_UCODE_ID_CP_MES:
		ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
		ucode_addr = (u8 *)ucode->fw->data +
@@ -266,6 +266,21 @@ struct rlc_firmware_header_v2_3 {
	uint32_t rlcv_ucode_offset_bytes;
};

/* version_major=2, version_minor=4 */
struct rlc_firmware_header_v2_4 {
	struct rlc_firmware_header_v2_3 v2_3;
	uint32_t global_tap_delays_ucode_size_bytes;
	uint32_t global_tap_delays_ucode_offset_bytes;
	uint32_t se0_tap_delays_ucode_size_bytes;
	uint32_t se0_tap_delays_ucode_offset_bytes;
	uint32_t se1_tap_delays_ucode_size_bytes;
	uint32_t se1_tap_delays_ucode_offset_bytes;
	uint32_t se2_tap_delays_ucode_size_bytes;
	uint32_t se2_tap_delays_ucode_offset_bytes;
	uint32_t se3_tap_delays_ucode_size_bytes;
	uint32_t se3_tap_delays_ucode_offset_bytes;
};

/* version_major=1, version_minor=0 */
struct sdma_firmware_header_v1_0 {
	struct common_firmware_header header;
@@ -426,6 +441,11 @@ enum AMDGPU_UCODE_ID {
	AMDGPU_UCODE_ID_CP_MES1_DATA,
	AMDGPU_UCODE_ID_IMU_I,
	AMDGPU_UCODE_ID_IMU_D,
	AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS,
	AMDGPU_UCODE_ID_SE0_TAP_DELAYS,
	AMDGPU_UCODE_ID_SE1_TAP_DELAYS,
	AMDGPU_UCODE_ID_SE2_TAP_DELAYS,
	AMDGPU_UCODE_ID_SE3_TAP_DELAYS,
	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
	AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
@@ -41,6 +41,12 @@
#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))

#define LOOP_UMC_NODE_INST(node_inst) \
	for ((node_inst) = 0; (node_inst) < adev->umc.node_inst_num; (node_inst)++)

#define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
	LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))

struct amdgpu_umc_ras {
	struct amdgpu_ras_block_object ras_block;
	void (*err_cnt_init)(struct amdgpu_device *adev);
@@ -62,6 +68,10 @@ struct amdgpu_umc {
	uint32_t channel_inst_num;
	/* number of umc instance with memory map register access */
	uint32_t umc_inst_num;

	/*number of umc node instance with memory map register access*/
	uint32_t node_inst_num;

	/* UMC regiser per channel offset */
	uint32_t channel_offs;
	/* channel index table of interleaved memory */
@@ -339,7 +339,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
@@ -333,7 +333,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)

		tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
		tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
		WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

		amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
	}
@ -3976,6 +3976,23 @@ static void gfx_v10_0_init_rlc_iram_dram_microcode(struct amdgpu_device *adev)
	adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
}

static void gfx_v10_0_init_tap_delays_microcode(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v2_4 *rlc_hdr;

	rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
	adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
}

static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
{
	bool ret = false;
@ -4153,8 +4170,11 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
	if (version_major == 2) {
		if (version_minor >= 1)
			gfx_v10_0_init_rlc_ext_microcode(adev);
		if (version_minor == 2)
		if (version_minor >= 2)
			gfx_v10_0_init_rlc_iram_dram_microcode(adev);
		if (version_minor == 4) {
			gfx_v10_0_init_tap_delays_microcode(adev);
		}
	}
}

@ -4251,8 +4271,39 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
			adev->firmware.fw_size +=
				ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
		}

	}

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
	info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
	info->fw = adev->gfx.rlc_fw;
	adev->firmware.fw_size +=
		ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
	info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
	info->fw = adev->gfx.rlc_fw;
	adev->firmware.fw_size +=
		ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
	info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
	info->fw = adev->gfx.rlc_fw;
	adev->firmware.fw_size +=
		ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
	info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
	info->fw = adev->gfx.rlc_fw;
	adev->firmware.fw_size +=
		ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
	info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
	info->fw = adev->gfx.rlc_fw;
	adev->firmware.fw_size +=
		ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);

	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
	info->fw = adev->gfx.mec_fw;
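
Editor's note: all of the tap-delay init helpers above follow one convention — the RLC firmware image begins with a versioned header whose little-endian (offset, size) fields locate each embedded blob. A minimal sketch of that convention, with hypothetical struct and field names that are not part of this patch:

	/* Illustrative only: names below are not from this patch. */
	struct blob_desc {
		__le32 offset_bytes;	/* blob start, relative to the header */
		__le32 size_bytes;	/* blob length in bytes */
	};

	static const u8 *blob_ptr(const void *fw_data,
				  const struct blob_desc *desc, u32 *size)
	{
		*size = le32_to_cpu(desc->size_bytes);
		return (const u8 *)fw_data + le32_to_cpu(desc->offset_bytes);
	}
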
@ -987,23 +987,23 @@ static void gfx_v9_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0);
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32_SOC15(GC, 0, mmSCRATCH_REG0, 0xCAFEDEAD);
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0) -
			  PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, scratch - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_SOC15(GC, 0, mmSCRATCH_REG0);
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
@ -22,6 +22,9 @@
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v10_0.h"
@ -980,6 +983,8 @@ static int gmc_v10_0_sw_init(void *handle)
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;
@ -22,10 +22,13 @@
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>

#include "amdgpu.h"
#include "amdgpu_atomfirmware.h"
#include "gmc_v11_0.h"
#include "umc_v8_7.h"
#include "umc_v8_10.h"
#include "athub/athub_3_0_0_sh_mask.h"
#include "athub/athub_3_0_0_offset.h"
#include "oss/osssys_6_0_0_offset.h"
@ -537,11 +540,36 @@ static void gmc_v11_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(8, 10, 0):
		adev->umc.channel_inst_num = UMC_V8_10_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V8_10_UMC_INSTANCE_NUM;
		adev->umc.node_inst_num = adev->gmc.num_umc;
		adev->umc.max_ras_err_cnt_per_query = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
		adev->umc.channel_offs = UMC_V8_10_PER_CHANNEL_OFFSET;
		adev->umc.channel_idx_tbl = &umc_v8_10_channel_idx_tbl[0][0][0];
		adev->umc.ras = &umc_v8_10_ras;
		break;
	case IP_VERSION(8, 11, 0):
		break;
	default:
		break;
	}

	if (adev->umc.ras) {
		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);

		strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
		adev->umc.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
		adev->umc.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->umc.ras_if = &adev->umc.ras->ras_block.ras_comm;

		/* If no special ras_late_init function is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_late_init)
			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;

		/* If no special ras_cb function is defined, use the default */
		if (!adev->umc.ras->ras_block.ras_cb)
			adev->umc.ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
	}
}


@ -750,6 +778,8 @@ static int gmc_v11_0_sw_init(void *handle)
		return r;
	}

	adev->need_swiotlb = drm_need_swiotlb(44);

	r = gmc_v11_0_mc_init(adev);
	if (r)
		return r;
@ -328,27 +328,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
	.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc = {
	.ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
	.ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
	.ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
	.ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
	.ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK,
	.ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK,
	.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK,
};

static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
	uint32_t def, data;

@ -27,7 +27,6 @@
#include "soc15_common.h"

extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc;
extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;

#endif
@ -339,27 +339,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};

const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
	.ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
	.ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
	.ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
	.ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK,
	.ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK,
	.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK,
};

static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
	uint32_t baco_cntl;

@ -27,7 +27,6 @@
#include "soc15_common.h"

extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
extern struct amdgpu_nbio_ras nbio_v7_4_ras;

@ -259,6 +259,8 @@ enum psp_gfx_fw_type {
	GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
	GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
	GFX_FW_TYPE_CAP = 62, /* CAP_FW */
	GFX_FW_TYPE_SE2_TAP_DELAYS = 65, /* SE2 TAP DELAYS NV */
	GFX_FW_TYPE_SE3_TAP_DELAYS = 66, /* SE3 TAP DELAYS NV */
	GFX_FW_TYPE_REG_LIST = 67, /* REG_LIST MI */
	GFX_FW_TYPE_IMU_I = 68, /* IMU Instruction FW SOC21 */
	GFX_FW_TYPE_IMU_D = 69, /* IMU Data FW SOC21 */

@ -320,6 +320,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)

	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 7):
		return AMD_RESET_METHOD_MODE1;
	case IP_VERSION(13, 0, 4):
		return AMD_RESET_METHOD_MODE2;
drivers/gpu/drm/amd/amdgpu/umc_v8_10.c (new file, 357 lines)
@ -0,0 +1,357 @@
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "umc_v8_10.h"
#include "amdgpu_ras.h"
#include "amdgpu_umc.h"
#include "amdgpu.h"
#include "umc/umc_8_10_0_offset.h"
#include "umc/umc_8_10_0_sh_mask.h"

#define UMC_8_NODE_DIST 0x800000
#define UMC_8_INST_DIST 0x4000

struct channelnum_map_colbit {
	uint32_t channel_num;
	uint32_t col_bit;
};

const struct channelnum_map_colbit umc_v8_10_channelnum_map_colbit_table[] = {
	{24, 13},
	{20, 13},
	{16, 12},
	{14, 12},
	{12, 12},
	{10, 12},
	{6, 11},
};

const uint32_t
	umc_v8_10_channel_idx_tbl[]
			[UMC_V8_10_UMC_INSTANCE_NUM]
			[UMC_V8_10_CHANNEL_INSTANCE_NUM] = {
	   {{16, 18}, {17, 19}},
	   {{15, 11}, {3, 7}},
	   {{1, 5}, {13, 9}},
	   {{23, 21}, {22, 20}},
	   {{0, 4}, {12, 8}},
	   {{14, 10}, {2, 6}}
};

static inline uint32_t get_umc_v8_10_reg_offset(struct amdgpu_device *adev,
					    uint32_t node_inst,
					    uint32_t umc_inst,
					    uint32_t ch_inst)
{
	return adev->umc.channel_offs * ch_inst + UMC_8_INST_DIST * umc_inst +
		UMC_8_NODE_DIST * node_inst;
}
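
Editor's note: to make the offset arithmetic above concrete, with the UMC 8.10 constants (channel offset 0x400, instance stride 0x4000, node stride 0x800000), channel 1 of UMC instance 1 on node 2 yields 0x400 + 0x4000 + 0x1000000 = 0x1004400, in dword-register units that callers multiply by 4 for the byte address. A self-contained, assumption-free check of the same computation:

	#include <assert.h>
	#include <stdint.h>

	/* mirrors get_umc_v8_10_reg_offset() for illustration */
	static uint32_t reg_offset(uint32_t node, uint32_t umc, uint32_t ch)
	{
		const uint32_t channel_offs = 0x400;	/* UMC_V8_10_PER_CHANNEL_OFFSET */
		const uint32_t inst_dist = 0x4000;	/* UMC_8_INST_DIST */
		const uint32_t node_dist = 0x800000;	/* UMC_8_NODE_DIST */

		return channel_offs * ch + inst_dist * umc + node_dist * node;
	}

	int main(void)
	{
		assert(reg_offset(2, 1, 1) == 0x1004400);
		return 0;
	}
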

static void umc_v8_10_clear_error_count_per_channel(struct amdgpu_device *adev,
					uint32_t umc_reg_offset)
{
	uint32_t ecc_err_cnt_addr;

	ecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);

	/* clear error count */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
			UMC_V8_10_CE_CNT_INIT);
}

static void umc_v8_10_clear_error_count(struct amdgpu_device *adev)
{
	uint32_t node_inst = 0;
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
		umc_reg_offset = get_umc_v8_10_reg_offset(adev,
						node_inst,
						umc_inst,
						ch_inst);

		umc_v8_10_clear_error_count_per_channel(adev,
						umc_reg_offset);
	}
}

static void umc_v8_10_query_correctable_error_count(struct amdgpu_device *adev,
						   uint32_t umc_reg_offset,
						   unsigned long *error_count)
{
	uint32_t ecc_err_cnt, ecc_err_cnt_addr;
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	/* UMC 8_10 registers */
	ecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);
	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
	*error_count +=
		(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_GeccErrCnt, GeccErrCnt) -
		 UMC_V8_10_CE_CNT_INIT);

	/* Check for SRAM correctable error, MCUMC_STATUS is a 64 bit register */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
		*error_count += 1;
}

static void umc_v8_10_query_uncorrectable_error_count(struct amdgpu_device *adev,
						      uint32_t umc_reg_offset,
						      unsigned long *error_count)
{
	uint64_t mc_umc_status;
	uint32_t mc_umc_status_addr;

	mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);

	/* Check the MCUMC_STATUS. */
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
	if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
		*error_count += 1;
}

static void umc_v8_10_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	uint32_t node_inst = 0;
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
		umc_reg_offset = get_umc_v8_10_reg_offset(adev,
						node_inst,
						umc_inst,
						ch_inst);

		umc_v8_10_query_correctable_error_count(adev,
						umc_reg_offset,
						&(err_data->ce_count));
		umc_v8_10_query_uncorrectable_error_count(adev,
						umc_reg_offset,
						&(err_data->ue_count));
	}

	umc_v8_10_clear_error_count(adev);
}

static uint32_t umc_v8_10_get_col_bit(uint32_t channel_num)
{
	uint32_t t = 0;

	for (t = 0; t < ARRAY_SIZE(umc_v8_10_channelnum_map_colbit_table); t++)
		if (channel_num == umc_v8_10_channelnum_map_colbit_table[t].channel_num)
			return umc_v8_10_channelnum_map_colbit_table[t].col_bit;

	/* Failed to get col_bit. */
	return U32_MAX;
}

/*
 * Mapping normal address to soc physical address in swizzle mode.
 */
static int umc_v8_10_swizzle_mode_na_to_pa(struct amdgpu_device *adev,
					uint32_t channel_idx,
					uint64_t na, uint64_t *soc_pa)
{
	uint32_t channel_num = UMC_V8_10_TOTAL_CHANNEL_NUM(adev);
	uint32_t col_bit = umc_v8_10_get_col_bit(channel_num);
	uint64_t tmp_addr;

	if (col_bit == U32_MAX)
		return -1;

	tmp_addr = SWIZZLE_MODE_TMP_ADDR(na, channel_num, channel_idx);
	*soc_pa = SWIZZLE_MODE_ADDR_HI(tmp_addr, col_bit) |
		SWIZZLE_MODE_ADDR_MID(na, col_bit) |
		SWIZZLE_MODE_ADDR_LOW(tmp_addr, col_bit) |
		SWIZZLE_MODE_ADDR_LSB(na);

	return 0;
}

static void umc_v8_10_query_error_address(struct amdgpu_device *adev,
					 struct ras_err_data *err_data,
					 uint32_t umc_reg_offset,
					 uint32_t node_inst,
					 uint32_t ch_inst,
					 uint32_t umc_inst)
{
	uint64_t mc_umc_status_addr;
	uint64_t mc_umc_status, err_addr;
	uint32_t channel_index;

	mc_umc_status_addr =
		SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0);
	mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);

	if (mc_umc_status == 0)
		return;

	if (!err_data->err_addr) {
		/* clear umc status */
		WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
		return;
	}

	channel_index =
		adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
					adev->umc.channel_inst_num +
					umc_inst * adev->umc.channel_inst_num +
					ch_inst];

	/* calculate error address if ue/ce error is detected */
	if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
	    REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrV) == 1 &&
	    (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
	     REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) {
		uint32_t addr_lsb;
		uint64_t mc_umc_addrt0;

		mc_umc_addrt0 = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_ADDRT0);
		err_addr = RREG64_PCIE((mc_umc_addrt0 + umc_reg_offset) * 4);
		err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr);

		/* the lowest lsb bits should be ignored */
		addr_lsb = REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, AddrLsb);

		err_addr &= ~((0x1ULL << addr_lsb) - 1);

		/* we only save ue error information currently, ce is skipped */
		if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1) {
			uint64_t na_err_addr_base = err_addr & ~(0x3ULL << UMC_V8_10_NA_C5_BIT);
			uint64_t na_err_addr, retired_page_addr;
			uint32_t col = 0;
			int ret = 0;

			/* loop for all possibilities of [C6 C5] in normal address. */
			for (col = 0; col < UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM; col++) {
				na_err_addr = na_err_addr_base | (col << UMC_V8_10_NA_C5_BIT);

				/* Mapping normal error address to retired soc physical address. */
				ret = umc_v8_10_swizzle_mode_na_to_pa(adev, channel_index,
								na_err_addr, &retired_page_addr);
				if (ret) {
					dev_err(adev->dev, "Failed to map pa from umc na.\n");
					break;
				}
				dev_info(adev->dev, "Error Address(PA): 0x%llx\n",
					retired_page_addr);
				amdgpu_umc_fill_error_record(err_data, na_err_addr,
						retired_page_addr, channel_index, umc_inst);
			}
		}
	}

	/* clear umc status */
	WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
}

static void umc_v8_10_query_ras_error_address(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	uint32_t node_inst = 0;
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
		umc_reg_offset = get_umc_v8_10_reg_offset(adev,
						node_inst,
						umc_inst,
						ch_inst);

		umc_v8_10_query_error_address(adev,
					err_data,
					umc_reg_offset,
					node_inst,
					ch_inst,
					umc_inst);
	}
}

static void umc_v8_10_err_cnt_init_per_channel(struct amdgpu_device *adev,
					      uint32_t umc_reg_offset)
{
	uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
	uint32_t ecc_err_cnt_addr;

	ecc_err_cnt_sel_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCntSel);
	ecc_err_cnt_addr =
		SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccErrCnt);

	ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4);

	/* set ce error interrupt type to APIC based interrupt */
	ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_GeccErrCntSel,
					GeccErrInt, 0x1);
	WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
	/* set error count to initial value */
	WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V8_10_CE_CNT_INIT);
}

static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
{
	uint32_t node_inst = 0;
	uint32_t umc_inst = 0;
	uint32_t ch_inst = 0;
	uint32_t umc_reg_offset = 0;

	LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
		umc_reg_offset = get_umc_v8_10_reg_offset(adev,
						node_inst,
						umc_inst,
						ch_inst);

		umc_v8_10_err_cnt_init_per_channel(adev, umc_reg_offset);
	}
}

const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
	.query_ras_error_count = umc_v8_10_query_ras_error_count,
	.query_ras_error_address = umc_v8_10_query_ras_error_address,
};

struct amdgpu_umc_ras umc_v8_10_ras = {
	.ras_block = {
		.hw_ops = &umc_v8_10_ras_hw_ops,
	},
	.err_cnt_init = umc_v8_10_err_cnt_init,
};
drivers/gpu/drm/amd/amdgpu/umc_v8_10.h (new file, 70 lines)
@ -0,0 +1,70 @@
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __UMC_V8_10_H__
#define __UMC_V8_10_H__

#include "soc15_common.h"
#include "amdgpu.h"

/* number of umc channel instances with memory map register access */
#define UMC_V8_10_CHANNEL_INSTANCE_NUM 2
/* number of umc instances with memory map register access */
#define UMC_V8_10_UMC_INSTANCE_NUM 2

/* Total channel instances for all umc nodes */
#define UMC_V8_10_TOTAL_CHANNEL_NUM(adev) \
	(UMC_V8_10_CHANNEL_INSTANCE_NUM * UMC_V8_10_UMC_INSTANCE_NUM * (adev)->umc.node_inst_num)

/* UMC register per channel offset */
#define UMC_V8_10_PER_CHANNEL_OFFSET 0x400

/* EccErrCnt max value */
#define UMC_V8_10_CE_CNT_MAX 0xffff
/* umc ce interrupt threshold */
#define UUMC_V8_10_CE_INT_THRESHOLD 0xffff
/* umc ce count initial value */
#define UMC_V8_10_CE_CNT_INIT (UMC_V8_10_CE_CNT_MAX - UUMC_V8_10_CE_INT_THRESHOLD)

#define UMC_V8_10_NA_COL_2BITS_POWER_OF_2_NUM 4

/* The C5 bit in NA address */
#define UMC_V8_10_NA_C5_BIT 14

/* Map to swizzle mode address */
#define SWIZZLE_MODE_TMP_ADDR(na, ch_num, ch_idx) \
	((((na) >> 10) * (ch_num) + (ch_idx)) << 10)
#define SWIZZLE_MODE_ADDR_HI(addr, col_bit) \
	(((addr) >> ((col_bit) + 2)) << ((col_bit) + 2))
#define SWIZZLE_MODE_ADDR_MID(na, col_bit) ((((na) >> 8) & 0x3) << (col_bit))
#define SWIZZLE_MODE_ADDR_LOW(addr, col_bit) \
	((((addr) >> 10) & ((0x1ULL << (col_bit - 8)) - 1)) << 8)
#define SWIZZLE_MODE_ADDR_LSB(na) ((na) & 0xFF)

extern struct amdgpu_umc_ras umc_v8_10_ras;
extern const uint32_t
	umc_v8_10_channel_idx_tbl[]
			[UMC_V8_10_UMC_INSTANCE_NUM]
			[UMC_V8_10_CHANNEL_INSTANCE_NUM];

#endif
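
Editor's note: the swizzle macros above are pure bit arithmetic, so their effect is easiest to see on a worked value. Taking the table row {16, 12} (16 channels, column bit 12) and an assumed normal address of 0x12345 on channel 3, the interleaved temporary address becomes ((0x12345 >> 10) * 16 + 3) << 10 = 0x120C00, and the reassembled physical address is 0x120000 | 0x3000 | 0x300 | 0x45 = 0x123345. A self-contained user-space check (macros copied verbatim from the header; the input address is an arbitrary example, not from the patch):

	#include <assert.h>
	#include <stdint.h>

	#define SWIZZLE_MODE_TMP_ADDR(na, ch_num, ch_idx) \
		((((na) >> 10) * (ch_num) + (ch_idx)) << 10)
	#define SWIZZLE_MODE_ADDR_HI(addr, col_bit) \
		(((addr) >> ((col_bit) + 2)) << ((col_bit) + 2))
	#define SWIZZLE_MODE_ADDR_MID(na, col_bit) ((((na) >> 8) & 0x3) << (col_bit))
	#define SWIZZLE_MODE_ADDR_LOW(addr, col_bit) \
		((((addr) >> 10) & ((0x1ULL << (col_bit - 8)) - 1)) << 8)
	#define SWIZZLE_MODE_ADDR_LSB(na) ((na) & 0xFF)

	int main(void)
	{
		uint64_t na = 0x12345;	/* assumed normal address */
		uint64_t tmp = SWIZZLE_MODE_TMP_ADDR(na, 16, 3);

		assert(tmp == 0x120C00);
		assert((SWIZZLE_MODE_ADDR_HI(tmp, 12) | SWIZZLE_MODE_ADDR_MID(na, 12) |
			SWIZZLE_MODE_ADDR_LOW(tmp, 12) | SWIZZLE_MODE_ADDR_LSB(na)) == 0x123345);
		return 0;
	}
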

@ -25,6 +25,7 @@
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
@ -44,6 +45,9 @@
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0 0x48300

#define RDECODE_MSG_CREATE 0x00000000
#define RDECODE_MESSAGE_CREATE 0x00000001

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
@ -1323,6 +1327,132 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring)
	}
}

static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&p->entity->fence_seq))
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
		[AMDGPU_RING_PRIO_0].sched;
	drm_sched_entity_modify_sched(p->entity, scheds, 1);
	return 0;
}

static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

	/* Check length */
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v4_0_limit_sched(p);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}
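
Editor's note: the dword offsets that vcn_v4_0_dec_msg() inspects suggest a decode-message layout roughly like the sketch below. This is inferred from the parser above, not a documented UAPI structure; the field names are invented for illustration:

	/* hypothetical view of the decode message header, 32-bit words */
	struct rdecode_msg {
		uint32_t unknown0;	/* msg[0], not inspected by the parser */
		uint32_t total_size;	/* msg[1], validated against the BO bounds */
		uint32_t num_buffers;	/* msg[2], records following msg[6] */
		uint32_t msg_type;	/* msg[3], RDECODE_MSG_CREATE == 0 */
		uint32_t unknown4[2];	/* msg[4..5], skipped */
		/* then num_buffers records of 4 dwords each:
		 * { type (RDECODE_MESSAGE_CREATE), offset, size, reserved },
		 * where create[0] at the record's offset selects the codec. */
	};
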

#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)

static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
					struct amdgpu_job *job,
					struct amdgpu_ib *ib)
{
	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
	struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
	uint32_t val;
	int r = 0;

	/* The first instance can decode anything */
	if (!ring->me)
		return r;

	/* unified queue ib header has 8 double words. */
	if (ib->length_dw < 8)
		return r;

	val = amdgpu_ib_get_value(ib, 6); //RADEON_VCN_ENGINE_TYPE

	if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
		decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];

		if (decode_buffer->valid_buf_flag & 0x1)
			r = vcn_v4_0_dec_msg(p, ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
						decode_buffer->msg_buffer_address_lo);
	}
	return r;
}

static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
@ -1331,6 +1461,7 @@ static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
	.get_rptr = vcn_v4_0_unified_ring_get_rptr,
	.get_wptr = vcn_v4_0_unified_ring_get_wptr,
	.set_wptr = vcn_v4_0_unified_ring_set_wptr,
	.patch_cs_in_place = vcn_v4_0_ring_patch_cs_in_place,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
@ -1115,6 +1115,15 @@ static void kfd_process_wq_release(struct work_struct *work)
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	kfd_process_remove_sysfs(p);
	kfd_iommu_unbind_process(p);

@ -1179,20 +1188,8 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;
	/* Signal the eviction fence after user mode queues are
	 * destroyed. This allows any BOs to be freed without
	 * triggering pointless evictions or waiting for fences.
	 */
	dma_fence_signal(p->ef);

	mutex_unlock(&p->mutex);

	mmu_notifier_put(&p->mmu_notifier);
}
@ -1405,6 +1402,11 @@ static struct kfd_process *create_process(const struct task_struct *thread)
	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	/* Take a reference to avoid free_notifier starting
	 * kfd_process_wq_release if mmu_notifier_get failed because of a
	 * pending signal.
	 */
	kref_get(&process->ref);

	/* MMU notifier registration must be the last call that can fail
	 * because after this point we cannot unwind the process creation.
	 * After this point, mmu_notifier_put will trigger the cleanup by
@ -1417,6 +1419,7 @@ static struct kfd_process *create_process(const struct task_struct *thread)
	}
	BUG_ON(mn != &process->mmu_notifier);

	kfd_unref_process(process);
	get_task_struct(process->lead_thread);

	return process;
@ -6,7 +6,7 @@ config DRM_AMD_DC
	bool "AMD DC - Enable new display engine"
	default y
	select SND_HDA_COMPONENT if SND_HDA_CORE
	select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
	select DRM_AMD_DC_DCN if (X86 || PPC64)
	help
	  Choose this option if you want to use the new display engine
	  support for AMDGPU. This adds required support for Vega and
@ -25,7 +25,13 @@



AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
AMDGPUDM = \
	amdgpu_dm.o \
	amdgpu_dm_plane.o \
	amdgpu_dm_crtc.o \
	amdgpu_dm_irq.o \
	amdgpu_dm_mst_types.o \
	amdgpu_dm_color.o

ifdef CONFIG_DRM_AMD_DC_DCN
AMDGPUDM += dc_fpu.o

File diff suppressed because it is too large
@ -571,6 +571,14 @@ struct dsc_preferred_settings {
	bool dsc_force_disable_passthrough;
};

enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};

struct amdgpu_dm_connector {

	struct drm_connector base;
@ -623,8 +631,20 @@ struct amdgpu_dm_connector {
	struct drm_display_mode freesync_vid_base;

	int psr_skip_count;

	/* Record progress status of mst */
	uint8_t mst_status;
};

static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}

#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)

extern const struct amdgpu_ip_block_version dm_ip_block;

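Editor's note: a quick sketch of how the new mst_status bookkeeping above is meant to be driven (the wrapper function is illustrative; the real call sites live in the DM MST code paths):

	static void example_update_mst_status(struct amdgpu_dm_connector *aconnector)
	{
		/* mark the MST probe stage as completed */
		amdgpu_dm_set_mst_status(&aconnector->mst_status, MST_PROBE, true);

		/* undo a stage again, e.g. when a payload is torn down */
		amdgpu_dm_set_mst_status(&aconnector->mst_status,
					 MST_ALLOCATE_NEW_PAYLOAD, false);
	}

Each stage then reads back as done/not_done through the mst_progress_status debugfs file added later in this series.
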
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c (new file, 464 lines)
@ -0,0 +1,464 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */
#include <drm/drm_vblank.h>
#include <drm/drm_atomic_helper.h>

#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_debugfs.h"

void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc)
{
	struct drm_crtc *crtc = &acrtc->base;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	drm_crtc_handle_vblank(crtc);

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Send completion event for cursor-only commits */
	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
		drm_crtc_send_vblank_event(crtc, acrtc->event);
		drm_crtc_vblank_put(crtc);
		acrtc->event = NULL;
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

bool modeset_required(struct drm_crtc_state *crtc_state,
		      struct dc_stream_state *new_stream,
		      struct dc_stream_state *old_stream)
{
	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
}

bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
{
	return acrtc->dm_irq_params.freesync_config.state ==
			VRR_STATE_ACTIVE_VARIABLE ||
	       acrtc->dm_irq_params.freesync_config.state ==
			VRR_STATE_ACTIVE_FIXED;
}

int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	int rc;

	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;

	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;

	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
		      acrtc->crtc_id, enable ? "en" : "dis", rc);
	return rc;
}

bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}

static void vblank_control_worker(struct work_struct *work)
{
	struct vblank_control_work *vblank_work =
		container_of(work, struct vblank_control_work, work);
	struct amdgpu_display_manager *dm = vblank_work->dm;

	mutex_lock(&dm->dc_lock);

	if (vblank_work->enable)
		dm->active_vblank_irq_count++;
	else if (dm->active_vblank_irq_count)
		dm->active_vblank_irq_count--;

	dc_allow_idle_optimizations(
		dm->dc, dm->active_vblank_irq_count == 0 ? true : false);

	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);

	/*
	 * Control PSR based on vblank requirements from OS
	 *
	 * If panel supports PSR SU, there's no need to disable PSR when OS is
	 * submitting fast atomic commits (we infer this by whether the OS
	 * requests vblank events). Fast atomic commits will simply trigger a
	 * full-frame-update (FFU); a specific case of selective-update (SU)
	 * where the SU region is the full hactive*vactive region. See
	 * fill_dc_dirty_rects().
	 */
	if (vblank_work->stream && vblank_work->stream->link) {
		if (vblank_work->enable) {
			if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
			    vblank_work->stream->link->psr_settings.psr_allow_active)
				amdgpu_dm_psr_disable(vblank_work->stream);
		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
			amdgpu_dm_psr_enable(vblank_work->stream);
		}
	}

	mutex_unlock(&dm->dc_lock);

	dc_stream_release(vblank_work->stream);

	kfree(vblank_work);
}

static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
	struct amdgpu_display_manager *dm = &adev->dm;
	struct vblank_control_work *work;
	int rc = 0;

	if (enable) {
		/* vblank irq on -> Only need vupdate irq in vrr mode */
		if (amdgpu_dm_vrr_active(acrtc_state))
			rc = dm_set_vupdate_irq(crtc, true);
	} else {
		/* vblank irq off -> vupdate irq off */
		rc = dm_set_vupdate_irq(crtc, false);
	}

	if (rc)
		return rc;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;

	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
		return -EBUSY;

	if (amdgpu_in_reset(adev))
		return 0;

	if (dm->vblank_control_workqueue) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;

		INIT_WORK(&work->work, vblank_control_worker);
		work->dm = dm;
		work->acrtc = acrtc;
		work->enable = enable;

		if (acrtc_state->stream) {
			dc_stream_retain(acrtc_state->stream);
			work->stream = acrtc_state->stream;
		}

		queue_work(dm->vblank_control_workqueue, &work->work);
	}

	return 0;
}

int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

static void dm_crtc_destroy_state(struct drm_crtc *crtc,
				  struct drm_crtc_state *state)
{
	struct dm_crtc_state *cur = to_dm_crtc_state(state);

	/* TODO: destroy the dc_stream here once the stream object is flattened */
	if (cur->stream)
		dc_stream_release(cur->stream);


	__drm_atomic_helper_crtc_destroy_state(state);


	kfree(state);
}

static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state, *cur;

	cur = to_dm_crtc_state(crtc->state);

	if (WARN_ON(!crtc->state))
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	if (cur->stream) {
		state->stream = cur->stream;
		dc_stream_retain(state->stream);
	}

	state->active_planes = cur->active_planes;
	state->vrr_infopacket = cur->vrr_infopacket;
	state->abm_level = cur->abm_level;
	state->vrr_supported = cur->vrr_supported;
	state->freesync_config = cur->freesync_config;
	state->cm_has_degamma = cur->cm_has_degamma;
	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
	state->crc_skip_count = cur->crc_skip_count;
	state->mpo_requested = cur->mpo_requested;
	/* TODO: duplicate the dc_stream here once the stream object is flattened */

	return &state->base;
}

static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}

static void dm_crtc_reset_state(struct drm_crtc *crtc)
{
	struct dm_crtc_state *state;

	if (crtc->state)
		dm_crtc_destroy_state(crtc, crtc->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (WARN_ON(!state))
		return;

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

#ifdef CONFIG_DEBUG_FS
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
	crtc_debugfs_init(crtc);

	return 0;
}
#endif

/* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,
	.destroy = amdgpu_dm_crtc_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DEBUG_FS)
	.late_register = amdgpu_dm_crtc_late_register,
#endif
};

static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}

static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
{
	struct drm_atomic_state *state = new_crtc_state->state;
	struct drm_plane *plane;
	int num_active = 0;

	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
		struct drm_plane_state *new_plane_state;

		/* Cursor planes are "fake". */
		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			continue;

		new_plane_state = drm_atomic_get_new_plane_state(state, plane);

		if (!new_plane_state) {
			/*
			 * The plane is enabled on the CRTC and hasn't changed
			 * state. This means that it previously passed
			 * validation and is therefore enabled.
			 */
			num_active += 1;
			continue;
		}

		/* We need a framebuffer to be considered enabled. */
		num_active += (new_plane_state->fb != NULL);
	}

	return num_active;
}

static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
					 struct drm_crtc_state *new_crtc_state)
{
	struct dm_crtc_state *dm_new_crtc_state =
		to_dm_crtc_state(new_crtc_state);

	dm_new_crtc_state->active_planes = 0;

	if (!dm_new_crtc_state->stream)
		return;

	dm_new_crtc_state->active_planes =
		count_crtc_active_planes(new_crtc_state);
}

static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
				      const struct drm_display_mode *mode,
				      struct drm_display_mode *adjusted_mode)
{
	return true;
}

static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									   crtc);
	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
	struct dc *dc = adev->dm.dc;
	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
	int ret = -EINVAL;

	trace_amdgpu_dm_crtc_atomic_check(crtc_state);

	dm_update_crtc_active_planes(crtc, crtc_state);

	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
			modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
		return ret;
	}

	/*
	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
	 * planes are disabled, which is not supported by the hardware. And there is legacy
	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
	 */
	if (crtc_state->enable &&
	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
		return -EINVAL;
	}

	/* In some use cases, like reset, no stream is attached */
	if (!dm_crtc_state->stream)
		return 0;

	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
		return 0;

	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
	return ret;
}

static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
	.disable = dm_crtc_helper_disable,
	.atomic_check = dm_crtc_helper_atomic_check,
	.mode_fixup = dm_crtc_helper_mode_fixup,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};

int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			struct drm_plane *plane,
			uint32_t crtc_index)
{
	struct amdgpu_crtc *acrtc = NULL;
	struct drm_plane *cursor_plane;

	int res = -ENOMEM;

	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
	if (!cursor_plane)
		goto fail;

	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);

	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
	if (!acrtc)
		goto fail;

	res = drm_crtc_init_with_planes(
			dm->ddev,
			&acrtc->base,
			plane,
			cursor_plane,
			&amdgpu_dm_crtc_funcs, NULL);

	if (res)
		goto fail;

	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);

	/* Create (reset) the plane state */
	if (acrtc->base.funcs->reset)
		acrtc->base.funcs->reset(&acrtc->base);

	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;

	acrtc->crtc_id = crtc_index;
	acrtc->base.enabled = false;
	acrtc->otg_inst = -1;

	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
				   true, MAX_COLOR_LUT_ENTRIES);
	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);

	return 0;

fail:
	kfree(acrtc);
	kfree(cursor_plane);
	return res;
}

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h (new file, 51 lines)
@ -0,0 +1,51 @@
/* SPDX-License-Identifier: MIT */
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_CRTC_H__
#define __AMDGPU_DM_CRTC_H__

void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc);

bool modeset_required(struct drm_crtc_state *crtc_state,
		      struct dc_stream_state *new_stream,
		      struct dc_stream_state *old_stream);

int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable);

bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc);

bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state);

int dm_enable_vblank(struct drm_crtc *crtc);

void dm_disable_vblank(struct drm_crtc *crtc);

int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
			struct drm_plane *plane,
			uint32_t link_index);

#endif

@ -50,6 +50,13 @@ struct dmub_debugfs_trace_entry {
	uint32_t param1;
};

static const char *const mst_progress_status[] = {
	"probe",
	"remote_edid",
	"allocate_new_payload",
	"clear_allocated_payload",
};

/* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array
 *
 * Function takes in attributes passed to debugfs write entry
@ -1256,14 +1263,22 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
		return -EINVAL;
	}

	kfree(wr_buf);

	if (param_nums <= 0) {
		DRM_DEBUG_DRIVER("user data could not be read\n");
		kfree(wr_buf);
		return -EINVAL;
	}

	mutex_lock(&aconnector->hpd_lock);

	/* Not supported for mst end devices */
	if (aconnector->mst_port) {
		mutex_unlock(&aconnector->hpd_lock);
		return -EINVAL;
	}

	if (param[0] == 1) {
		mutex_lock(&aconnector->hpd_lock);

		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) &&
		    new_connection_type != dc_connection_none)
@ -1300,6 +1315,10 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,

		amdgpu_dm_update_connector_after_detect(aconnector);

		/* If the aconnector is the root node in the mst topology */
		if (aconnector->mst_mgr.mst_state == true)
			reset_cur_dp_mst_topology(link);

		drm_modeset_lock_all(dev);
		dm_restore_drm_connector_state(dev, connector);
		drm_modeset_unlock_all(dev);
@ -1310,7 +1329,6 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
unlock:
	mutex_unlock(&aconnector->hpd_lock);

	kfree(wr_buf);
	return size;
}

@ -2529,6 +2547,92 @@ static int target_backlight_show(struct seq_file *m, void *unused)
	return 0;
}

/*
|
||||
* function description: Determine if the connector is mst connector
|
||||
*
|
||||
* This function helps to determine whether a connector is a mst connector.
|
||||
* - "root" stands for the root connector of the topology
|
||||
* - "branch" stands for branch device of the topology
|
||||
* - "end" stands for leaf node connector of the topology
|
||||
* - "no" stands for the connector is not a device of a mst topology
|
||||
* Access it with the following command:
|
||||
*
|
||||
* cat /sys/kernel/debug/dri/0/DP-X/is_mst_connector
|
||||
*
|
||||
*/
|
||||
static int dp_is_mst_connector_show(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_connector *connector = m->private;
|
||||
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
|
||||
struct drm_dp_mst_topology_mgr *mgr = NULL;
|
||||
struct drm_dp_mst_port *port = NULL;
|
||||
char *role = NULL;
|
||||
|
||||
mutex_lock(&aconnector->hpd_lock);
|
||||
|
||||
if (aconnector->mst_mgr.mst_state) {
|
||||
role = "root";
|
||||
} else if (aconnector->mst_port &&
|
||||
aconnector->mst_port->mst_mgr.mst_state) {
|
||||
|
||||
role = "end";
|
||||
|
||||
mgr = &aconnector->mst_port->mst_mgr;
|
||||
port = aconnector->port;
|
||||
|
||||
drm_modeset_lock(&mgr->base.lock, NULL);
|
||||
if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
|
||||
port->mcs)
|
||||
role = "branch";
|
||||
drm_modeset_unlock(&mgr->base.lock);
|
||||
|
||||
} else {
|
||||
role = "no";
|
||||
}
|
||||
|
||||
seq_printf(m, "%s\n", role);
|
||||
|
||||
mutex_unlock(&aconnector->hpd_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* function description: Read out the mst progress status
|
||||
*
|
||||
* This function helps to determine the mst progress status of
|
||||
* a mst connector.
|
||||
*
|
||||
* Access it with the following command:
|
||||
*
|
||||
* cat /sys/kernel/debug/dri/0/DP-X/mst_progress_status
|
||||
*
|
||||
*/
|
||||
static int dp_mst_progress_status_show(struct seq_file *m, void *unused)
|
||||
{
|
||||
struct drm_connector *connector = m->private;
|
||||
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
|
||||
struct amdgpu_device *adev = drm_to_adev(connector->dev);
|
||||
int i;
|
||||
|
||||
mutex_lock(&aconnector->hpd_lock);
|
||||
mutex_lock(&adev->dm.dc_lock);
|
||||
|
||||
if (aconnector->mst_status == MST_STATUS_DEFAULT) {
|
||||
seq_puts(m, "disabled\n");
|
||||
} else {
|
||||
for (i = 0; i < sizeof(mst_progress_status)/sizeof(char *); i++)
|
||||
seq_printf(m, "%s:%s\n",
|
||||
mst_progress_status[i],
|
||||
aconnector->mst_status & BIT(i) ? "done" : "not_done");
|
||||
}
|
||||
|
||||
mutex_unlock(&adev->dm.dc_lock);
|
||||
mutex_unlock(&aconnector->hpd_lock);
|
||||
|
||||
return 0;
|
||||
}
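Both new connector nodes are plain-text debugfs reads. A quick illustrative reader (the DP-1 directory is again a placeholder; expect "root"/"branch"/"end"/"no" from the first node and either "disabled" or per-stage done/not_done lines from the second):

#include <stdio.h>

int main(void)
{
        /* Hypothetical paths; substitute the real DRI minor and connector. */
        const char *nodes[] = {
                "/sys/kernel/debug/dri/0/DP-1/is_mst_connector",
                "/sys/kernel/debug/dri/0/DP-1/mst_progress_status",
        };
        char line[128];

        for (int i = 0; i < 2; i++) {
                FILE *f = fopen(nodes[i], "r");

                if (!f)
                        continue;
                printf("== %s\n", nodes[i]);
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);
                fclose(f);
        }
        return 0;
}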

DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
@ -2538,6 +2642,8 @@ DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
#endif
DEFINE_SHOW_ATTRIBUTE(internal_display);
DEFINE_SHOW_ATTRIBUTE(psr_capability);
DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector);
DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status);

static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
        .owner = THIS_MODULE,
@ -2681,6 +2787,8 @@ static const struct {
        {"dp_dsc_fec_support", &dp_dsc_fec_support_fops},
        {"max_bpc", &dp_max_bpc_debugfs_fops},
        {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops},
        {"is_mst_connector", &dp_is_mst_connector_fops},
        {"mst_progress_status", &dp_mst_progress_status_fops}
};

#ifdef CONFIG_DRM_AMD_DC_HDCP

@ -302,7 +302,7 @@ static void event_property_update(struct work_struct *work)
        mutex_lock(&hdcp_work->mutex);

        if (aconnector->base.state->commit) {
        if (aconnector->base.state && aconnector->base.state->commit) {
                ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);

                if (ret == 0) {
@ -311,18 +311,26 @@ static void event_property_update(struct work_struct *work)
                }
        }

        if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
                if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 &&
                    hdcp_work->encryption_status <= MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
                        drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
                else if (aconnector->base.state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 &&
                         hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
                        drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED);
        } else {
                drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED);
        if (aconnector->base.state) {
                if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
                        if (aconnector->base.state->hdcp_content_type ==
                                        DRM_MODE_HDCP_CONTENT_TYPE0 &&
                            hdcp_work->encryption_status <=
                                        MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
                                drm_hdcp_update_content_protection(&aconnector->base,
                                                                   DRM_MODE_CONTENT_PROTECTION_ENABLED);
                        else if (aconnector->base.state->hdcp_content_type ==
                                        DRM_MODE_HDCP_CONTENT_TYPE1 &&
                                 hdcp_work->encryption_status ==
                                        MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
                                drm_hdcp_update_content_protection(&aconnector->base,
                                                                   DRM_MODE_CONTENT_PROTECTION_ENABLED);
                } else {
                        drm_hdcp_update_content_protection(&aconnector->base,
                                                           DRM_MODE_CONTENT_PROTECTION_DESIRED);
                }
        }

        mutex_unlock(&hdcp_work->mutex);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
@ -495,7 +503,9 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
                (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1,
                (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1);

        hdcp_update_display(hdcp_work, link_index, aconnector, conn_state->hdcp_content_type, false);
        if (conn_state)
                hdcp_update_display(hdcp_work, link_index, aconnector,
                                    conn_state->hdcp_content_type, false);
}

@ -312,6 +312,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
        struct amdgpu_dm_connector *aconnector;
        struct drm_dp_mst_topology_mgr *mst_mgr;
        struct drm_dp_mst_port *mst_port;
        enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
        enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;

        aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

@ -325,8 +327,20 @@ bool dm_helpers_dp_mst_send_payload_allocation(
        if (!mst_mgr->mst_state)
                return false;

        /* It's OK for this to fail */
        drm_dp_update_payload_part2(mst_mgr);
        if (!enable) {
                set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
                clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
        }

        if (drm_dp_update_payload_part2(mst_mgr)) {
                amdgpu_dm_set_mst_status(&aconnector->mst_status,
                                         set_flag, false);
        } else {
                amdgpu_dm_set_mst_status(&aconnector->mst_status,
                                         set_flag, true);
                amdgpu_dm_set_mst_status(&aconnector->mst_status,
                                         clr_flag, false);
        }

        if (!enable)
                drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
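The enable/disable symmetry above reads more clearly as a truth table: on a failed payload update the pending flag is cleared; on success it is set and its counterpart cleared, with the two roles swapped for deallocation. A standalone model of that bookkeeping (the names mirror the hunk, but the flag values and the uint8_t status width are assumptions for the sketch):

#include <stdbool.h>
#include <stdint.h>

enum { MST_ALLOCATE_NEW_PAYLOAD = 1u << 2, MST_CLEAR_ALLOCATED_PAYLOAD = 1u << 3 };

static void set_mst_status(uint8_t *status, uint8_t flag, bool set)
{
        if (set)
                *status |= flag;
        else
                *status &= ~flag;
}

static void track_payload_update(uint8_t *status, bool enable, bool update_failed)
{
        /* Same swap as the !enable branch in the hunk. */
        uint8_t set_flag = enable ? MST_ALLOCATE_NEW_PAYLOAD : MST_CLEAR_ALLOCATED_PAYLOAD;
        uint8_t clr_flag = enable ? MST_CLEAR_ALLOCATED_PAYLOAD : MST_ALLOCATE_NEW_PAYLOAD;

        if (update_failed) {
                set_mst_status(status, set_flag, false);
        } else {
                set_mst_status(status, set_flag, true);
                set_mst_status(status, clr_flag, false);
        }
}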

@ -179,6 +179,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
        }

        aconnector->mst_status = MST_STATUS_DEFAULT;
        drm_modeset_unlock(&root->mst_mgr.base.lock);
}

@ -279,6 +281,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
                edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

                if (!edid) {
                        amdgpu_dm_set_mst_status(&aconnector->mst_status,
                                                 MST_REMOTE_EDID, false);

                        drm_connector_update_edid_property(
                                &aconnector->base,
                                NULL);
@ -309,6 +314,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
                }

                aconnector->edid = edid;
                amdgpu_dm_set_mst_status(&aconnector->mst_status,
                                         MST_REMOTE_EDID, true);
        }

        if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
@ -430,6 +437,10 @@ dm_dp_mst_detect(struct drm_connector *connector,
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;

                amdgpu_dm_set_mst_status(&aconnector->mst_status,
                                         MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
                                         false);
        }

        return connection_status;
@ -526,6 +537,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        connector = &aconnector->base;
        aconnector->port = port;
        aconnector->mst_port = master;
        amdgpu_dm_set_mst_status(&aconnector->mst_status,
                                 MST_PROBE, true);

        if (drm_connector_init(
                dev,

1646  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c (new file)
      (file diff suppressed because it is too large)

73    drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h (new file)
@ -0,0 +1,73 @@
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __AMDGPU_DM_PLANE_H__
#define __AMDGPU_DM_PLANE_H__

#include "dc.h"

void handle_cursor_update(struct drm_plane *plane,
                          struct drm_plane_state *old_plane_state);

int fill_dc_scaling_info(struct amdgpu_device *adev,
                         const struct drm_plane_state *state,
                         struct dc_scaling_info *scaling_info);

void get_min_max_dc_plane_scaling(struct drm_device *dev,
                                  struct drm_framebuffer *fb,
                                  int *min_downscale, int *max_upscale);

int dm_plane_helper_check_state(struct drm_plane_state *state,
                                struct drm_crtc_state *new_crtc_state);

bool modifier_has_dcc(uint64_t modifier);

unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier);

int fill_plane_buffer_attributes(struct amdgpu_device *adev,
                                 const struct amdgpu_framebuffer *afb,
                                 const enum surface_pixel_format format,
                                 const enum dc_rotation_angle rotation,
                                 const uint64_t tiling_flags,
                                 union dc_tiling_info *tiling_info,
                                 struct plane_size *plane_size,
                                 struct dc_plane_dcc_param *dcc,
                                 struct dc_plane_address *address,
                                 bool tmz_surface,
                                 bool force_disable_dcc);

int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
                         struct drm_plane *plane,
                         unsigned long possible_crtcs,
                         const struct dc_plane_cap *plane_cap);

const struct drm_format_info *amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);

void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
                                    bool *per_pixel_alpha, bool *pre_multiplied_alpha,
                                    bool *global_alpha, int *global_alpha_value);

#endif
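The 1646-line amdgpu_dm_plane.c counterpart is suppressed above, so purely for orientation: a hypothetical caller of the header's main entry point would look roughly like this. Only amdgpu_dm_plane_init() is the real symbol; the wrapper, its error handling, and the call context are invented for the sketch, not taken from the suppressed diff:

/* Illustrative kernel-style sketch, not from the suppressed file. */
static int example_create_plane(struct amdgpu_display_manager *dm,
                                struct drm_plane *plane,
                                unsigned long possible_crtcs,
                                const struct dc_plane_cap *plane_cap)
{
        int ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);

        if (ret)
                DRM_ERROR("plane init failed: %d\n", ret);
        return ret;
}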

@ -25,6 +25,9 @@
DC_LIBS = basics bios dml clk_mgr dce gpio irq link virtual

ifdef CONFIG_DRM_AMD_DC_DCN

KCOV_INSTRUMENT := n

DC_LIBS += dcn20
DC_LIBS += dsc
DC_LIBS += dcn10

@ -107,12 +107,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN201)
###############################################################################
CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o

# prevent build errors regarding soft-float vs hard-float FP ABI tags
# this code is currently unused on ppc64, as it applies to Renoir APUs only
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif

AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))

AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@ -121,12 +115,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
###############################################################################
CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o

# prevent build errors regarding soft-float vs hard-float FP ABI tags
# this code is currently unused on ppc64, as it applies to VanGogh APUs only
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif

AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))

AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@ -135,12 +123,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
###############################################################################
CLK_MGR_DCN301 = vg_clk_mgr.o dcn301_smu.o

# prevent build errors regarding soft-float vs hard-float FP ABI tags
# this code is currently unused on ppc64, as it applies to VanGogh APUs only
ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif

AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))

AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
@ -188,31 +170,6 @@ CLK_MGR_DCN32 = dcn32_clk_mgr.o dcn32_clk_mgr_smu_msg.o

AMD_DAL_CLK_MGR_DCN32 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn32/,$(CLK_MGR_DCN32))

ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn32/dcn32_clk_mgr.o := -mhard-float -msse
endif

ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn32/dcn32_clk_mgr.o := -mhard-float -maltivec
endif

ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
endif

ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn32/dcn32_clk_mgr.o := -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn32/dcn32_clk_mgr.o := -msse2
endif
endif

AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN32)

endif

@ -26,10 +26,9 @@
#include "dccg.h"
#include "clk_mgr_internal.h"

#include "dcn20/dcn20_clk_mgr.h"
#include "rn_clk_mgr.h"

#include "dml/dcn20/dcn20_fpu.h"

#include "dce100/dce_clk_mgr.h"
#include "rn_clk_mgr_vbios_smu.h"
@ -45,7 +44,6 @@

/* Constants */

#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */

/* Macros */
@ -613,228 +611,6 @@ static struct clk_bw_params rn_bw_params = {

};

static struct wm_table ddr4_wm_table_gs = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 7.09,
                        .sr_enter_plus_exit_time_us = 8.14,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 10.12,
                        .sr_enter_plus_exit_time_us = 11.48,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 10.12,
                        .sr_enter_plus_exit_time_us = 11.48,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 10.12,
                        .sr_enter_plus_exit_time_us = 11.48,
                        .valid = true,
                },
        }
};

static struct wm_table lpddr4_wm_table_gs = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 5.32,
                        .sr_enter_plus_exit_time_us = 6.38,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.82,
                        .sr_enter_plus_exit_time_us = 11.196,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.89,
                        .sr_enter_plus_exit_time_us = 11.24,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.748,
                        .sr_enter_plus_exit_time_us = 11.102,
                        .valid = true,
                },
        }
};

static struct wm_table lpddr4_wm_table_with_disabled_ppt = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 8.32,
                        .sr_enter_plus_exit_time_us = 9.38,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.82,
                        .sr_enter_plus_exit_time_us = 11.196,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.89,
                        .sr_enter_plus_exit_time_us = 11.24,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.748,
                        .sr_enter_plus_exit_time_us = 11.102,
                        .valid = true,
                },
        }
};

static struct wm_table ddr4_wm_table_rn = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 11.90,
                        .sr_enter_plus_exit_time_us = 12.80,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 13.18,
                        .sr_enter_plus_exit_time_us = 14.30,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 13.18,
                        .sr_enter_plus_exit_time_us = 14.30,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 13.18,
                        .sr_enter_plus_exit_time_us = 14.30,
                        .valid = true,
                },
        }
};

static struct wm_table ddr4_1R_wm_table_rn = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 13.90,
                        .sr_enter_plus_exit_time_us = 14.80,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 13.90,
                        .sr_enter_plus_exit_time_us = 14.80,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 13.90,
                        .sr_enter_plus_exit_time_us = 14.80,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 13.90,
                        .sr_enter_plus_exit_time_us = 14.80,
                        .valid = true,
                },
        }
};

static struct wm_table lpddr4_wm_table_rn = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 7.32,
                        .sr_enter_plus_exit_time_us = 8.38,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.82,
                        .sr_enter_plus_exit_time_us = 11.196,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.89,
                        .sr_enter_plus_exit_time_us = 11.24,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 9.748,
                        .sr_enter_plus_exit_time_us = 11.102,
                        .valid = true,
                },
        }
};

static unsigned int find_socclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage)
{
        int i;
@ -914,12 +690,10 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
                /*
                 * WM set D will be re-purposed for memory retraining
                 */
                bw_params->wm_table.entries[WM_D].pstate_latency_us = LPDDR_MEM_RETRAIN_LATENCY;
                bw_params->wm_table.entries[WM_D].wm_inst = WM_D;
                bw_params->wm_table.entries[WM_D].wm_type = WM_TYPE_RETRAINING;
                bw_params->wm_table.entries[WM_D].valid = true;
                DC_FP_START();
                dcn21_clk_mgr_set_bw_params_wm_table(bw_params);
                DC_FP_END();
        }

}
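A recurring shape in this series: watermark math that uses double moves into the dml/ FPU-isolated files, and every remaining call site brackets it with DC_FP_START()/DC_FP_END(). On x86 that bracket ultimately wraps kernel_fpu_begin()/kernel_fpu_end(), which is why no floating-point arithmetic may leak outside it. As a sketch of the invariant (the helper name is real and taken from the hunk above; the wrapper itself is illustrative):

/* Illustrative wrapper: FP-heavy DML code must only run inside the bracket. */
static void example_refresh_wm_table(struct clk_bw_params *bw_params)
{
        DC_FP_START();                                   /* enable kernel FPU use */
        dcn21_clk_mgr_set_bw_params_wm_table(bw_params); /* double math lives here */
        DC_FP_END();                                     /* restore; no FP past this point */
}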

void rn_clk_mgr_construct(

@ -29,6 +29,13 @@
#include "clk_mgr.h"
#include "dm_pp_smu.h"

extern struct wm_table ddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_with_disabled_ppt;
extern struct wm_table ddr4_wm_table_rn;
extern struct wm_table ddr4_1R_wm_table_rn;
extern struct wm_table lpddr4_wm_table_rn;

struct rn_clk_registers {
        uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */
};

@ -29,6 +29,7 @@
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dml/dcn30/dcn30_fpu.h"
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
@ -97,65 +98,11 @@ static void dcn3_init_single_clock(struct clk_mgr_internal *clk_mgr, uint32_t cl
        }
}

static noinline void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
static void dcn3_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
{
        /* defaults */
        double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
        double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
        double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
        uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;

        /* Set A - Normal - default values */
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = 0;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;

        /* Set B - Performance - higher minimum clocks */
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = TUNED VALUE;
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = TUNED VALUE;
        // clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;

        /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 0;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = 0;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
        clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = 1600;
        clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 38;
        clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = 8000;
        clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
        clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = 10000;
        clk_mgr->base.bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
        clk_mgr->base.bw_params->dummy_pstate_table[3].dram_speed_mts = 16000;
        clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;

        /* Set D - MALL - SR enter and exit times adjusted for MALL */
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = pstate_latency_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = 2;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = 4;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = 0;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
        DC_FP_START();
        dcn3_fpu_build_wm_range_table(&clk_mgr->base);
        DC_FP_END();
}

void dcn3_init_clocks(struct clk_mgr *clk_mgr_base)

@ -32,6 +32,9 @@
// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"

// For DML FPU code
#include "dml/dcn20/dcn20_fpu.h"

#include "vg_clk_mgr.h"
#include "dcn301_smu.h"
#include "reg_helper.h"
@ -526,81 +529,6 @@ static struct clk_bw_params vg_bw_params = {

};

static struct wm_table ddr4_wm_table = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 6.09,
                        .sr_enter_plus_exit_time_us = 7.14,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 10.12,
                        .sr_enter_plus_exit_time_us = 11.48,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 10.12,
                        .sr_enter_plus_exit_time_us = 11.48,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.72,
                        .sr_exit_time_us = 10.12,
                        .sr_enter_plus_exit_time_us = 11.48,
                        .valid = true,
                },
        }
};

static struct wm_table lpddr5_wm_table = {
        .entries = {
                {
                        .wm_inst = WM_A,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 13.5,
                        .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_B,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 13.5,
                        .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_C,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 13.5,
                        .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
                {
                        .wm_inst = WM_D,
                        .wm_type = WM_TYPE_PSTATE_CHG,
                        .pstate_latency_us = 11.65333,
                        .sr_exit_time_us = 13.5,
                        .sr_enter_plus_exit_time_us = 16.5,
                        .valid = true,
                },
        }
};

static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_table,
                                            unsigned int voltage)
{
@ -670,10 +598,9 @@ static void vg_clk_mgr_helper_populate_bw_params(
                /*
                 * WM set D will be re-purposed for memory retraining
                 */
                bw_params->wm_table.entries[WM_D].pstate_latency_us = LPDDR_MEM_RETRAIN_LATENCY;
                bw_params->wm_table.entries[WM_D].wm_inst = WM_D;
                bw_params->wm_table.entries[WM_D].wm_type = WM_TYPE_RETRAINING;
                bw_params->wm_table.entries[WM_D].valid = true;
                DC_FP_START();
                dcn21_clk_mgr_set_bw_params_wm_table(bw_params);
                DC_FP_END();
        }

}

@ -29,6 +29,9 @@

struct watermarks;

extern struct wm_table ddr4_wm_table;
extern struct wm_table lpddr5_wm_table;

struct smu_watermark_set {
        struct watermarks *wm_set;
        union large_integer mc_address;

@ -42,6 +42,7 @@
#include "dcn/dcn_3_2_0_sh_mask.h"

#include "dcn32/dcn32_clk_mgr.h"
#include "dml/dcn32/dcn32_fpu.h"

#define DCN_BASE__INST0_SEG1 0x000000C0

@ -146,83 +147,9 @@ static void dcn32_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e cl

static void dcn32_build_wm_range_table(struct clk_mgr_internal *clk_mgr)
{
        /* defaults */
        double pstate_latency_us = clk_mgr->base.ctx->dc->dml.soc.dram_clock_change_latency_us;
        double fclk_change_latency_us = clk_mgr->base.ctx->dc->dml.soc.fclk_change_latency_us;
        double sr_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_exit_time_us;
        double sr_enter_plus_exit_time_us = clk_mgr->base.ctx->dc->dml.soc.sr_enter_plus_exit_time_us;
        /* For min clocks use as reported by PM FW and report those as min */
        uint16_t min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz;
        uint16_t min_dcfclk_mhz = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;
        uint16_t setb_min_uclk_mhz = min_uclk_mhz;
        uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->base.ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz;

        /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */
        if (dcfclk_mhz_for_the_second_state)
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state;
        else
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->base.bw_params->clk_table.entries[0].dcfclk_mhz;

        if (clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz)
                setb_min_uclk_mhz = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz;

        /* Set A - Normal - default values */
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].valid = true;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF;

        /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].valid = true;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF;

        /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */
        /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */
        if (clk_mgr->base.ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) {
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].valid = true;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 38;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz;
                clk_mgr->base.bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF;
                clk_mgr->base.bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[0].memclk_mhz * 16;
                clk_mgr->base.bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 38;
                clk_mgr->base.bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[1].memclk_mhz * 16;
                clk_mgr->base.bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9;
                clk_mgr->base.bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[2].memclk_mhz * 16;
                clk_mgr->base.bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8;
                clk_mgr->base.bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->base.bw_params->clk_table.entries[3].memclk_mhz * 16;
                clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5;
        }
        /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */
        /* For MALL DRAM clock change latency is N/A, for watermark calculations use lowest value dummy P state latency */
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].valid = true;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->base.bw_params->dummy_pstate_table[3].dummy_pstate_latency_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us; // TBD
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; // TBD
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz;
        clk_mgr->base.bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF;
        DC_FP_START();
        dcn32_build_wm_range_table_fpu(clk_mgr);
        DC_FP_END();
}
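The dummy_pstate_table fill above converts a UCLK entry in MHz into an effective DRAM rate in MT/s with a x16 factor, which is how 625 MHz and 1000 MHz entries land on the 10000 and 16000 MT/s rows that the DCN3.0 path hardcoded earlier in this patch. A minimal check of that arithmetic (whether the x16 factor generalizes beyond these ASICs is not claimed here; it is simply what the hunk programs):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the dummy_pstate_table fill: UCLK MHz -> effective MT/s. */
static uint32_t dram_speed_mts(uint16_t memclk_mhz)
{
        return (uint32_t)memclk_mhz * 16;
}

int main(void)
{
        /* 625 and 1000 MHz map to the 10000 and 16000 MT/s rows used by DCN3.0. */
        printf("%u %u\n", dram_speed_mts(625), dram_speed_mts(1000));
        return 0;
}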

void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)

@ -3478,7 +3478,7 @@ static void commit_planes_for_stream(struct dc *dc,
                                top_pipe_to_program->stream_res.tg,
                                CRTC_STATE_VACTIVE);

                if (stream && should_use_dmub_lock(stream->link)) {
                if (should_use_dmub_lock(stream->link)) {
                        union dmub_hw_lock_flags hw_locks = { 0 };
                        struct dmub_hw_lock_inst_flags inst_flags = { 0 };

@ -845,7 +845,7 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
        return link->type == dc_connection_mst_branch;
}

static bool reset_cur_dp_mst_topology(struct dc_link *link)
bool reset_cur_dp_mst_topology(struct dc_link *link)
{
        bool result = false;
        DC_LOGGER_INIT(link->ctx->logger);
@ -1703,7 +1703,7 @@ static bool dc_link_construct_legacy(struct dc_link *link,
        enc_init_data.transmitter =
                translate_encoder_to_transmitter(enc_init_data.encoder);
        link->link_enc =
                link->dc->res_pool->funcs->link_enc_create(&enc_init_data);
                link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);

        if (!link->link_enc) {
                DC_ERROR("Failed to create link encoder!\n");

@ -7064,6 +7064,7 @@ void dp_enable_link_phy(
                        pipes[i].clock_source->funcs->program_pix_clk(
                                pipes[i].clock_source,
                                &pipes[i].stream_res.pix_clk_params,
                                dp_get_link_encoding_format(link_settings),
                                &pipes[i].pll_settings);
                }
        }

@ -74,6 +74,7 @@
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
        enum dce_version dc_version = DCE_VERSION_UNKNOWN;

        switch (asic_id.chip_family) {

#if defined(CONFIG_DRM_AMD_DC_SI)
@ -169,8 +170,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
                dc_version = DCN_VERSION_3_21;
                break;
        case AMDGPU_FAMILY_GC_11_0_2:
                if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
                        dc_version = DCN_VERSION_3_14;
                dc_version = DCN_VERSION_3_14;
                break;
        default:
                dc_version = DCE_VERSION_UNKNOWN;
@ -1463,6 +1463,7 @@ bool dc_add_plane_to_context(
        struct dc_stream_status *stream_status = NULL;
        struct pipe_ctx *prev_right_head = NULL;
        struct pipe_ctx *free_right_pipe = NULL;
        struct pipe_ctx *prev_left_head = NULL;

        DC_LOGGER_INIT(stream->ctx->logger);
        for (i = 0; i < context->stream_count; i++)
@ -1514,8 +1515,16 @@ bool dc_add_plane_to_context(

                /* ODM + window MPO, where MPO window is on right half only */
                if (free_pipe->plane_state &&
                        (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) &&
                        tail_pipe->next_odm_pipe) {
                    (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2) &&
                    tail_pipe->next_odm_pipe) {

                        /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on
                         * the right side, then we will invalidate a 2nd one on the right side
                         */
                        if (head_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
                                dc_plane_state_release(plane_state);
                                return false;
                        }

                        DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d tail_pipe->next_odm_pipe:%d\n",
                                        __func__,
@ -1530,20 +1539,42 @@ bool dc_add_plane_to_context(
                         * - If not, continue to use free_pipe
                         * - If the right side already has a pipe, use that pipe instead if it's available
                         */

                        /*
                         * We also want to avoid the case where with three plane ( 2 MPO videos ), we have
                         * both videos on the left side so one of the videos is invalidated. Then we
                         * move the invalidated video back to the right side. If the order of the plane
                         * states is such that the right MPO plane is processed first, the free pipe
                         * selected by the head will be the left MPO pipe. But since there was no right
                         * MPO pipe, it will assign the free pipe to the right MPO pipe instead and
                         * a pipe reallocation will occur.
                         * Check the old context to see if the left side already has a pipe allocated
                         * - If not, continue to use free_pipe
                         * - If the left side is already using this pipe, then pick another pipe for right
                         */

                        prev_right_head = &dc->current_state->res_ctx.pipe_ctx[tail_pipe->next_odm_pipe->pipe_idx];
                        if ((prev_right_head->bottom_pipe) && (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) {
                        if ((prev_right_head->bottom_pipe) &&
                            (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) {
                                free_right_pipe = acquire_free_pipe_for_head(context, pool, tail_pipe->next_odm_pipe);
                                if (free_right_pipe) {
                                        free_pipe->stream = NULL;
                                        memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource));
                                        memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource));
                                        free_pipe->plane_state = NULL;
                                        free_pipe->pipe_idx = 0;
                                        free_right_pipe->plane_state = plane_state;
                                        free_pipe = free_right_pipe;
                                } else {
                                        prev_left_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->pipe_idx];
                                        if ((prev_left_head->bottom_pipe) &&
                                            (free_pipe->pipe_idx == prev_left_head->bottom_pipe->pipe_idx)) {
                                                free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
                                        }
                                }

                                if (free_right_pipe) {
                                        free_pipe->stream = NULL;
                                        memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource));
                                        memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource));
                                        free_pipe->plane_state = NULL;
                                        free_pipe->pipe_idx = 0;
                                        free_right_pipe->plane_state = plane_state;
                                        free_pipe = free_right_pipe;
                                }

                        free_pipe->stream_res.tg = tail_pipe->next_odm_pipe->stream_res.tg;
                        free_pipe->stream_res.abm = tail_pipe->next_odm_pipe->stream_res.abm;
                        free_pipe->stream_res.opp = tail_pipe->next_odm_pipe->stream_res.opp;
@ -1553,7 +1584,63 @@ bool dc_add_plane_to_context(

                        free_pipe->top_pipe = tail_pipe->next_odm_pipe;
                        tail_pipe->next_odm_pipe->bottom_pipe = free_pipe;
                } else if (free_pipe->plane_state &&
                                (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)
                                && head_pipe->next_odm_pipe) {

                        /* For ODM + window MPO, support 3 plane ( 2 MPO ) case.
                         * Here we have a desktop ODM + left window MPO and a new MPO window appears
                         * on the right side only. It fails the first case, because tail_pipe is the
                         * left window MPO, so it has no next_odm_pipe. So in this scenario, we check
                         * for head_pipe->next_odm_pipe instead
                         */
                        DC_LOG_SCALER("%s - ODM + win MPO (left) + win MPO (right). free_pipe:%d head_pipe->next_odm:%d\n",
                                        __func__,
                                        free_pipe->pipe_idx,
                                        head_pipe->next_odm_pipe ? head_pipe->next_odm_pipe->pipe_idx : -1);

                        /*
                         * We want to avoid the case where the right side already has a pipe assigned to
                         * it and is different from free_pipe ( which would trigger a pipe
                         * reallocation ).
                         * Check the old context to see if the right side already has a pipe allocated
                         * - If not, continue to use free_pipe
                         * - If the right side already has a pipe, use that pipe instead if it's available
                         */
                        prev_right_head = &dc->current_state->res_ctx.pipe_ctx[head_pipe->next_odm_pipe->pipe_idx];
                        if ((prev_right_head->bottom_pipe) &&
                            (free_pipe->pipe_idx != prev_right_head->bottom_pipe->pipe_idx)) {
                                free_right_pipe = acquire_free_pipe_for_head(context, pool, head_pipe->next_odm_pipe);
                                if (free_right_pipe) {
                                        free_pipe->stream = NULL;
                                        memset(&free_pipe->stream_res, 0, sizeof(struct stream_resource));
                                        memset(&free_pipe->plane_res, 0, sizeof(struct plane_resource));
                                        free_pipe->plane_state = NULL;
                                        free_pipe->pipe_idx = 0;
                                        free_right_pipe->plane_state = plane_state;
                                        free_pipe = free_right_pipe;
                                }
                        }

                        free_pipe->stream_res.tg = head_pipe->next_odm_pipe->stream_res.tg;
                        free_pipe->stream_res.abm = head_pipe->next_odm_pipe->stream_res.abm;
                        free_pipe->stream_res.opp = head_pipe->next_odm_pipe->stream_res.opp;
                        free_pipe->stream_res.stream_enc = head_pipe->next_odm_pipe->stream_res.stream_enc;
                        free_pipe->stream_res.audio = head_pipe->next_odm_pipe->stream_res.audio;
                        free_pipe->clock_source = head_pipe->next_odm_pipe->clock_source;

                        free_pipe->top_pipe = head_pipe->next_odm_pipe;
                        head_pipe->next_odm_pipe->bottom_pipe = free_pipe;
                } else {

                        /* For ODM + window MPO, in 3 plane case, if we already have a MPO window on
                         * the left side, then we will invalidate a 2nd one on the left side
                         */
                        if (head_pipe->next_odm_pipe && tail_pipe->top_pipe) {
                                dc_plane_state_release(plane_state);
                                return false;
                        }

                        free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
                        free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
                        free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@ -1564,21 +1651,28 @@ bool dc_add_plane_to_context(
                        free_pipe->top_pipe = tail_pipe;
                        tail_pipe->bottom_pipe = free_pipe;

                        if (!free_pipe->next_odm_pipe && tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
                                free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
                                tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;
                        }
                        if (!free_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) {
                                free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
                                tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
                        /* Connect MPO pipes together if MPO window is in the centre */
                        if (!(free_pipe->plane_state &&
                              (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
                               free_pipe->stream->src.x + free_pipe->stream->src.width/2))) {
                                if (!free_pipe->next_odm_pipe &&
                                    tail_pipe->next_odm_pipe && tail_pipe->next_odm_pipe->bottom_pipe) {
                                        free_pipe->next_odm_pipe = tail_pipe->next_odm_pipe->bottom_pipe;
                                        tail_pipe->next_odm_pipe->bottom_pipe->prev_odm_pipe = free_pipe;
                                }
                                if (!free_pipe->prev_odm_pipe &&
                                    tail_pipe->prev_odm_pipe && tail_pipe->prev_odm_pipe->bottom_pipe) {
                                        free_pipe->prev_odm_pipe = tail_pipe->prev_odm_pipe->bottom_pipe;
                                        tail_pipe->prev_odm_pipe->bottom_pipe->next_odm_pipe = free_pipe;
                                }
                        }
                }
        }

        /* ODM + window MPO, where MPO window is on left half only */
        if (free_pipe->plane_state &&
                (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
                free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
            (free_pipe->plane_state->clip_rect.x + free_pipe->plane_state->clip_rect.width <=
             free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
                DC_LOG_SCALER("%s - ODM + window MPO(left). free_pipe:%d\n",
                                __func__,
                                free_pipe->pipe_idx);
@ -1586,7 +1680,7 @@ bool dc_add_plane_to_context(
        }
        /* ODM + window MPO, where MPO window is on right half only */
        if (free_pipe->plane_state &&
                (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
            (free_pipe->plane_state->clip_rect.x >= free_pipe->stream->src.x + free_pipe->stream->src.width/2)) {
                DC_LOG_SCALER("%s - ODM + window MPO(right). free_pipe:%d\n",
                                __func__,
                                free_pipe->pipe_idx);
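The left/right-half tests repeated through this function are a pure geometry predicate on the plane's clip rectangle versus the stream source. Pulled out as a standalone model (the enum and function names are illustrative; the two comparisons are verbatim from the hunks above):

#include <stdio.h>

enum mpo_half { MPO_SPANS_CENTRE, MPO_LEFT_HALF_ONLY, MPO_RIGHT_HALF_ONLY };

static enum mpo_half classify_mpo_window(int clip_x, int clip_w, int src_x, int src_w)
{
        if (clip_x >= src_x + src_w / 2)
                return MPO_RIGHT_HALF_ONLY;      /* window starts right of centre */
        if (clip_x + clip_w <= src_x + src_w / 2)
                return MPO_LEFT_HALF_ONLY;       /* window ends left of centre */
        return MPO_SPANS_CENTRE;                 /* straddles the ODM seam */
}

int main(void)
{
        /* 3840-wide source split at 1920 by ODM. */
        printf("%d %d %d\n",
               classify_mpo_window(2000, 800, 0, 3840),  /* right half: 2 */
               classify_mpo_window(100, 800, 0, 3840),   /* left half: 1 */
               classify_mpo_window(1800, 400, 0, 3840)); /* centre: 0 */
        return 0;
}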

@ -328,6 +328,11 @@ bool dc_stream_set_cursor_attributes(
        }

        dc = stream->ctx->dc;

        if (attributes->height * attributes->width * 4 > 16384)
        if (stream->mall_stream_config.type == SUBVP_MAIN)
                return false;

        stream->cursor_attributes = *attributes;

        dc_z10_restore(dc);
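For context on the check being replaced: 16384 bytes is exactly a 64x64 cursor at 4 bytes per pixel (the *4 suggests a 32-bit format), so the old test rejected anything larger than 64x64, while the new test instead refuses cursor attribute updates only for SubVP main streams. Quick arithmetic:

#include <stdio.h>

int main(void)
{
        /* Old rejection threshold: width * height * 4 > 16384 bytes. */
        printf("64x64x4   = %d\n", 64 * 64 * 4);    /* 16384: allowed (not strictly greater) */
        printf("128x128x4 = %d\n", 128 * 128 * 4);  /* 65536: rejected by the old check */
        return 0;
}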

@ -47,7 +47,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;

#define DC_VER "3.2.194"
#define DC_VER "3.2.196"

#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -683,7 +683,6 @@ struct dc_debug_options {
        bool hdmi20_disable;
        bool skip_detection_link_training;
        uint32_t edid_read_retry_times;
        bool remove_disconnect_edp;
        unsigned int force_odm_combine; //bit vector based on otg inst
        unsigned int seamless_boot_odm_combine;
        unsigned int force_odm_combine_4to1; //bit vector based on otg inst

@ -270,6 +270,23 @@ void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal
        dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst)
{
        union dmub_rb_cmd cmd = { 0 };

        cmd.drr_update.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH;
        // TODO: Uncomment once FW headers are promoted
        //cmd.drr_update.header.sub_type = DMUB_CMD__FAMS_SET_MANUAL_TRIGGER;
        cmd.drr_update.dmub_optc_state_req.tg_inst = tg_inst;

        cmd.drr_update.header.payload_bytes = sizeof(cmd.drr_update) - sizeof(cmd.drr_update.header);

        // Send the command to the DMCUB.
        dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
        dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
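The payload_bytes computation above is the generic DMUB command-fill pattern: the payload size is the command struct minus its embedded header. A standalone model of that arithmetic (the struct layouts are simplified stand-ins, not the real dmub_rb_cmd definitions):

#include <stdint.h>
#include <stdio.h>

struct fake_cmd_header {
        uint8_t type;
        uint8_t sub_type;
        uint8_t payload_bytes;
        uint8_t reserved;
};

struct fake_drr_update {
        struct fake_cmd_header header;
        uint32_t tg_inst;
        uint32_t vtotal_min;
        uint32_t vtotal_max;
};

int main(void)
{
        struct fake_drr_update cmd = { 0 };

        /* Same arithmetic as the hunk: everything after the header is payload. */
        cmd.header.payload_bytes = sizeof(cmd) - sizeof(cmd.header);
        printf("payload_bytes = %u\n", cmd.header.payload_bytes); /* 12 for this layout */
        return 0;
}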

static uint8_t dc_dmub_srv_get_pipes_for_stream(struct dc *dc, struct dc_stream_state *stream)
{
        uint8_t pipes = 0;

@ -74,6 +74,7 @@ void dc_dmub_trace_event_control(struct dc *dc, bool enable);

void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max);

void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst);
bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context);

void dc_dmub_srv_query_caps_cmd(struct dmub_srv *dmub);

@ -514,4 +514,7 @@ bool dc_dp_trace_is_logged(struct dc_link *link,
struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
                                                     bool in_detection);
unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);

/* Destruct the mst topology of the link and reset the allocated payload table */
bool reset_cur_dp_mst_topology(struct dc_link *link);
#endif /* DC_LINK_H_ */

@ -838,6 +838,7 @@ static void dce112_program_pixel_clk_resync(
static bool dce110_program_pix_clk(
        struct clock_source *clock_source,
        struct pixel_clk_params *pix_clk_params,
        enum dp_link_encoding encoding,
        struct pll_settings *pll_settings)
{
        struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
@ -911,6 +912,7 @@ static bool dce110_program_pix_clk(
static bool dce112_program_pix_clk(
        struct clock_source *clock_source,
        struct pixel_clk_params *pix_clk_params,
        enum dp_link_encoding encoding,
        struct pll_settings *pll_settings)
{
        struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
@ -970,6 +972,7 @@ static bool dce112_program_pix_clk(
static bool dcn31_program_pix_clk(
        struct clock_source *clock_source,
        struct pixel_clk_params *pix_clk_params,
        enum dp_link_encoding encoding,
        struct pll_settings *pll_settings)
{
        struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
@ -993,9 +996,14 @@ static bool dcn31_program_pix_clk(
#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* Enable DTO */
        if (clk_src->cs_mask->PIPE0_DTO_SRC_SEL)
                REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
                             DP_DTO0_ENABLE, 1,
                             PIPE0_DTO_SRC_SEL, 1);
                if (encoding == DP_128b_132b_ENCODING)
                        REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
                                     DP_DTO0_ENABLE, 1,
                                     PIPE0_DTO_SRC_SEL, 2);
                else
                        REG_UPDATE_2(PIXEL_RATE_CNTL[inst],
                                     DP_DTO0_ENABLE, 1,
                                     PIPE0_DTO_SRC_SEL, 1);
        else
                REG_UPDATE(PIXEL_RATE_CNTL[inst],
                           DP_DTO0_ENABLE, 1);
||||
@ -1198,12 +1206,13 @@ const struct pixel_rate_range_table_entry *look_up_in_video_optimized_rate_tlb(
static bool dcn20_program_pix_clk(
        struct clock_source *clock_source,
        struct pixel_clk_params *pix_clk_params,
        enum dp_link_encoding encoding,
        struct pll_settings *pll_settings)
{
        struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
        unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;

        dce112_program_pix_clk(clock_source, pix_clk_params, pll_settings);
        dce112_program_pix_clk(clock_source, pix_clk_params, encoding, pll_settings);

        if (clock_source->ctx->dc->hwss.enable_vblanks_synchronization &&
                        clock_source->ctx->dc->config.vblank_alignment_max_frame_time_diff > 0) {

@ -1243,6 +1252,7 @@ static const struct clock_source_funcs dcn20_clk_src_funcs = {
static bool dcn3_program_pix_clk(
        struct clock_source *clock_source,
        struct pixel_clk_params *pix_clk_params,
        enum dp_link_encoding encoding,
        struct pll_settings *pll_settings)
{
        struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);

@ -1265,7 +1275,7 @@ static bool dcn3_program_pix_clk(
                REG_UPDATE(PIXEL_RATE_CNTL[inst], DP_DTO0_ENABLE, 1);
        } else
                // For other signal types(HDMI_TYPE_A, DVI) Driver still to call VBIOS Command table
                dce112_program_pix_clk(clock_source, pix_clk_params, pll_settings);
                dce112_program_pix_clk(clock_source, pix_clk_params, encoding, pll_settings);

        return true;
}

@ -1276,9 +1286,7 @@ static uint32_t dcn3_get_pix_clk_dividers(
        struct pll_settings *pll_settings)
{
        unsigned long long actual_pix_clk_100Hz = pix_clk_params ? pix_clk_params->requested_pix_clk_100hz : 0;
        struct dce110_clk_src *clk_src;

        clk_src = TO_DCE110_CLK_SRC(cs);
        DC_LOGGER_INIT();

        if (pix_clk_params == NULL || pll_settings == NULL

@ -612,6 +612,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dce100_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dce110_link_encoder *enc110 =
@ -1435,6 +1435,7 @@ static enum dc_status dce110_enable_stream_timing(
        if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
                        pipe_ctx->clock_source,
                        &pipe_ctx->stream_res.pix_clk_params,
                        dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
                        &pipe_ctx->pll_settings)) {
                BREAK_TO_DEBUGGER();
                return DC_ERROR_UNEXPECTED;
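Callers of program_pix_clk() now derive the encoding from the currently configured link settings instead of letting the clock source guess. Roughly, the existing DC helper maps link rate to channel coding like this (a paraphrase for orientation, not part of this diff; exact rate enums may differ):

        /* Paraphrase of dp_get_link_encoding_format(): DP 2.x UHBR rates
         * imply 128b/132b channel coding; the classic RBR..HBR3 range
         * stays on 8b/10b.
         */
        static enum dp_link_encoding encoding_for_rate(enum dc_link_rate rate)
        {
                if (rate >= LINK_RATE_UHBR10 && rate <= LINK_RATE_UHBR20)
                        return DP_128b_132b_ENCODING;
                return DP_8b_10b_ENCODING;
        }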
@ -660,6 +660,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dce110_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dce110_link_encoder *enc110 =

@ -618,6 +618,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dce112_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dce110_link_encoder *enc110 =

@ -697,6 +697,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dce120_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dce110_link_encoder *enc110 =

@ -710,6 +710,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dce60_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dce110_link_encoder *enc110 =

@ -713,6 +713,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dce80_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dce110_link_encoder *enc110 =

@ -892,6 +892,7 @@ enum dc_status dcn10_enable_stream_timing(
        if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
                        pipe_ctx->clock_source,
                        &pipe_ctx->stream_res.pix_clk_params,
                        dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
                        &pipe_ctx->pll_settings)) {
                BREAK_TO_DEBUGGER();
                return DC_ERROR_UNEXPECTED;

@ -49,6 +49,11 @@ void mpc1_set_bg_color(struct mpc *mpc,

        /* find bottommost mpcc. */
        while (bottommost_mpcc->mpcc_bot) {
                /* avoid circular linked link */
                ASSERT(bottommost_mpcc != bottommost_mpcc->mpcc_bot);
                if (bottommost_mpcc == bottommost_mpcc->mpcc_bot)
                        break;

                bottommost_mpcc = bottommost_mpcc->mpcc_bot;
        }
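Note the guard only catches a node that points at itself; a longer cycle deeper in the blend tree would still spin. If a full check were ever wanted, the classic two-pointer walk would do it — purely illustrative, not something this patch adds:

        /* Illustrative only: detect any cycle in an mpcc bottom chain with
         * Floyd's tortoise/hare, not just the immediate self-reference the
         * ASSERT above covers.
         */
        static bool mpcc_bot_chain_has_cycle(struct mpcc *head)
        {
                struct mpcc *slow = head, *fast = head;

                while (fast && fast->mpcc_bot) {
                        slow = slow->mpcc_bot;
                        fast = fast->mpcc_bot->mpcc_bot;
                        if (slow == fast)
                                return true;    /* walkers met: cycle */
                }
                return false;   /* chain terminates */
        }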
@ -740,6 +740,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dcn10_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn10_link_encoder *enc10 =

@ -59,7 +59,6 @@ void enc1_update_generic_info_packet(
        uint32_t packet_index,
        const struct dc_info_packet *info_packet)
{
        uint32_t regval;
        /* TODOFPGA Figure out a proper number for max_retries polling for lock
         * use 50 for now.
         */

@ -88,7 +87,6 @@ void enc1_update_generic_info_packet(
        REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1);

        /* choose which generic packet to use */
        regval = REG_READ(AFMT_VBI_PACKET_CONTROL);
        REG_UPDATE(AFMT_VBI_PACKET_CONTROL,
                        AFMT_GENERIC_INDEX, packet_index);

@ -259,8 +257,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
        uint32_t h_back_porch;
        uint8_t synchronous_clock = 0; /* asynchronous mode */
        uint8_t colorimetry_bpc;
        uint8_t dynamic_range_rgb = 0; /*full range*/
        uint8_t dynamic_range_ycbcr = 1; /*bt709*/
        uint8_t dp_pixel_encoding = 0;
        uint8_t dp_component_depth = 0;

@ -372,18 +368,15 @@ void enc1_stream_encoder_dp_set_stream_attribute(
        switch (output_color_space) {
        case COLOR_SPACE_SRGB:
                misc1 = misc1 & ~0x80; /* bit7 = 0*/
                dynamic_range_rgb = 0; /*full range*/
                break;
        case COLOR_SPACE_SRGB_LIMITED:
                misc0 = misc0 | 0x8; /* bit3=1 */
                misc1 = misc1 & ~0x80; /* bit7 = 0*/
                dynamic_range_rgb = 1; /*limited range*/
                break;
        case COLOR_SPACE_YCBCR601:
        case COLOR_SPACE_YCBCR601_LIMITED:
                misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
                misc1 = misc1 & ~0x80; /* bit7 = 0*/
                dynamic_range_ycbcr = 0; /*bt601*/
                if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
                        misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
                else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)

@ -393,15 +386,12 @@ void enc1_stream_encoder_dp_set_stream_attribute(
        case COLOR_SPACE_YCBCR709_LIMITED:
                misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
                misc1 = misc1 & ~0x80; /* bit7 = 0*/
                dynamic_range_ycbcr = 1; /*bt709*/
                if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
                        misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
                else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
                        misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
                break;
        case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
                dynamic_range_rgb = 1; /*limited range*/
                break;
        case COLOR_SPACE_2020_RGB_FULLRANGE:
        case COLOR_SPACE_2020_YCBCR:
        case COLOR_SPACE_XR_RGB:
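For readers decoding the hex masks: the switch packs the DP Main Stream Attribute MISC0/MISC1 bytes, where MISC0 bit 3 selects CEA (limited) range, bit 4 picks BT.709 over BT.601 colorimetry, and bits 2:1 carry the pixel encoding. Restated as a sketch (bit meanings per the DP spec; the helper name is illustrative):

        /* MISC0 packing used above:
         *   bits 2:1  pixel encoding (01b = YCbCr 4:2:2, 10b = YCbCr 4:4:4)
         *   bit 3     dynamic range (1 = CEA/limited)
         *   bit 4     YCbCr colorimetry (0 = BT.601, 1 = BT.709)
         */
        static uint32_t build_misc0_ycbcr(bool bt709, bool is_422)
        {
                uint32_t misc0 = 0x8;                   /* bit3: CEA range */

                if (bt709)
                        misc0 |= 0x10;                  /* bit4: BT.709 */
                misc0 |= is_422 ? 0x2 : 0x4;            /* bits2:1 */
                return misc0;
        }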
@ -700,6 +700,7 @@ enum dc_status dcn20_enable_stream_timing(
        if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
                        pipe_ctx->clock_source,
                        &pipe_ctx->stream_res.pix_clk_params,
                        dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
                        &pipe_ctx->pll_settings)) {
                BREAK_TO_DEBUGGER();
                return DC_ERROR_UNEXPECTED;

@ -925,6 +925,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

struct link_encoder *dcn20_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn20_link_encoder *enc20 =

@ -1268,7 +1269,6 @@ static void get_pixel_clock_parameters(
                pixel_clk_params->requested_pix_clk_100hz /= 4;
        else if (optc2_is_two_pixels_per_containter(&stream->timing) || opp_cnt == 2)
                pixel_clk_params->requested_pix_clk_100hz /= 2;

        else if (hws->funcs.is_dp_dig_pixel_rate_div_policy) {
                if (hws->funcs.is_dp_dig_pixel_rate_div_policy(pipe_ctx))
                        pixel_clk_params->requested_pix_clk_100hz /= 2;

@ -50,6 +50,7 @@ struct resource_pool *dcn20_create_resource_pool(
        struct dc *dc);

struct link_encoder *dcn20_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data);

unsigned int dcn20_calc_max_scaled_time(

@ -6,30 +6,6 @@ DCN201 = dcn201_init.o dcn201_resource.o dcn201_hwseq.o \
        dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \
        dcn201_dccg.o dcn201_link_encoder.o

ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o := -mhard-float -msse
endif

ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o := -mhard-float -maltivec
endif

ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
endif

ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -msse2
endif
endif
AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))

AMD_DISPLAY_FILES += $(AMD_DAL_DCN201)
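The Makefile hunk above is a pure deletion (the hunk count drops from 30 lines to 6): with the float-dependent code moved under dml/, dcn201_resource.o no longer needs per-file hard-float CFLAGS or the old-GCC stack-alignment workaround. Any residual FP math is instead bracketed at runtime, following this convention (compute_something_fpu() is a hypothetical helper compiled in a dml file that keeps the hard-float flags):

        /* DC_FP bracketing convention the FP cleanup moves toward. */
        static void dcn201_do_fp_work(struct dc *dc)
        {
                DC_FP_START();          /* kernel_fpu_begin() on x86 */
                compute_something_fpu(dc);
                DC_FP_END();            /* kernel_fpu_end() */
        }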
@ -788,6 +788,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dcn201_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn20_link_encoder *enc20 =

@ -1036,6 +1037,14 @@ static bool dcn201_get_dcc_compression_cap(const struct dc *dc,
                        output);
}

static void dcn201_populate_dml_writeback_from_context(struct dc *dc,
                struct resource_context *res_ctx,
                display_e2e_pipe_params_st *pipes)
{
        DC_FP_START();
        dcn201_populate_dml_writeback_from_context_fpu(dc, res_ctx, pipes);
        DC_FP_END();
}

static void dcn201_destroy_resource_pool(struct resource_pool **pool)
{

@ -1067,8 +1076,8 @@ static struct resource_funcs dcn201_res_pool_funcs = {
        .add_dsc_to_stream_resource = NULL,
        .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
        .acquire_idle_pipe_for_layer = dcn201_acquire_idle_pipe_for_layer,
        .populate_dml_writeback_from_context = dcn201_populate_dml_writeback_from_context,
        .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
        .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
        .set_mcif_arb_params = dcn20_set_mcif_arb_params,
        .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
@ -1325,6 +1325,7 @@ static int map_transmitter_id_to_phy_instance(
}

static struct link_encoder *dcn21_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn21_link_encoder *enc21 =

@ -30,36 +30,6 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
        dcn30_dpp_cm.o dcn30_dwb_cm.o dcn30_cm_common.o dcn30_mmhubbub.o \
        dcn30_dio_link_encoder.o dcn30_resource.o


ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
endif

ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
endif

ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
endif

ifdef CONFIG_X86
ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mpreferred-stack-boundary=4
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -msse2
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -msse2
endif
endif

AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))

AMD_DISPLAY_FILES += $(AMD_DAL_DCN30)

@ -927,6 +927,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dcn30_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn20_link_encoder *enc20 =

@ -1521,26 +1522,11 @@ static bool init_soc_bounding_box(struct dc *dc,
        loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
        loaded_ip->max_num_dpp = pool->base.pipe_count;
        loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk;

        DC_FP_START();
        dcn20_patch_bounding_box(dc, loaded_bb);
        DC_FP_START();
        patch_dcn30_soc_bounding_box(dc, &dcn3_0_soc);
        DC_FP_END();

        if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
                struct bp_soc_bb_info bb_info = {0};

                if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
                        if (bb_info.dram_clock_change_latency_100ns > 0)
                                dcn3_0_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;

                        if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
                                dcn3_0_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;

                        if (bb_info.dram_sr_exit_latency_100ns > 0)
                                dcn3_0_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
                }
        }

        return true;
}
@ -2031,44 +2017,6 @@ void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct
        context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
}

/*
 * Finds dummy_latency_index when MCLK switching using firmware based
 * vblank stretch is enabled. This function will iterate through the
 * table of dummy pstate latencies until the lowest value that allows
 * dm_allow_self_refresh_and_mclk_switch to happen is found
 */
int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, struct dc_state *context,
                display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel)
{
        const int max_latency_table_entries = 4;
        int dummy_latency_index = 0;

        while (dummy_latency_index < max_latency_table_entries) {
                context->bw_ctx.dml.soc.dram_clock_change_latency_us =
                        dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
                dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);

                if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
                                dm_allow_self_refresh_and_mclk_switch)
                        break;

                dummy_latency_index++;
        }

        if (dummy_latency_index == max_latency_table_entries) {
                ASSERT(dummy_latency_index != max_latency_table_entries);
                /* If the execution gets here, it means dummy p_states are
                 * not possible. This should never happen and would mean
                 * something is severely wrong.
                 * Here we reset dummy_latency_index to 3, because it is
                 * better to have underflows than system crashes.
                 */
                dummy_latency_index = 3;
        }

        return dummy_latency_index;
}

void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
        DC_FP_START();
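The removal above is the same FP cleanup pattern: the dummy-latency search drives dcn30_internal_validate_bw(), which uses the FPU, so the routine leaves dcn30_resource.c (presumably for the DML/FPU side; the header hunk below keeps its prototype public). Logically the search is a plain linear scan — a paraphrase, not the committed code:

        /* Try each dummy p-state latency, lowest first; keep the first one
         * DML says still allows self refresh plus an MCLK switch.
         * validate_with_dummy_latency() is a hypothetical wrapper around
         * dcn30_internal_validate_bw() with the entry's latency applied.
         */
        static int find_dummy_latency_index(struct dc *dc, struct dc_state *context)
        {
                int idx;

                for (idx = 0; idx < 4; idx++)
                        if (validate_with_dummy_latency(dc, context, idx))
                                break;
                if (idx == 4)
                        idx = 3;        /* fallback: prefer underflow to a crash */
                return idx;
        }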
@ -99,6 +99,9 @@ enum dc_status dcn30_add_stream_to_ctx(

void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);

bool dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
void dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, struct dc_state *context,
                display_e2e_pipe_params_st *pipes, int pipe_cnt, int vlevel);

#endif /* _DCN30_RESOURCE_H_ */

@ -890,6 +890,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dcn301_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn20_link_encoder *enc20 =

@ -7,31 +7,6 @@

DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o

ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse
endif

ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
endif

ifdef CONFIG_X86
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
endif
endif

ifdef IS_OLD_GCC
# Stack alignment mismatch, proceed with caution.
# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
# (8B stack alignment).
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mpreferred-stack-boundary=4
else
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -msse2
endif
endif

AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02))

AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02)

@ -891,7 +891,9 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
        hpd_regs(4)
};

static struct link_encoder *dcn302_link_encoder_create(const struct encoder_init_data *enc_init_data)
static struct link_encoder *dcn302_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);


@ -819,7 +819,9 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
        hpd_regs(1)
};

static struct link_encoder *dcn303_link_encoder_create(const struct encoder_init_data *enc_init_data)
static struct link_encoder *dcn303_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);


@ -158,9 +158,11 @@ static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst)
        }
}

void dccg31_set_dpstreamclk(struct dccg *dccg,
                enum streamclk_source src,
                int otg_inst)
void dccg31_set_dpstreamclk(
        struct dccg *dccg,
        enum streamclk_source src,
        int otg_inst,
        int dp_hpo_inst)
{
        if (src == REFCLK)
                dccg31_disable_dpstreamclk(dccg, otg_inst);
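dccg31_set_dpstreamclk() gains a dp_hpo_inst argument so the DCCG is told which HPO stream encoder instance owns the DP stream clock instead of assuming it tracks the OTG instance; the header hunks below drop the stale duplicate declaration and update the remaining one. A hedged sketch of a post-patch call site (the enum value and instance numbers are illustrative):

        /* Route OTG 0's DP stream clock from a DTBCLK DTO and name HPO
         * stream encoder instance 0. Values are illustrative only.
         */
        dccg->funcs->set_dpstreamclk(dccg, DTBCLK0,
                                     /* otg_inst */ 0,
                                     /* dp_hpo_inst */ 0);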
@ -161,11 +161,6 @@ struct dccg *dccg31_create(

void dccg31_init(struct dccg *dccg);

void dccg31_set_dpstreamclk(
        struct dccg *dccg,
        enum streamclk_source src,
        int otg_inst);

void dccg31_enable_symclk32_se(
        struct dccg *dccg,
        int hpo_se_inst,

@ -207,7 +202,8 @@ void dccg31_get_dccg_ref_freq(
void dccg31_set_dpstreamclk(
        struct dccg *dccg,
        enum streamclk_source src,
        int otg_inst);
        int otg_inst,
        int dp_hpo_inst);

void dccg31_set_dtbclk_dto(
        struct dccg *dccg,

@ -141,7 +141,7 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
        return true;
}

static bool optc31_immediate_disable_crtc(struct timing_generator *optc)
bool optc31_immediate_disable_crtc(struct timing_generator *optc)
{
        struct optc *optc1 = DCN10TG_FROM_TG(optc);


@ -258,6 +258,8 @@

void dcn31_timing_generator_init(struct optc *optc1);

bool optc31_immediate_disable_crtc(struct timing_generator *optc);

void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params);

void optc3_init_odm(struct timing_generator *optc);
@ -1093,6 +1093,7 @@ static const struct encoder_feature_support link_enc_feature = {
};

static struct link_encoder *dcn31_link_encoder_create(
        struct dc_context *ctx,
        const struct encoder_init_data *enc_init_data)
{
        struct dcn20_link_encoder *enc20 =

@ -1663,11 +1664,12 @@ int dcn31_populate_dml_pipes_from_context(
        pipes[pipe_cnt].pipe.src.immediate_flip = true;
        pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
        pipes[pipe_cnt].pipe.src.gpuvm = true;
        pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
        pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
        pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
        pipes[pipe_cnt].pipe.src.dcc_rate = 3;
        pipes[pipe_cnt].dout.dsc_input_bpc = 0;
        DC_FP_START();
        dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
        DC_FP_END();

        if (dc->debug.dml_hostvm_override == DML_HOSTVM_NO_OVERRIDE)
                pipes[pipe_cnt].pipe.src.hostvm = dc->res_pool->hubbub->riommu_active;

@ -1716,15 +1718,6 @@ int dcn31_populate_dml_pipes_from_context(
        return pipe_cnt;
}

void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
        if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
                context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
                context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_enter_plus_exit_time_us;
                context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].sr_exit_time_us;
        }
}

void dcn31_calculate_wm_and_dlg(
        struct dc *dc, struct dc_state *context,
        display_e2e_pipe_params_st *pipes,

@ -1871,8 +1864,6 @@ static bool dcn31_resource_construct(
        struct dc_context *ctx = dc->ctx;
        struct irq_service_init_data init_data;

        DC_FP_START();

        ctx->dc_bios->regs = &bios_regs;

        pool->base.res_cap = &res_cap_dcn31;

@ -2183,13 +2174,9 @@ static bool dcn31_resource_construct(

        dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;

        DC_FP_END();

        return true;

create_fail:

        DC_FP_END();
        dcn31_resource_destruct(pool);

        return false;

@ -59,7 +59,6 @@ dcn31_set_mcif_arb_params(struct dc *dc,
        struct dc_state *context,
        display_e2e_pipe_params_st *pipes,
        int pipe_cnt);
void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);

struct resource_pool *dcn31_create_resource_pool(
        const struct dc_init_data *init_data,
Some files were not shown because too many files have changed in this diff.