Merge tag 'amd-drm-fixes-6.13-2024-12-04' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.13-2024-12-04:

amdgpu:
- Jpeg work handler fix for VCN 1.0
- HDP flush fixes
- ACPI EDID sanity check
- OLED panel backlight fix
- DC YCbCr fix
- DC Detile buffer size debugging
- DC prefetch calculation fix
- DC VTotal handling fix
- DC HBlank fix
- ISP fix
- SR-IOV fix
- Workload profile fixes
- DCN 4.0.1 resume fix

amdkfd:
- GC 12.x fix
- GC 9.4.x fix

Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241206190452.2571042-1-alexander.deucher@amd.com
commit 1995e7d050 by Simona Vetter, 2024-12-06 21:54:04 +01:00
49 changed files with 1062 additions and 646 deletions

@ -145,7 +145,7 @@ const char *amdgpu_asic_name[] = {
"LAST",
};
-#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM - 1, 0)
+#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMDGPU_MAX_IP_NUM, 0)
/*
* Default init level where all blocks are expected to be initialized. This is
* the level of initialization expected by default and also after a full reset
@ -3670,9 +3670,11 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
-DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
-if (r)
+if (r) {
+dev_err(adev->dev, "RE-INIT-early: %s failed\n",
+block->version->funcs->name);
return r;
+}
block->status.hw = true;
}
}
@ -3682,7 +3684,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
-int i, r;
+struct amdgpu_ip_block *block;
+int i, r = 0;
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
@ -3697,34 +3700,28 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
};
for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
-int j;
-struct amdgpu_ip_block *block;
+block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
-for (j = 0; j < adev->num_ip_blocks; j++) {
-block = &adev->ip_blocks[j];
-if (block->version->type != ip_order[i] ||
-!block->status.valid ||
-block->status.hw)
-continue;
+if (!block)
+continue;
+if (block->status.valid && !block->status.hw) {
if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
-r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
-if (r)
-return r;
+r = amdgpu_ip_block_resume(block);
} else {
-r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
-if (r) {
-DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-adev->ip_blocks[i].version->funcs->name, r);
-return r;
-}
-block->status.hw = true;
+r = block->version->funcs->hw_init(block);
}
+if (r) {
+dev_err(adev->dev, "RE-INIT-late: %s failed\n",
+block->version->funcs->name);
+break;
+}
+block->status.hw = true;
}
}
-return 0;
+return r;
}
/**
@ -3765,7 +3762,7 @@ static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
*
* @adev: amdgpu_device pointer
*
-* First resume function for hardware IPs. The list of all the hardware
+* Second resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all blocks except COMMON, GMC, and IH. resume puts the hardware into a
* functional state after a suspend and updates the software state as
@ -3783,6 +3780,7 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
@ -3793,6 +3791,36 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
return 0;
}
/**
* amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
*
* @adev: amdgpu_device pointer
*
* Third resume function for hardware IPs. The list of all the hardware
* IPs that make up the asic is walked and the resume callbacks are run for
* all DCE blocks. resume puts the hardware into a functional state after a suspend
* and updates the software state as necessary. This function is also used
* for restoring the GPU after a GPU reset.
*
* Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
if (r)
return r;
}
}
return 0;
}
/**
* amdgpu_device_ip_resume - run resume for hardware IPs
*
@ -3822,6 +3850,13 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
if (r)
return r;
amdgpu_fence_driver_hw_init(adev);
r = amdgpu_device_ip_resume_phase3(adev);
return r;
}
@ -4902,7 +4937,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
goto exit;
}
-amdgpu_fence_driver_hw_init(adev);
if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
@ -5487,6 +5521,10 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
r = amdgpu_device_ip_resume_phase3(tmp_adev);
if (r)
goto out;
if (vram_lost)
amdgpu_device_fill_reset_magic(tmp_adev);

@ -40,10 +40,12 @@
static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
-if (!ring || !ring->funcs->emit_wreg)
+if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-else
+RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
}
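This and the other HDP variants below share one remedy: after writing the flush register through the remapped MMIO window, the register is read back so the posted write is guaranteed to have reached the device. A minimal standalone sketch of the pattern, with invented names rather than the driver's API:

#include <stdint.h>

/* Hypothetical posting-read helper: MMIO writes can be buffered on the
 * bus, so reading the same register back stalls until the write lands. */
static inline void flush_posted_write(volatile uint32_t *flush_reg)
{
	*flush_reg = 0;		/* trigger the flush (posted write) */
	(void)*flush_reg;	/* posting read: forces completion */
}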
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5))
return;
-if (!ring || !ring->funcs->emit_wreg)
+if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
-else
+RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
+} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+}
}
static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,

@ -31,10 +31,12 @@
static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
-if (!ring || !ring->funcs->emit_wreg)
+if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-else
+RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
}
static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
@ -42,6 +44,7 @@ static void hdp_v5_0_invalidate_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
} else {
amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);

@ -31,13 +31,15 @@
static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
-if (!ring || !ring->funcs->emit_wreg)
+if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
-else
+RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
+}
}
static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,

@ -34,10 +34,12 @@
static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
-if (!ring || !ring->funcs->emit_wreg)
+if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-else
+RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
}
static void hdp_v6_0_update_clock_gating(struct amdgpu_device *adev,

@ -31,10 +31,12 @@
static void hdp_v7_0_flush_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
-if (!ring || !ring->funcs->emit_wreg)
+if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-else
+RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+}
}
static void hdp_v7_0_update_clock_gating(struct amdgpu_device *adev,

@ -604,7 +604,7 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
-bool set_clocks = !cancel_delayed_work_sync(&adev->jpeg.idle_work);
+bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
int cnt = 0;
mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);

@ -1510,6 +1510,8 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_tcp_size_per_cu) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
pcache_info[i].cache_level = 1;
/* Cacheline size not available in IP discovery for gc943,gc944 */
pcache_info[i].cache_line_size = 128;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -1521,6 +1523,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
pcache_info[i].cache_size =
adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_INST_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -1531,6 +1534,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
pcache_info[i].cache_level = 1;
pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -1541,6 +1545,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gfx.config.gc_tcc_size) {
pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
pcache_info[i].cache_level = 2;
pcache_info[i].cache_line_size = 128;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);
@ -1551,6 +1556,7 @@ static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
if (adev->gmc.mall_size) {
pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
pcache_info[i].cache_level = 3;
pcache_info[i].cache_line_size = 64;
pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
CRAT_CACHE_FLAGS_DATA_CACHE |
CRAT_CACHE_FLAGS_SIMD_CACHE);

@ -235,6 +235,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
*/
kfd->device_info.needs_pci_atomics = true;
kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
} else if (gc_version < IP_VERSION(13, 0, 0)) {
kfd->device_info.needs_pci_atomics = true;
kfd->device_info.no_atomic_fw_version = 2090;
} else {
kfd->device_info.needs_pci_atomics = true;
}

@ -3481,6 +3481,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->aux_support = false;
else if (amdgpu_backlight == 1)
caps->aux_support = true;
if (caps->aux_support)
aconnector->dc_link->backlight_control_type = BACKLIGHT_CONTROL_AMD_AUX;
luminance_range = &conn_base->display_info.luminance_range;

@ -907,14 +907,14 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
struct drm_connector *connector = data;
struct acpi_device *acpidev = ACPI_COMPANION(connector->dev->dev);
unsigned char start = block * EDID_LENGTH;
-void *edid;
+struct edid *edid;
int r;
if (!acpidev)
return -ENODEV;
/* fetch the entire edid from BIOS */
-r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, &edid);
+r = acpi_video_get_edid(acpidev, ACPI_VIDEO_DISPLAY_LCD, -1, (void *)&edid);
if (r < 0) {
drm_dbg(connector->dev, "Failed to get EDID from ACPI: %d\n", r);
return r;
@ -924,7 +924,14 @@ dm_helpers_probe_acpi_edid(void *data, u8 *buf, unsigned int block, size_t len)
goto cleanup;
}
-memcpy(buf, edid + start, len);
+/* sanity check */
+if (edid->revision < 4 || !(edid->input & DRM_EDID_INPUT_DIGITAL) ||
+(edid->input & DRM_EDID_DIGITAL_TYPE_MASK) == DRM_EDID_DIGITAL_TYPE_UNDEF) {
+r = -EINVAL;
+goto cleanup;
+}
+memcpy(buf, (void *)edid + start, len);
r = 0;
cleanup:

@ -6109,3 +6109,21 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state
profile.power_level = dc->res_pool->funcs->get_power_profile(context);
return profile;
}
/*
**********************************************************************************
* dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state
*
* Called when DM wants to log detile buffer size from dc_state
*
**********************************************************************************
*/
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context)
{
struct dc *dc = context->clk_mgr->ctx->dc;
if (dc->res_pool->funcs->get_det_buffer_size)
return dc->res_pool->funcs->get_det_buffer_size(context);
else
return 0;
}

@ -2094,7 +2094,8 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
count = resource_get_odm_slice_count(otg_master);
h_active = timing->h_addressable +
timing->h_border_left +
-timing->h_border_right;
+timing->h_border_right +
+otg_master->hblank_borrow;
width = h_active / count;
if (otg_master->stream_res.tg)
@ -4026,6 +4027,41 @@ enum dc_status dc_validate_with_context(struct dc *dc,
return res;
}
/**
* decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
* @pipe_ctx: Pointer to the pipe context structure.
*
* This function calculates the horizontal blanking borrow value for a given pipe context based on the
* display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less
* than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the
* total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow
* value to 0.
*/
static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
{
uint32_t hactive;
uint32_t ceil_slice_width;
struct dc_stream_state *stream = NULL;
if (!pipe_ctx)
return;
stream = pipe_ctx->stream;
if (stream->timing.flags.DSC) {
hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
/* Assume that if the determined slice count does not divide hactive evenly, hblank borrow is needed for padding */
if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
pipe_ctx->hblank_borrow = 0;
}
}
}
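A worked example of the arithmetic above (numbers invented): with hactive = 1366 and num_slices_h = 4, the division leaves a remainder, so ceil_slice_width = 342 and hblank_borrow = 342 * 4 - 1366 = 2 pixels, kept only if at least 32 blank pixels remain. A standalone re-statement of the same logic:

#include <stdio.h>
#include <stdint.h>

/* Mirrors decide_hblank_borrow() above; h_total feeds the
 * "keep at least 32 blank pixels" guard. */
static uint32_t calc_hblank_borrow(uint32_t hactive, uint32_t num_slices_h,
				   uint32_t h_total)
{
	uint32_t ceil_slice_width, borrow = 0;

	if (hactive % num_slices_h) {
		ceil_slice_width = hactive / num_slices_h + 1;
		borrow = ceil_slice_width * num_slices_h - hactive;
		if (h_total - hactive - borrow < 32)
			borrow = 0;
	}
	return borrow;
}

int main(void)
{
	printf("%u\n", calc_hblank_borrow(1366, 4, 1500)); /* prints 2 */
	return 0;
}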
/**
* dc_validate_global_state() - Determine if hardware can support a given state
*
@ -4064,6 +4100,10 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
/* Decide whether hblank borrow is needed and save it in pipe_ctx */
if (dc->debug.enable_hblank_borrow)
decide_hblank_borrow(pipe_ctx);
if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {

@ -290,6 +290,7 @@ struct dc_caps {
uint16_t subvp_vertical_int_margin_us;
bool seamless_odm;
uint32_t max_v_total;
bool vtotal_limited_by_fp2;
uint32_t max_disp_clock_khz_at_vmin;
uint8_t subvp_drr_vblank_start_margin_us;
bool cursor_not_scaled;
@ -1068,6 +1069,7 @@ struct dc_debug_options {
unsigned int scale_to_sharpness_policy;
bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
};
@ -2550,6 +2552,8 @@ struct dc_power_profile {
struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context);
unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
/* DSC Interfaces */
#include "dc_dsc.h"

@ -120,7 +120,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
-stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow;
// Make spl input basic out info output_size height point to v active
spl_in->basic_out.output_size.height =
stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;

@ -1222,6 +1222,7 @@ static dml_bool_t CalculatePrefetchSchedule(struct display_mode_lib_scratch_st *
s->dst_y_prefetch_oto = s->Tvm_oto_lines + 2 * s->Tr0_oto_lines + s->Lsw_oto;
s->dst_y_prefetch_equ = p->VStartup - (*p->TSetup + dml_max(p->TWait + p->TCalc, *p->Tdmdl)) / s->LineTime - (*p->DSTYAfterScaler + (dml_float_t) *p->DSTXAfterScaler / (dml_float_t)p->myPipe->HTotal);
s->dst_y_prefetch_equ = dml_min(s->dst_y_prefetch_equ, 63.75); // limit to the reg limit of U6.2 for DST_Y_PREFETCH
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: HTotal = %u\n", __func__, p->myPipe->HTotal);

@ -339,11 +339,22 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in
// }
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
{
unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
max_hw_v_total -= stream->timing.v_front_porch + 1;
}
return max_hw_v_total;
}
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
struct dml2_context *dml_ctx)
{
-unsigned int hblank_start, vblank_start;
+unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@ -371,11 +382,23 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
- stream->timing.v_border_top - stream->timing.v_border_bottom;
timing->drr_config.enabled = stream->ignore_msa_timing_param;
-timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
timing->drr_config.drr_active_variable = stream->vrr_active_variable;
timing->drr_config.drr_active_fixed = stream->vrr_active_fixed;
timing->drr_config.disallowed = !stream->allow_freesync;
/* limit min refresh rate to DC cap */
min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
if (stream->ctx->dc->caps.max_v_total != 0) {
min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
(stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
}
if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
} else {
timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
}
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
timing->drr_config.max_instant_vtotal_delta = dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase(stream, false);
@ -422,6 +445,21 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->vblank_nom = timing->v_total - timing->v_active;
}
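To get a feel for the clamp introduced here (all numbers invented): a 594 MHz pixel clock with h_total = 4400 against the 15-bit VTOTAL limit, minus the front-porch reservation, floors the reachable refresh rate at roughly 4.12 Hz. A standalone sketch of the same integer math:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Invented example: pixel clock is stored in 100 Hz units as in
	 * dc_crtc_timing; the VTOTAL register is 15 bits wide. */
	uint64_t pix_clk_100hz = 5940000;	/* 594 MHz */
	uint64_t h_total = 4400;
	uint64_t v_front_porch = 8;
	uint64_t max_v_total = (1 << 15) - 1;	/* 32767 */

	/* vtotal_limited_by_fp2: reserve v_front_porch + 1 lines */
	uint64_t max_hw_v_total = max_v_total - (v_front_porch + 1);

	/* refresh (uHz) = pixel clock (Hz) * 1e6 / (h_total * v_total) */
	uint64_t min_refresh_uhz =
		pix_clk_100hz * 100000000ULL / (h_total * max_hw_v_total);

	printf("%llu uHz (~%.2f Hz)\n",
	       (unsigned long long)min_refresh_uhz, min_refresh_uhz / 1e6);
	return 0;
}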
/**
* adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration
* based on the pipe context.
* @timing: Pointer to the dml2_timing_cfg structure to be adjusted.
* @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value.
*
* This function modifies the horizontal active and blank end timings by adding and subtracting
* the horizontal blanking borrow value from the pipe context, respectively.
*/
static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe)
{
timing->h_active += pipe->hblank_borrow;
timing->h_blank_end -= pipe->hblank_borrow;
}
static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output,
struct dc_stream_state *stream, const struct pipe_ctx *pipe)
{
@ -709,6 +747,7 @@ static const struct scaler_data *get_scaler_data_for_plane(
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
temp_pipe->stream_res = pipe->stream_res;
temp_pipe->hblank_borrow = pipe->hblank_borrow;
dml_ctx->config.callbacks.build_scaling_params(temp_pipe);
break;
}
@ -973,6 +1012,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index]);
@ -1111,12 +1151,12 @@ void dml21_populate_pipe_ctx_dlg_params(struct dml2_context *dml_ctx, struct dc_
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
union dml2_global_sync_programming *global_sync = &stream_programming->global_sync;
-hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right;
+hactive = timing->h_addressable + timing->h_border_left + timing->h_border_right + pipe_ctx->hblank_borrow;
vactive = timing->v_addressable + timing->v_border_bottom + timing->v_border_top;
hblank_start = pipe_ctx->stream->timing.h_total - pipe_ctx->stream->timing.h_front_porch;
vblank_start = pipe_ctx->stream->timing.v_total - pipe_ctx->stream->timing.v_front_porch;
-hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right;
+hblank_end = hblank_start - timing->h_addressable - timing->h_border_left - timing->h_border_right - pipe_ctx->hblank_borrow;
vblank_end = vblank_start - timing->v_addressable - timing->v_border_top - timing->v_border_bottom;
if (dml_ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {

@ -1049,7 +1049,8 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* Enable DSC hw block */
-dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;

@ -820,6 +820,7 @@ enum dc_status dcn401_enable_stream_timing(
int opp_cnt = 1;
int opp_inst[MAX_PIPES] = {0};
struct pipe_ctx *opp_heads[MAX_PIPES] = {0};
struct dc_crtc_timing patched_crtc_timing = stream->timing;
bool manual_mode;
unsigned int tmds_div = PIXEL_RATE_DIV_NA;
unsigned int unused_div = PIXEL_RATE_DIV_NA;
@ -874,9 +875,13 @@ enum dc_status dcn401_enable_stream_timing(
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
/* if we are borrowing from hblank, h_addressable needs to be adjusted */
if (dc->debug.enable_hblank_borrow)
patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
-&stream->timing,
+&patched_crtc_timing,
pipe_ctx->pipe_dlg_param.vready_offset,
pipe_ctx->pipe_dlg_param.vstartup_start,
pipe_ctx->pipe_dlg_param.vupdate_offset,

@ -219,6 +219,7 @@ struct resource_funcs {
* Get indicator of power from a context that went through full validation
*/
int (*get_power_profile)(const struct dc_state *context);
unsigned int (*get_det_buffer_size)(const struct dc_state *context);
};
struct audio_support{
@ -477,6 +478,8 @@ struct pipe_ctx {
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
uint8_t subvp_index;
struct pixel_rate_divider pixel_rate_divider;
/* pixels borrowed from hblank to hactive */
uint8_t hblank_borrow;
};
/* Data used for dynamic link encoder assignment.

@ -808,7 +808,8 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
-dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;

@ -1510,6 +1510,7 @@ bool dcn20_split_stream_for_odm(
if (prev_odm_pipe->plane_state) {
struct scaler_data *sd = &prev_odm_pipe->plane_res.scl_data;
struct output_pixel_processor *opp = next_odm_pipe->stream_res.opp;
int new_width;
/* HACTIVE halved for odm combine */
@ -1543,7 +1544,28 @@ bool dcn20_split_stream_for_odm(
sd->viewport_c.x += dc_fixpt_floor(dc_fixpt_mul_int(
sd->ratios.horz_c, sd->h_active - sd->recout.x));
sd->recout.x = 0;
/*
* When odm is used in YCbCr422 or 420 colour space, a split screen
* will be seen with the previous calculations since the extra left
* edge pixel is accounted for in fmt but not in viewport.
*
* Below are adjustments which fix the split by correcting the
* calculations when there is an extra left edge pixel.
*/
if (opp && opp->funcs->opp_get_left_edge_extra_pixel_count
&& opp->funcs->opp_get_left_edge_extra_pixel_count(
opp, next_odm_pipe->stream->timing.pixel_encoding,
resource_is_pipe_type(next_odm_pipe, OTG_MASTER)) == 1) {
sd->h_active += 1;
sd->recout.width += 1;
sd->viewport.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
sd->viewport_c.x -= dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
sd->viewport_c.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
sd->viewport.width += dc_fixpt_ceil(dc_fixpt_mul_int(sd->ratios.horz, 1));
}
}
if (!next_odm_pipe->top_pipe)
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
else
@ -2132,6 +2154,7 @@ bool dcn20_fast_validate_bw(
ASSERT(0);
}
}
/* Actual dsc count per stream dsc validation*/
if (!dcn20_validate_dsc(dc, context)) {
context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] =

@ -2353,6 +2353,7 @@ static bool dcn30_resource_construct(
dc->caps.dp_hdmi21_pcon_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* read VBIOS LTTPR caps */
{

@ -1233,6 +1233,7 @@ static bool dcn302_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@ -1178,6 +1178,7 @@ static bool dcn303_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@ -1720,6 +1720,12 @@ int dcn31_populate_dml_pipes_from_context(
return pipe_cnt;
}
unsigned int dcn31_get_det_buffer_size(
const struct dc_state *context)
{
return context->bw_ctx.dml.ip.det_buffer_size_kbytes;
}
void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@ -1842,6 +1848,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.update_bw_bounding_box = dcn31_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn31_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static struct clock_source *dcn30_clock_source_create(

@ -63,6 +63,9 @@ struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
unsigned int dcn31_get_det_buffer_size(
const struct dc_state *context);
/*temp: B0 specific before switch to dcn313 headers*/
#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e

@ -1777,6 +1777,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static struct clock_source *dcn30_clock_source_create(

@ -1845,6 +1845,7 @@ static struct resource_funcs dcn315_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn315_get_panel_config_defaults,
.get_power_profile = dcn315_get_power_profile,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn315_resource_construct(

@ -1719,6 +1719,7 @@ static struct resource_funcs dcn316_res_pool_funcs = {
.update_bw_bounding_box = dcn316_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn316_get_panel_config_defaults,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn316_resource_construct(

@ -2189,6 +2189,7 @@ static bool dcn32_resource_construct(
dc->caps.dmcub_support = true;
dc->caps.seamless_odm = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;
@ -2803,6 +2804,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head(
free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx];
free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx];
free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst;
free_pipe->hblank_borrow = otg_master->hblank_borrow;
if (free_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(free_pipe->stream->ctx->dc,
&new_ctx->res_ctx,

@ -1742,6 +1742,7 @@ static bool dcn321_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@ -1778,6 +1778,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn35_resource_construct(
@ -1849,6 +1850,7 @@ static bool dcn35_resource_construct(
dc->caps.zstate_support = true;
dc->caps.ips_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@ -1757,6 +1757,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn35_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia,
.get_det_buffer_size = dcn31_get_det_buffer_size,
};
static bool dcn351_resource_construct(
@ -1828,6 +1829,7 @@ static bool dcn351_resource_construct(
dc->caps.zstate_support = true;
dc->caps.ips_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
/* Color pipeline capabilities */
dc->caps.color.dpp.dcn_arch = 1;

@ -1864,6 +1864,7 @@ static bool dcn401_resource_construct(
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.max_v_total = (1 << 15) - 1;
dc->caps.vtotal_limited_by_fp2 = true;
if (ASICREV_IS_GC_12_0_1_A0(dc->ctx->asic_id.hw_internal_rev))
dc->caps.dcc_plane_width_limit = 7680;

@ -122,6 +122,17 @@ static unsigned int calc_duration_in_us_from_v_total(
return duration_in_us;
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
{
unsigned int max_hw_v_total = stream->ctx->dc->caps.max_v_total;
if (stream->ctx->dc->caps.vtotal_limited_by_fp2) {
max_hw_v_total -= stream->timing.v_front_porch + 1;
}
return max_hw_v_total;
}
unsigned int mod_freesync_calc_v_total_from_refresh(
const struct dc_stream_state *stream,
unsigned int refresh_in_uhz)
@ -1016,7 +1027,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
if (stream->ctx->dc->caps.max_v_total != 0 && stream->timing.h_total != 0) {
min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
-(stream->timing.h_total * (long long)stream->ctx->dc->caps.max_v_total));
+(stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
}
/* Limit minimum refresh rate to what can be supported by hardware */
min_refresh_in_uhz = min_hardware_refresh_in_uhz > in_config->min_refresh_in_uhz ?

@ -1361,7 +1361,11 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
* create a custom set of heuristics, write a string of numbers to the file
* starting with the number of the custom profile along with a setting
* for each heuristic parameter. Due to differences across asic families
-* the heuristic parameters vary from family to family.
+* the heuristic parameters vary from family to family. Additionally,
+* you can apply the custom heuristics to different clock domains. Each
+* clock domain is considered a distinct operation, so if you modify the
+* gfxclk heuristics and then the memclk heuristics, all of the
+* custom heuristics will be retained until you switch to another profile.
*
*/
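As a usage sketch (path, profile index, and values are illustrative, not taken from this patch): assuming CUSTOM is profile 6 and 0 selects the gfxclk domain, a userspace program could submit one row of nine made-up heuristic coefficients like this:

#include <stdio.h>

int main(void)
{
	/* Hypothetical card path; the string is "<profile> <clock> <9 params>" */
	FILE *f = fopen("/sys/class/drm/card0/device/pp_power_profile_mode", "w");

	if (!f)
		return 1;
	fprintf(f, "6 0 30 800 10 60 25 100 16 10 16\n");
	fclose(f);
	return 0;
}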

@ -72,6 +72,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@ -1259,42 +1263,19 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
atomic64_set(&smu->throttle_int_counter, 0);
smu->watermarks_bitmap = 0;
-smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
-smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
-smu->user_dpm_profile.user_workload_mask = 0;
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
-smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
-smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
-smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
-smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
-smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4;
-smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
-smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
if (smu->is_apu ||
-!smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) {
-smu->driver_workload_mask =
-1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
-} else {
-smu->driver_workload_mask =
-1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
-smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
-}
+!smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D))
+smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+else
+smu->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+smu_power_profile_mode_get(smu, smu->power_profile_mode);
-smu->workload_mask = smu->driver_workload_mask |
-smu->user_dpm_profile.user_workload_mask;
-smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
-smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
-smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
-smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
-smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
-smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
-smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
smu->display_config = &adev->pm.pm_display_cfg;
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
@ -1347,6 +1328,11 @@ static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
return ret;
}
if (smu->custom_profile_params) {
kfree(smu->custom_profile_params);
smu->custom_profile_params = NULL;
}
smu_fini_microcode(smu);
return 0;
@ -2131,6 +2117,9 @@ static int smu_suspend(struct amdgpu_ip_block *ip_block)
if (!ret)
adev->gfx.gfx_off_entrycount = count;
/* clear this on suspend so it will get reprogrammed on resume */
smu->workload_mask = 0;
return 0;
}
@ -2243,25 +2232,49 @@ static int smu_enable_umd_pstate(void *handle,
}
static int smu_bump_power_profile_mode(struct smu_context *smu,
-long *param,
-uint32_t param_size)
+long *custom_params,
+u32 custom_params_max_idx)
{
-int ret = 0;
+u32 workload_mask = 0;
+int i, ret = 0;
for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
if (smu->workload_refcount[i])
workload_mask |= 1 << i;
}
if (smu->workload_mask == workload_mask)
return 0;
if (smu->ppt_funcs->set_power_profile_mode)
-ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
+ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
+custom_params,
+custom_params_max_idx);
if (!ret)
smu->workload_mask = workload_mask;
return ret;
}
static void smu_power_profile_mode_get(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode)
{
smu->workload_refcount[profile_mode]++;
}
static void smu_power_profile_mode_put(struct smu_context *smu,
enum PP_SMC_POWER_PROFILE profile_mode)
{
if (smu->workload_refcount[profile_mode])
smu->workload_refcount[profile_mode]--;
}
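The get/put pair above replaces the old priority/mask bookkeeping with plain per-profile reference counts; the mask handed to the SMU is then just the OR of every profile whose count is non-zero. A toy model of that idea (names invented):

#include <stdio.h>

enum { PROFILE_FULLSCREEN3D = 1, PROFILE_VIDEO = 3, PROFILE_COUNT = 8 };

static unsigned int refcount[PROFILE_COUNT];

static unsigned int build_workload_mask(void)
{
	unsigned int i, mask = 0;

	for (i = 0; i < PROFILE_COUNT; i++)
		if (refcount[i])
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	refcount[PROFILE_FULLSCREEN3D]++;	/* driver default */
	refcount[PROFILE_VIDEO]++;		/* a decode session starts */
	printf("0x%x\n", build_workload_mask());	/* 0xa */
	refcount[PROFILE_VIDEO]--;		/* the session ends */
	printf("0x%x\n", build_workload_mask());	/* 0x2 */
	return 0;
}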
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
-bool skip_display_settings,
-bool init)
+bool skip_display_settings)
{
int ret = 0;
-int index = 0;
-long workload[1];
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (!skip_display_settings) {
@ -2298,14 +2311,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
-smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
-index = fls(smu->workload_mask);
-index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-workload[0] = smu->workload_setting[index];
-if (init || smu->power_profile_mode != workload[0])
-smu_bump_power_profile_mode(smu, workload, 0);
-}
+smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+smu_bump_power_profile_mode(smu, NULL, 0);
return ret;
}
@ -2324,13 +2331,13 @@ static int smu_handle_task(struct smu_context *smu,
ret = smu_pre_display_config_changed(smu);
if (ret)
return ret;
-ret = smu_adjust_power_state_dynamic(smu, level, false, false);
+ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
-ret = smu_adjust_power_state_dynamic(smu, level, true, true);
+ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
case AMD_PP_TASK_READJUST_POWER_STATE:
-ret = smu_adjust_power_state_dynamic(smu, level, true, false);
+ret = smu_adjust_power_state_dynamic(smu, level, true);
break;
default:
break;
@ -2352,12 +2359,11 @@ static int smu_handle_dpm_task(void *handle,
static int smu_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type,
-bool en)
+bool enable)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-long workload[1];
-uint32_t index;
+int ret;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@ -2365,24 +2371,21 @@ static int smu_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
-if (!en) {
-smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]);
-index = fls(smu->workload_mask);
-index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-workload[0] = smu->workload_setting[index];
-} else {
-smu->driver_workload_mask |= (1 << smu->workload_priority[type]);
-index = fls(smu->workload_mask);
-index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
-workload[0] = smu->workload_setting[index];
-}
-smu->workload_mask = smu->driver_workload_mask |
-smu->user_dpm_profile.user_workload_mask;
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
-smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
-smu_bump_power_profile_mode(smu, workload, 0);
+smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+if (enable)
+smu_power_profile_mode_get(smu, type);
+else
+smu_power_profile_mode_put(smu, type);
+ret = smu_bump_power_profile_mode(smu, NULL, 0);
+if (ret) {
+if (enable)
+smu_power_profile_mode_put(smu, type);
+else
+smu_power_profile_mode_get(smu, type);
+return ret;
+}
+}
return 0;
}
@ -3074,21 +3077,33 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
-int ret;
+bool custom = false;
+int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
-if (smu->user_dpm_profile.user_workload_mask &
-(1 << smu->workload_priority[param[param_size]]))
-return 0;
+if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
+custom = true;
+/* clear frontend mask so custom changes propagate */
+smu->workload_mask = 0;
+}
-smu->user_dpm_profile.user_workload_mask =
-(1 << smu->workload_priority[param[param_size]]);
-smu->workload_mask = smu->user_dpm_profile.user_workload_mask |
-smu->driver_workload_mask;
-ret = smu_bump_power_profile_mode(smu, param, param_size);
+if ((param[param_size] != smu->power_profile_mode) || custom) {
+/* clear the old user preference */
+smu_power_profile_mode_put(smu, smu->power_profile_mode);
+/* set the new user preference */
+smu_power_profile_mode_get(smu, param[param_size]);
+ret = smu_bump_power_profile_mode(smu,
+custom ? param : NULL,
+custom ? param_size : 0);
+if (ret)
+smu_power_profile_mode_put(smu, param[param_size]);
+else
+/* store the user's preference */
+smu->power_profile_mode = param[param_size];
+}
return ret;
}

@ -240,7 +240,6 @@ struct smu_user_dpm_profile {
/* user clock state information */
uint32_t clk_mask[SMU_CLK_COUNT];
uint32_t clk_dependency;
-uint32_t user_workload_mask;
};
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
@ -557,12 +556,13 @@ struct smu_context {
uint32_t hard_min_uclk_req_from_dal;
bool disable_uclk_switch;
+/* asic agnostic workload mask */
uint32_t workload_mask;
-uint32_t driver_workload_mask;
-uint32_t workload_priority[WORKLOAD_POLICY_MAX];
-uint32_t workload_setting[WORKLOAD_POLICY_MAX];
+/* default/user workload preference */
uint32_t power_profile_mode;
uint32_t default_power_profile_mode;
+uint32_t workload_refcount[PP_SMC_POWER_PROFILE_COUNT];
+/* backend specific custom workload settings */
+long *custom_profile_params;
bool pm_enabled;
bool is_apu;
@ -733,9 +733,12 @@ struct pptable_funcs {
* @set_power_profile_mode: Set a power profile mode. Also used to
* create/set custom power profile modes.
-* &input: Power profile mode parameters.
-* &size: Size of &input.
+* &workload_mask: mask of workloads to enable
+* &custom_params: custom profile parameters
+* &custom_params_max_idx: max valid idx into custom_params
*/
-int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+int (*set_power_profile_mode)(struct smu_context *smu, u32 workload_mask,
+long *custom_params, u32 custom_params_max_idx);
/**
* @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power

@ -1445,97 +1445,120 @@ static int arcturus_get_power_profile_mode(struct smu_context *smu,
return size;
}
static int arcturus_set_power_profile_mode(struct smu_context *smu,
long *input,
uint32_t size)
#define ARCTURUS_CUSTOM_PARAMS_COUNT 10
#define ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT 2
#define ARCTURUS_CUSTOM_PARAMS_SIZE (ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT * ARCTURUS_CUSTOM_PARAMS_COUNT * sizeof(long))
static int arcturus_set_power_profile_mode_coeff(struct smu_context *smu,
long *input)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type = 0;
uint32_t profile_mode = input[size];
int ret = 0;
int ret, idx;
if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
return -EINVAL;
}
if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) &&
(smu->smc_fw_version >= 0x360d00)) {
if (size != 10)
return -EINVAL;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
activity_monitor.Gfx_UseRlcBusy = input[2];
activity_monitor.Gfx_MinActiveFreqType = input[3];
activity_monitor.Gfx_MinActiveFreq = input[4];
activity_monitor.Gfx_BoosterFreqType = input[5];
activity_monitor.Gfx_BoosterFreq = input[6];
activity_monitor.Gfx_PD_Data_limit_c = input[7];
activity_monitor.Gfx_PD_Data_error_coeff = input[8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
break;
case 1: /* Uclk */
activity_monitor.Mem_FPS = input[1];
activity_monitor.Mem_UseRlcBusy = input[2];
activity_monitor.Mem_MinActiveFreqType = input[3];
activity_monitor.Mem_MinActiveFreq = input[4];
activity_monitor.Mem_BoosterFreqType = input[5];
activity_monitor.Mem_BoosterFreq = input[6];
activity_monitor.Mem_PD_Data_limit_c = input[7];
activity_monitor.Mem_PD_Data_error_coeff = input[8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
break;
default:
return -EINVAL;
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
}
/*
* Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT
* Not all profile modes are supported on arcturus.
*/
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
profile_mode);
if (workload_type < 0) {
dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on arcturus\n", profile_mode);
return -EINVAL;
}
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
smu->workload_mask,
NULL);
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
false);
if (ret) {
dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
smu_cmn_assign_power_profile(smu);
idx = 0 * ARCTURUS_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Gfxclk */
activity_monitor.Gfx_FPS = input[idx + 1];
activity_monitor.Gfx_UseRlcBusy = input[idx + 2];
activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
activity_monitor.Gfx_BoosterFreq = input[idx + 6];
activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
}
idx = 1 * ARCTURUS_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Uclk */
activity_monitor.Mem_FPS = input[idx + 1];
activity_monitor.Mem_UseRlcBusy = input[idx + 2];
activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
activity_monitor.Mem_MinActiveFreq = input[idx + 4];
activity_monitor.Mem_BoosterFreqType = input[idx + 5];
activity_monitor.Mem_BoosterFreq = input[idx + 6];
activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
}
return 0;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor),
true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
return ret;
}
static int arcturus_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
u32 backend_workload_mask = 0;
int ret, idx = -1, i;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
if (smu->smc_fw_version < 0x360d00)
return -EINVAL;
if (!smu->custom_profile_params) {
smu->custom_profile_params =
kzalloc(ARCTURUS_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
if (!smu->custom_profile_params)
return -ENOMEM;
}
if (custom_params && custom_params_max_idx) {
if (custom_params_max_idx != ARCTURUS_CUSTOM_PARAMS_COUNT)
return -EINVAL;
if (custom_params[0] >= ARCTURUS_CUSTOM_PARAMS_CLOCK_COUNT)
return -EINVAL;
idx = custom_params[0] * ARCTURUS_CUSTOM_PARAMS_COUNT;
smu->custom_profile_params[idx] = 1;
for (i = 1; i < custom_params_max_idx; i++)
smu->custom_profile_params[idx + i] = custom_params[i];
}
ret = arcturus_set_power_profile_mode_coeff(smu,
smu->custom_profile_params);
if (ret) {
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
} else if (smu->custom_profile_params) {
memset(smu->custom_profile_params, 0, ARCTURUS_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
backend_workload_mask,
NULL);
if (ret) {
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
workload_mask);
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
return ret;
}
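The custom_profile_params buffer used above is effectively a small table: one row of ARCTURUS_CUSTOM_PARAMS_COUNT longs per clock domain, with slot 0 of each row flagging that the row holds user data, so an earlier gfxclk submission survives a later uclk one. A standalone sketch of that storage scheme (names assumed, not the driver's):

#include <string.h>

#define PARAMS_COUNT 10	/* mirrors ARCTURUS_CUSTOM_PARAMS_COUNT */
#define CLOCK_COUNT  2	/* row 0: gfxclk, row 1: uclk */

static long custom_table[CLOCK_COUNT * PARAMS_COUNT];

/* Store one "<clock> <9 coefficients>" request: flag the row valid,
 * then keep the caller's nine values after the flag. */
static int store_custom_row(const long *input, unsigned int n)
{
	long row;

	if (n != PARAMS_COUNT || input[0] < 0 || input[0] >= CLOCK_COUNT)
		return -1;
	row = input[0] * PARAMS_COUNT;
	custom_table[row] = 1;
	memcpy(&custom_table[row + 1], &input[1], (n - 1) * sizeof(*input));
	return 0;
}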
static int arcturus_set_performance_level(struct smu_context *smu,

@ -2006,90 +2006,122 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
return size;
}
static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
#define NAVI10_CUSTOM_PARAMS_COUNT 10
#define NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT 3
#define NAVI10_CUSTOM_PARAMS_SIZE (NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT * NAVI10_CUSTOM_PARAMS_COUNT * sizeof(long))
static int navi10_set_power_profile_mode_coeff(struct smu_context *smu,
long *input)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type, ret = 0;
int ret, idx;
smu->power_profile_mode = input[size];
if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (size != 10)
return -EINVAL;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
activity_monitor.Gfx_MinFreqStep = input[2];
activity_monitor.Gfx_MinActiveFreqType = input[3];
activity_monitor.Gfx_MinActiveFreq = input[4];
activity_monitor.Gfx_BoosterFreqType = input[5];
activity_monitor.Gfx_BoosterFreq = input[6];
activity_monitor.Gfx_PD_Data_limit_c = input[7];
activity_monitor.Gfx_PD_Data_error_coeff = input[8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
break;
case 1: /* Socclk */
activity_monitor.Soc_FPS = input[1];
activity_monitor.Soc_MinFreqStep = input[2];
activity_monitor.Soc_MinActiveFreqType = input[3];
activity_monitor.Soc_MinActiveFreq = input[4];
activity_monitor.Soc_BoosterFreqType = input[5];
activity_monitor.Soc_BoosterFreq = input[6];
activity_monitor.Soc_PD_Data_limit_c = input[7];
activity_monitor.Soc_PD_Data_error_coeff = input[8];
activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
break;
case 2: /* Memclk */
activity_monitor.Mem_FPS = input[1];
activity_monitor.Mem_MinFreqStep = input[2];
activity_monitor.Mem_MinActiveFreqType = input[3];
activity_monitor.Mem_MinActiveFreq = input[4];
activity_monitor.Mem_BoosterFreqType = input[5];
activity_monitor.Mem_BoosterFreq = input[6];
activity_monitor.Mem_PD_Data_limit_c = input[7];
activity_monitor.Mem_PD_Data_error_coeff = input[8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
break;
default:
return -EINVAL;
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
idx = 0 * NAVI10_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Gfxclk */
activity_monitor.Gfx_FPS = input[idx + 1];
activity_monitor.Gfx_MinFreqStep = input[idx + 2];
activity_monitor.Gfx_MinActiveFreqType = input[idx + 3];
activity_monitor.Gfx_MinActiveFreq = input[idx + 4];
activity_monitor.Gfx_BoosterFreqType = input[idx + 5];
activity_monitor.Gfx_BoosterFreq = input[idx + 6];
activity_monitor.Gfx_PD_Data_limit_c = input[idx + 7];
activity_monitor.Gfx_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Gfx_PD_Data_error_rate_coeff = input[idx + 9];
}
idx = 1 * NAVI10_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Socclk */
activity_monitor.Soc_FPS = input[idx + 1];
activity_monitor.Soc_MinFreqStep = input[idx + 2];
activity_monitor.Soc_MinActiveFreqType = input[idx + 3];
activity_monitor.Soc_MinActiveFreq = input[idx + 4];
activity_monitor.Soc_BoosterFreqType = input[idx + 5];
activity_monitor.Soc_BoosterFreq = input[idx + 6];
activity_monitor.Soc_PD_Data_limit_c = input[idx + 7];
activity_monitor.Soc_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Soc_PD_Data_error_rate_coeff = input[idx + 9];
}
idx = 2 * NAVI10_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Memclk */
activity_monitor.Mem_FPS = input[idx + 1];
activity_monitor.Mem_MinFreqStep = input[idx + 2];
activity_monitor.Mem_MinActiveFreqType = input[idx + 3];
activity_monitor.Mem_MinActiveFreq = input[idx + 4];
activity_monitor.Mem_BoosterFreqType = input[idx + 5];
activity_monitor.Mem_BoosterFreq = input[idx + 6];
activity_monitor.Mem_PD_Data_limit_c = input[idx + 7];
activity_monitor.Mem_PD_Data_error_coeff = input[idx + 8];
activity_monitor.Mem_PD_Data_error_rate_coeff = input[idx + 9];
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor), true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
return ret;
}
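
The coeff helper is a read-modify-write cycle against the firmware's activity-monitor table: fetch the CUSTOM slot, patch only the clock domains whose leading flag is set, and write the whole table back. A minimal userspace model of that flow (the struct and helper names are invented for illustration; they are not the kernel API):

#include <stdio.h>

struct activity_monitor { long gfx_fps, mem_fps; };

static struct activity_monitor fw_table = { 60, 30 }; /* stands in for firmware state */

static void table_get(struct activity_monitor *t) { *t = fw_table; }
static void table_set(const struct activity_monitor *t) { fw_table = *t; }

int main(void)
{
	struct activity_monitor am;

	table_get(&am);   /* read the current CUSTOM slot */
	am.gfx_fps = 144; /* patch only the fields being customized */
	table_set(&am);   /* write the whole table back */

	printf("gfx_fps=%ld mem_fps=%ld\n", fw_table.gfx_fps, fw_table.mem_fps);
	return 0;
}
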
static int navi10_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
u32 backend_workload_mask = 0;
int ret, idx = -1, i;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
if (!smu->custom_profile_params) {
smu->custom_profile_params = kzalloc(NAVI10_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
if (!smu->custom_profile_params)
return -ENOMEM;
}
if (custom_params && custom_params_max_idx) {
if (custom_params_max_idx != NAVI10_CUSTOM_PARAMS_COUNT)
return -EINVAL;
if (custom_params[0] >= NAVI10_CUSTOM_PARAMS_CLOCKS_COUNT)
return -EINVAL;
idx = custom_params[0] * NAVI10_CUSTOM_PARAMS_COUNT;
smu->custom_profile_params[idx] = 1;
for (i = 1; i < custom_params_max_idx; i++)
smu->custom_profile_params[idx + i] = custom_params[i];
}
ret = navi10_set_power_profile_mode_coeff(smu,
smu->custom_profile_params);
if (ret) {
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
} else if (smu->custom_profile_params) {
memset(smu->custom_profile_params, 0, NAVI10_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
backend_workload_mask, NULL);
if (ret) {
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
workload_mask);
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
return ret;
}
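
The custom-profile cache used above is a flat array of longs with one NAVI10_CUSTOM_PARAMS_COUNT-sized stride per clock domain; element 0 of each stride is a valid flag and the rest are coefficients. A standalone sketch of that indexing, assuming navi10-like sizes (this program only models the layout and is not driver code):

#include <stdio.h>
#include <string.h>

#define CUSTOM_PARAMS_COUNT 10       /* flag + 9 coefficients, as on navi10 */
#define CUSTOM_PARAMS_CLOCKS_COUNT 3 /* gfxclk, socclk, memclk */

int main(void)
{
	long params[CUSTOM_PARAMS_CLOCKS_COUNT * CUSTOM_PARAMS_COUNT];
	/* sysfs write: clock id followed by nine coefficients */
	long input[CUSTOM_PARAMS_COUNT] = { 1, 30, 5, 1, 800, 4, 1000, 600, 32768, 65536 };
	int clock = (int)input[0];
	int idx, i;

	memset(params, 0, sizeof(params));
	if (clock >= CUSTOM_PARAMS_CLOCKS_COUNT)
		return 1;

	idx = clock * CUSTOM_PARAMS_COUNT; /* stride to that clock's slot */
	params[idx] = 1;                   /* element 0 of the slot: valid flag */
	for (i = 1; i < CUSTOM_PARAMS_COUNT; i++)
		params[idx + i] = input[i];

	for (i = 0; i < CUSTOM_PARAMS_CLOCKS_COUNT; i++)
		printf("clock %d: %s\n", i,
		       params[i * CUSTOM_PARAMS_COUNT] ? "customized" : "untouched");
	return 0;
}
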


@ -1708,93 +1708,126 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
return size;
}
#define SIENNA_CICHLID_CUSTOM_PARAMS_COUNT 10
#define SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT 3
#define SIENNA_CICHLID_CUSTOM_PARAMS_SIZE (SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT * sizeof(long))
static int sienna_cichlid_set_power_profile_mode_coeff(struct smu_context *smu,
long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int ret, idx;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
idx = 0 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Gfxclk */
activity_monitor->Gfx_FPS = input[idx + 1];
activity_monitor->Gfx_MinFreqStep = input[idx + 2];
activity_monitor->Gfx_MinActiveFreqType = input[idx + 3];
activity_monitor->Gfx_MinActiveFreq = input[idx + 4];
activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
activity_monitor->Gfx_BoosterFreq = input[idx + 6];
activity_monitor->Gfx_PD_Data_limit_c = input[idx + 7];
activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 8];
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 9];
}
idx = 1 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Socclk */
activity_monitor->Fclk_FPS = input[idx + 1];
activity_monitor->Fclk_MinFreqStep = input[idx + 2];
activity_monitor->Fclk_MinActiveFreqType = input[idx + 3];
activity_monitor->Fclk_MinActiveFreq = input[idx + 4];
activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
activity_monitor->Fclk_BoosterFreq = input[idx + 6];
activity_monitor->Fclk_PD_Data_limit_c = input[idx + 7];
activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 8];
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 9];
}
idx = 2 * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Memclk */
activity_monitor->Mem_FPS = input[idx + 1];
activity_monitor->Mem_MinFreqStep = input[idx + 2];
activity_monitor->Mem_MinActiveFreqType = input[idx + 3];
activity_monitor->Mem_MinActiveFreq = input[idx + 4];
activity_monitor->Mem_BoosterFreqType = input[idx + 5];
activity_monitor->Mem_BoosterFreq = input[idx + 6];
activity_monitor->Mem_PD_Data_limit_c = input[idx + 7];
activity_monitor->Mem_PD_Data_error_coeff = input[idx + 8];
activity_monitor->Mem_PD_Data_error_rate_coeff = input[idx + 9];
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external), true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
return ret;
}
static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
u32 backend_workload_mask = 0;
int ret, idx = -1, i;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
if (!smu->custom_profile_params) {
smu->custom_profile_params =
kzalloc(SIENNA_CICHLID_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
if (!smu->custom_profile_params)
return -ENOMEM;
}
if (custom_params && custom_params_max_idx) {
if (custom_params_max_idx != SIENNA_CICHLID_CUSTOM_PARAMS_COUNT)
return -EINVAL;
if (custom_params[0] >= SIENNA_CICHLID_CUSTOM_PARAMS_CLOCK_COUNT)
return -EINVAL;
idx = custom_params[0] * SIENNA_CICHLID_CUSTOM_PARAMS_COUNT;
smu->custom_profile_params[idx] = 1;
for (i = 1; i < custom_params_max_idx; i++)
smu->custom_profile_params[idx + i] = custom_params[i];
}
ret = sienna_cichlid_set_power_profile_mode_coeff(smu,
smu->custom_profile_params);
if (ret) {
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
} else if (smu->custom_profile_params) {
memset(smu->custom_profile_params, 0, SIENNA_CICHLID_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
backend_workload_mask, NULL);
if (ret) {
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
workload_mask);
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
return ret;
}
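
A detail that recurs in every backend: the valid flag written into the parameter cache is cleared again whenever the coefficient update or the SetWorkloadMask message fails, so a rejected custom profile is not re-applied from stale cache on the next call. A compact model of that rollback (the commit callbacks are stand-ins for the firmware round trip):

#include <stdio.h>

static int commit_ok(const long *p)   { (void)p; return 0; }
static int commit_fail(const long *p) { (void)p; return -5; /* e.g. -EIO */ }

/* On failure the valid flag at params[idx] is cleared again so a
 * rejected custom profile is not silently re-applied later. */
static int apply_custom(long *params, int idx, int (*commit)(const long *))
{
	int ret;

	params[idx] = 1;      /* mark the per-clock slot valid */
	ret = commit(params); /* e.g. push coefficients to firmware */
	if (ret)
		params[idx] = 0; /* roll back */
	return ret;
}

int main(void)
{
	long params[30] = { 0 };

	apply_custom(params, 0, commit_fail);
	printf("after failed commit: flag=%ld\n", params[0]); /* 0 */
	apply_custom(params, 0, commit_ok);
	printf("after good commit:   flag=%ld\n", params[0]); /* 1 */
	return 0;
}
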


@ -1056,42 +1056,27 @@ static int vangogh_get_power_profile_mode(struct smu_context *smu,
return size;
}
static int vangogh_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
u32 backend_workload_mask = 0;
int ret;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
backend_workload_mask,
NULL);
if (ret) {
dev_err_once(smu->adev->dev, "Fail to set workload type %d\n",
workload_type);
dev_err_once(smu->adev->dev, "Fail to set workload mask 0x%08x\n",
workload_mask);
return ret;
}
return ret;
}
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,


@ -864,44 +864,27 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return ret;
}
static int renoir_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
int ret;
u32 backend_workload_mask = 0;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
backend_workload_mask,
NULL);
if (ret) {
dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type);
dev_err_once(smu->adev->dev, "Failed to set workload mask 0x08%x\n",
workload_mask);
return ret;
}
return ret;
}
static int renoir_set_peak_clock_by_device(struct smu_context *smu)


@ -2571,82 +2571,76 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
return size;
}
#define SMU_13_0_0_CUSTOM_PARAMS_COUNT 9
#define SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT 2
#define SMU_13_0_0_CUSTOM_PARAMS_SIZE (SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_0_CUSTOM_PARAMS_COUNT * sizeof(long))
static int smu_v13_0_0_set_power_profile_mode_coeff(struct smu_context *smu,
long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int ret, idx;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external),
false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
idx = 0 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Gfxclk */
activity_monitor->Gfx_FPS = input[idx + 1];
activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
activity_monitor->Gfx_BoosterFreq = input[idx + 5];
activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
}
idx = 1 * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Fclk */
activity_monitor->Fclk_FPS = input[idx + 1];
activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
activity_monitor->Fclk_BoosterFreq = input[idx + 5];
activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external),
true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
return ret;
}
static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
u32 backend_workload_mask = 0;
int workload_type, ret, idx = -1, i;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
@ -2658,24 +2652,47 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
CMN2ASIC_MAPPING_WORKLOAD,
PP_SMC_POWER_PROFILE_POWERSAVING);
if (workload_type >= 0)
backend_workload_mask |= 1 << workload_type;
}
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
if (!smu->custom_profile_params) {
smu->custom_profile_params =
kzalloc(SMU_13_0_0_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
if (!smu->custom_profile_params)
return -ENOMEM;
}
if (custom_params && custom_params_max_idx) {
if (custom_params_max_idx != SMU_13_0_0_CUSTOM_PARAMS_COUNT)
return -EINVAL;
if (custom_params[0] >= SMU_13_0_0_CUSTOM_PARAMS_CLOCK_COUNT)
return -EINVAL;
idx = custom_params[0] * SMU_13_0_0_CUSTOM_PARAMS_COUNT;
smu->custom_profile_params[idx] = 1;
for (i = 1; i < custom_params_max_idx; i++)
smu->custom_profile_params[idx + i] = custom_params[i];
}
ret = smu_v13_0_0_set_power_profile_mode_coeff(smu,
smu->custom_profile_params);
if (ret) {
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
} else if (smu->custom_profile_params) {
memset(smu->custom_profile_params, 0, SMU_13_0_0_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
backend_workload_mask,
NULL);
if (ret) {
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
workload_mask);
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
return ret;
}
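
The hunk above keeps the SMU 13.0.0/13.0.10 quirk of folding the power-saving workload into whatever mask the user requested; with backend masks this reduces to OR-ing one extra bit before the message goes out. A sketch with made-up bit values (the real WORKLOAD_PPLIB_*_BIT numbers differ):

#include <stdio.h>

#define BIT(n) (1u << (n))

/* made-up backend bit numbers, for illustration only */
enum { WL_FULLSCREEN3D_BIT = 2, WL_POWERSAVING_BIT = 5 };

int main(void)
{
	unsigned int backend_mask = BIT(WL_FULLSCREEN3D_BIT);

	/* fold the power-saving workload into the requested mask */
	backend_mask |= BIT(WL_POWERSAVING_BIT);

	printf("backend mask: 0x%08x\n", backend_mask); /* prints 0x00000024 */
	return 0;
}
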


@ -2530,79 +2530,110 @@ do { \
return result;
}
#define SMU_13_0_7_CUSTOM_PARAMS_COUNT 8
#define SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT 2
#define SMU_13_0_7_CUSTOM_PARAMS_SIZE (SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT * SMU_13_0_7_CUSTOM_PARAMS_COUNT * sizeof(long))
static int smu_v13_0_7_set_power_profile_mode_coeff(struct smu_context *smu,
long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int ret, idx;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
idx = 0 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Gfxclk */
activity_monitor->Gfx_ActiveHystLimit = input[idx + 1];
activity_monitor->Gfx_IdleHystLimit = input[idx + 2];
activity_monitor->Gfx_FPS = input[idx + 3];
activity_monitor->Gfx_MinActiveFreqType = input[idx + 4];
activity_monitor->Gfx_BoosterFreqType = input[idx + 5];
activity_monitor->Gfx_MinActiveFreq = input[idx + 6];
activity_monitor->Gfx_BoosterFreq = input[idx + 7];
}
idx = 1 * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Fclk */
activity_monitor->Fclk_ActiveHystLimit = input[idx + 1];
activity_monitor->Fclk_IdleHystLimit = input[idx + 2];
activity_monitor->Fclk_FPS = input[idx + 3];
activity_monitor->Fclk_MinActiveFreqType = input[idx + 4];
activity_monitor->Fclk_BoosterFreqType = input[idx + 5];
activity_monitor->Fclk_MinActiveFreq = input[idx + 6];
activity_monitor->Fclk_BoosterFreq = input[idx + 7];
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external), true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
return ret;
}
static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
u32 backend_workload_mask = 0;
int ret, idx = -1, i;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
if (!smu->custom_profile_params) {
smu->custom_profile_params =
kzalloc(SMU_13_0_7_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
if (!smu->custom_profile_params)
return -ENOMEM;
}
if (custom_params && custom_params_max_idx) {
if (custom_params_max_idx != SMU_13_0_7_CUSTOM_PARAMS_COUNT)
return -EINVAL;
if (custom_params[0] >= SMU_13_0_7_CUSTOM_PARAMS_CLOCK_COUNT)
return -EINVAL;
idx = custom_params[0] * SMU_13_0_7_CUSTOM_PARAMS_COUNT;
smu->custom_profile_params[idx] = 1;
for (i = 1; i < custom_params_max_idx; i++)
smu->custom_profile_params[idx + i] = custom_params[i];
}
ret = smu_v13_0_7_set_power_profile_mode_coeff(smu,
smu->custom_profile_params);
if (ret) {
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
} else if (smu->custom_profile_params) {
memset(smu->custom_profile_params, 0, SMU_13_0_7_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
backend_workload_mask, NULL);
if (ret) {
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
workload_mask);
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
return ret;
}


@ -1739,89 +1739,120 @@ static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
return size;
}
#define SMU_14_0_2_CUSTOM_PARAMS_COUNT 9
#define SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT 2
#define SMU_14_0_2_CUSTOM_PARAMS_SIZE (SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT * SMU_14_0_2_CUSTOM_PARAMS_COUNT * sizeof(long))
static int smu_v14_0_2_set_power_profile_mode_coeff(struct smu_context *smu,
long *input)
{
DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
DpmActivityMonitorCoeffInt_t *activity_monitor =
&(activity_monitor_external.DpmActivityMonitorCoeffInt);
int ret, idx;
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external),
false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
}
idx = 0 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Gfxclk */
activity_monitor->Gfx_FPS = input[idx + 1];
activity_monitor->Gfx_MinActiveFreqType = input[idx + 2];
activity_monitor->Gfx_MinActiveFreq = input[idx + 3];
activity_monitor->Gfx_BoosterFreqType = input[idx + 4];
activity_monitor->Gfx_BoosterFreq = input[idx + 5];
activity_monitor->Gfx_PD_Data_limit_c = input[idx + 6];
activity_monitor->Gfx_PD_Data_error_coeff = input[idx + 7];
activity_monitor->Gfx_PD_Data_error_rate_coeff = input[idx + 8];
}
idx = 1 * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
if (input[idx]) {
/* Fclk */
activity_monitor->Fclk_FPS = input[idx + 1];
activity_monitor->Fclk_MinActiveFreqType = input[idx + 2];
activity_monitor->Fclk_MinActiveFreq = input[idx + 3];
activity_monitor->Fclk_BoosterFreqType = input[idx + 4];
activity_monitor->Fclk_BoosterFreq = input[idx + 5];
activity_monitor->Fclk_PD_Data_limit_c = input[idx + 6];
activity_monitor->Fclk_PD_Data_error_coeff = input[idx + 7];
activity_monitor->Fclk_PD_Data_error_rate_coeff = input[idx + 8];
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT,
(void *)(&activity_monitor_external),
true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
}
return ret;
}
static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
u32 workload_mask,
long *custom_params,
u32 custom_params_max_idx)
{
u32 backend_workload_mask = 0;
int ret, idx = -1, i;
smu_cmn_get_backend_workload_mask(smu, workload_mask,
&backend_workload_mask);
/* disable deep sleep if compute is enabled */
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_COMPUTE))
smu_v14_0_deep_sleep_control(smu, false);
else
smu_v14_0_deep_sleep_control(smu, true);
if (workload_mask & (1 << PP_SMC_POWER_PROFILE_CUSTOM)) {
if (!smu->custom_profile_params) {
smu->custom_profile_params =
kzalloc(SMU_14_0_2_CUSTOM_PARAMS_SIZE, GFP_KERNEL);
if (!smu->custom_profile_params)
return -ENOMEM;
}
if (custom_params && custom_params_max_idx) {
if (custom_params_max_idx != SMU_14_0_2_CUSTOM_PARAMS_COUNT)
return -EINVAL;
if (custom_params[0] >= SMU_14_0_2_CUSTOM_PARAMS_CLOCK_COUNT)
return -EINVAL;
idx = custom_params[0] * SMU_14_0_2_CUSTOM_PARAMS_COUNT;
smu->custom_profile_params[idx] = 1;
for (i = 1; i < custom_params_max_idx; i++)
smu->custom_profile_params[idx + i] = custom_params[i];
}
ret = smu_v14_0_2_set_power_profile_mode_coeff(smu,
smu->custom_profile_params);
if (ret) {
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
} else if (smu->custom_profile_params) {
memset(smu->custom_profile_params, 0, SMU_14_0_2_CUSTOM_PARAMS_SIZE);
}
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
backend_workload_mask, NULL);
if (ret) {
dev_err(smu->adev->dev, "Failed to set workload mask 0x%08x\n",
workload_mask);
if (idx != -1)
smu->custom_profile_params[idx] = 0;
return ret;
}
return ret;
}
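
On SMU 14.0.2 deep sleep is now gated directly on whether the requested mask contains the compute profile, rather than on a cached previous profile mode. Modeled with an illustrative bit position (not the kernel's actual value):

#include <stdbool.h>
#include <stdio.h>

#define PROFILE_COMPUTE_BIT 5 /* illustrative bit position */

static void deep_sleep_control(bool enable)
{
	printf("deep sleep %s\n", enable ? "enabled" : "disabled");
}

int main(void)
{
	unsigned int workload_mask = 1u << PROFILE_COMPUTE_BIT;

	/* keyed off the requested mask itself, not a cached previous mode */
	deep_sleep_control(!(workload_mask & (1u << PROFILE_COMPUTE_BIT)));
	return 0;
}
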


@ -1144,14 +1144,6 @@ int smu_cmn_set_mp1_state(struct smu_context *smu,
return ret;
}
bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
struct pci_dev *p = NULL;
@ -1229,3 +1221,28 @@ void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
policy->desc = &xgmi_plpd_policy_desc;
}
void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
u32 workload_mask,
u32 *backend_workload_mask)
{
int workload_type;
u32 profile_mode;
*backend_workload_mask = 0;
for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
if (!(workload_mask & (1 << profile_mode)))
continue;
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
profile_mode);
if (workload_type < 0)
continue;
*backend_workload_mask |= 1 << workload_type;
}
}
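
smu_cmn_get_backend_workload_mask() is the single translation point from generic PP_SMC_POWER_PROFILE_* bits to ASIC-specific WORKLOAD_PPLIB_*_BIT positions; profiles the ASIC does not support are skipped rather than failing the request. A userspace model of that loop, with an invented mapping table standing in for smu_cmn_to_asic_specific_index():

#include <stdio.h>

#define PROFILE_COUNT 7

/* invented mapping: generic profile -> backend bit, -1 = unsupported */
static const int asic_map[PROFILE_COUNT] = { 0, 1, -1, 2, 3, -1, 4 };

static unsigned int backend_mask(unsigned int workload_mask)
{
	unsigned int out = 0;
	int profile;

	for (profile = 0; profile < PROFILE_COUNT; profile++) {
		if (!(workload_mask & (1u << profile)))
			continue;
		if (asic_map[profile] < 0)
			continue; /* unsupported: skip, don't fail */
		out |= 1u << asic_map[profile];
	}
	return out;
}

int main(void)
{
	/* request profiles 1 and 2; profile 2 is unsupported here */
	printf("0x%08x\n", backend_mask((1u << 1) | (1u << 2))); /* 0x00000002 */
	return 0;
}
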


@ -130,8 +130,6 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev);
int smu_cmn_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
/*
* Helper function to make sysfs_emit_at() happy. Align buf to
* the current page boundary and record the offset.
@ -149,5 +147,9 @@ bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
u32 workload_mask,
u32 *backend_workload_mask);
#endif
#endif