drm fixes for v6.7-rc5

atomic-helpers:
 - invoke end_fb_access while owning plane state
 
 i915:
 - fix a missing dep for a previous fix
 - Relax BXT/GLK DSI transcoder hblank limits
 - Fix DP MST .mode_valid_ctx() return values
 - Reject DP MST modes that require bigjoiner (as it's not yet supported on DP MST)
 - Fix _intel_dsb_commit() variable type to allow negative values
 
 nouveau:
 - document some bits of gsp rm
 - flush vmm more on tu102 to avoid hangs
 
 panfrost:
 - fix imported dma-buf objects residency
 - fix device freq update
 
 bridge:
 - tc358768 - fix Kconfig
 
 amdgpu:
 - Disable MCBP on gfx9
 - DC vbios fix
 - eDP fix
 - dml2 UBSAN fix
 - SMU 14 fix
 - RAS fixes
 - dml KASAN/KCSAN fix
 - PSP 13 fix
 - Clockgating fixes
 - Suspend fix
 
 exynos:
 - fix pointer dereference
 - fix wrong error check
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmVyoMEACgkQDHTzWXnE
 hr4FlxAAlSOOMsRvrmK5qChAZhqCEqAhVo+N+8BnjR/whHWhGPQWqyrdoJVBAOLh
 ZZtqfRW343V6+HESW+VRoQfv7JnaKZzh2/aimJQbqa/7kxfitKfUxltMkwRR/Hqm
 GwRDIlCYj0rGJrty3NVccYOWC1eL0nwDGIbRXFuQ1KsreQYD/UJ6JU3ZiPaeaNoX
 d2JLGoHwVE1Oa+MQTnOcgAbXxUZfesLc4jGizeh+mRMw1n/h1YutQwJy4xvC5UHB
 529Yc7HW4kim0toGwSLnpnew1PCuld+XiuEU6OgT74drYZ5t7lHwJUxOkrOxK+nU
 j5yW1YW0+3DdIJZgbrOZu1gVee3oZVKO+KZL6zn+A2d4wwY9EonFsCZECqHUeoLC
 eLUCpNwH2HwOsDCsjQ6CqZ/yWFayska5UGmqux7kbIv3FV/vrZsk2ZMRN7+K4ogo
 N5xwhp63Bzg11HEoOFml0egPaNhJSCIuEJ2GoyKCmMmEXibtjm9B4Y9YvhaF3zgx
 LJaLlXwKiuvYM6Mp3enST65FrtziB22NblHJ8y3B04GFq9fLRc5RKAEWPhlIdRh5
 2KSMNJVCYge/hN9M782CBeDjW9u3yDGom58Gqt6nSlQhTCLInWqdueO2jfs/sKR3
 K7UR1jLO+2JmzbukgHjOuL6L4XMBWPchcBymm0Bgh8lZZV3zGxk=
 =c5mf
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2023-12-08' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular weekly fixes, mostly amdgpu and i915 as usual. A couple of
  nouveau, panfrost, one core and one bridge Kconfig.

  Seems about normal for rc5.

  atomic-helpers:
   - invoke end_fb_access while owning plane state

  i915:
   - fix a missing dep for a previous fix
   - Relax BXT/GLK DSI transcoder hblank limits
   - Fix DP MST .mode_valid_ctx() return values
   - Reject DP MST modes that require bigjoiner (as it's not yet
     supported on DP MST)
   - Fix _intel_dsb_commit() variable type to allow negative values

  nouveau:
   - document some bits of gsp rm
   - flush vmm more on tu102 to avoid hangs

  panfrost:
   - fix imported dma-buf objects residency
   - fix device freq update

  bridge:
   - tc358768 - fix Kconfig

  amdgpu:
   - Disable MCBP on gfx9
   - DC vbios fix
   - eDP fix
   - dml2 UBSAN fix
   - SMU 14 fix
   - RAS fixes
   - dml KASAN/KCSAN fix
   - PSP 13 fix
   - Clockgating fixes
   - Suspend fix

  exynos:
   - fix pointer dereference
   - fix wrong error check"

* tag 'drm-fixes-2023-12-08' of git://anongit.freedesktop.org/drm/drm: (27 commits)
  drm/exynos: fix a wrong error checking
  drm/exynos: fix a potential error pointer dereference
  drm/amdgpu: fix buffer funcs setting order on suspend
  drm/amdgpu: Avoid querying DRM MGCG status
  drm/amdgpu: Update HDP 4.4.2 clock gating flags
  drm/amdgpu: Add NULL checks for function pointers
  drm/amdgpu: Restrict extended wait to PSP v13.0.6
  drm/amd/display: Increase frame warning limit with KASAN or KCSAN in dml
  drm/amdgpu: optimize the printing order of error data
  drm/amdgpu: Update fw version for boot time error query
  drm/amd/pm: support new mca smu error code decoding
  drm/amd/swsmu: update smu v14_0_0 driver if version and metrics table
  drm/amd/display: Fix array-index-out-of-bounds in dml2
  drm/amd/display: Add monitor patch for specific eDP
  drm/amd/display: Use channel_width = 2 for vram table 3.0
  drm/amdgpu: disable MCBP by default
  drm/atomic-helpers: Invoke end_fb_access while owning plane state
  drm/i915: correct the input parameter on _intel_dsb_commit()
  drm/i915/mst: Reject modes that require the bigjoiner
  drm/i915/mst: Fix .mode_valid_ctx() return values
  ...
commit 38bafa65b1 by Linus Torvalds, 2023-12-08 11:17:44 -08:00
39 changed files with 503 additions and 98 deletions


@ -3791,10 +3791,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
adev->gfx.mcbp = true;
else if (amdgpu_mcbp == 0)
adev->gfx.mcbp = false;
else if ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 0, 0)) &&
(amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0)) &&
adev->gfx.num_gfx_rings)
adev->gfx.mcbp = true;
if (amdgpu_sriov_vf(adev))
adev->gfx.mcbp = true;
@ -4531,6 +4527,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (r)
return r;
amdgpu_ttm_set_buffer_funcs_status(adev, false);
amdgpu_fence_driver_hw_fini(adev);
amdgpu_device_ip_suspend_phase2(adev);


@ -46,6 +46,8 @@
#define MCA_REG__STATUS__ERRORCODEEXT(x) MCA_REG_FIELD(x, 21, 16)
#define MCA_REG__STATUS__ERRORCODE(x) MCA_REG_FIELD(x, 15, 0)
#define MCA_REG__SYND__ERRORINFORMATION(x) MCA_REG_FIELD(x, 17, 0)
enum amdgpu_mca_ip {
AMDGPU_MCA_IP_UNKNOW = -1,
AMDGPU_MCA_IP_PSP = 0,


@ -28,6 +28,7 @@
#include <linux/reboot.h>
#include <linux/syscalls.h>
#include <linux/pm_runtime.h>
#include <linux/list_sort.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
@ -3665,6 +3666,21 @@ static struct ras_err_node *amdgpu_ras_error_node_new(void)
return err_node;
}
static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
{
struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
if (unlikely(infoa->socket_id != infob->socket_id))
return infoa->socket_id - infob->socket_id;
else
return infoa->die_id - infob->die_id;
return 0;
}
static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
struct amdgpu_smuio_mcm_config_info *mcm_info)
{
@ -3682,6 +3698,7 @@ static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_d
err_data->err_list_count++;
list_add_tail(&err_node->node, &err_data->err_node_list);
list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
return &err_node->err_info;
}


@ -129,6 +129,11 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
{
int data;
if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 2)) {
/* Default enabled */
*flags |= AMD_CG_SUPPORT_HDP_MGCG;
return;
}
/* AMD_CG_SUPPORT_HDP_LS */
data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)


@ -60,7 +60,7 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_0_ta.bin");
#define GFX_CMD_USB_PD_USE_LFB 0x480
/* Retry times for vmbx ready wait */
#define PSP_VMBX_POLLING_LIMIT 20000
#define PSP_VMBX_POLLING_LIMIT 3000
/* VBIOS gfl defines */
#define MBOX_READY_MASK 0x80000000
@ -161,14 +161,18 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int retry_loop, ret;
int retry_loop, retry_cnt, ret;
retry_cnt =
(amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) ?
PSP_VMBX_POLLING_LIMIT :
10;
/* Wait for bootloader to signify that it is ready having bit 31 of
* C2PMSG_35 set to 1. All other bits are expected to be cleared.
* If there is an error in processing command, bits[7:0] will be set.
* This is applicable for PSP v13.0.6 and newer.
*/
for (retry_loop = 0; retry_loop < PSP_VMBX_POLLING_LIMIT; retry_loop++) {
for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) {
ret = psp_wait_for(
psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
0x80000000, 0xffffffff, false);
@ -821,7 +825,7 @@ static int psp_v13_0_query_boot_status(struct psp_context *psp)
if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
return 0;
if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10007)
if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10109)
return 0;
for_each_inst(i, inst_mask) {


@ -1423,11 +1423,14 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
adev->nbio.funcs->get_clockgating_state(adev, flags);
if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state)
adev->nbio.funcs->get_clockgating_state(adev, flags);
adev->hdp.funcs->get_clock_gating_state(adev, flags);
if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state)
adev->hdp.funcs->get_clock_gating_state(adev, flags);
if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) {
if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))) {
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
if (!(data & 0x01000000))
@ -1440,9 +1443,11 @@ static void soc15_common_get_clockgating_state(void *handle, u64 *flags)
}
/* AMD_CG_SUPPORT_ROM_MGCG */
adev->smuio.funcs->get_clock_gating_state(adev, flags);
if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state)
adev->smuio.funcs->get_clock_gating_state(adev, flags);
adev->df.funcs->get_clockgating_state(adev, flags);
if (adev->df.funcs && adev->df.funcs->get_clockgating_state)
adev->df.funcs->get_clockgating_state(adev, flags);
}
static int soc15_common_set_powergating_state(void *handle,


@ -63,6 +63,12 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_fams = true;
break;
/* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
default:
return;
}


@ -2386,7 +2386,13 @@ static enum bp_result get_vram_info_v30(
return BP_RESULT_BADBIOSTABLE;
info->num_chans = info_v30->channel_num;
info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
/* As suggested by VBIOS we should always use
* dram_channel_width_bytes = 2 when using VRAM
* table version 3.0. This is because the channel_width
* param in the VRAM info table is changed in 7000 series and
* no longer represents the memory channel width.
*/
info->dram_channel_width_bytes = 2;
return result;
}


@ -61,8 +61,12 @@ endif
endif
ifneq ($(CONFIG_FRAME_WARN),0)
ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
frame_warn_flag := -Wframe-larger-than=3072
else
frame_warn_flag := -Wframe-larger-than=2048
endif
endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)


@ -9447,12 +9447,12 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
// Output
CalculateWatermarks_params->Watermark = &s->dummy_watermark; // Watermarks *Watermark
CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[j];
CalculateWatermarks_params->DRAMClockChangeSupport = &mode_lib->ms.support.DRAMClockChangeSupport[0];
CalculateWatermarks_params->MaxActiveDRAMClockChangeLatencySupported = &s->dummy_single_array[0][0]; // dml_float_t *MaxActiveDRAMClockChangeLatencySupported[]
CalculateWatermarks_params->SubViewportLinesNeededInMALL = &mode_lib->ms.SubViewportLinesNeededInMALL[j]; // dml_uint_t SubViewportLinesNeededInMALL[]
CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[j];
CalculateWatermarks_params->FCLKChangeSupport = &mode_lib->ms.support.FCLKChangeSupport[0];
CalculateWatermarks_params->MaxActiveFCLKChangeLatencySupported = &s->dummy_single[0]; // dml_float_t *MaxActiveFCLKChangeLatencySupported
CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[j];
CalculateWatermarks_params->USRRetrainingSupport = &mode_lib->ms.support.USRRetrainingSupport[0];
CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
&mode_lib->scratch,


@ -1085,6 +1085,10 @@ struct gpu_metrics_v3_0 {
uint16_t average_dram_reads;
/* time filtered DRAM write bandwidth [MB/sec] */
uint16_t average_dram_writes;
/* time filtered IPU read bandwidth [MB/sec] */
uint16_t average_ipu_reads;
/* time filtered IPU write bandwidth [MB/sec] */
uint16_t average_ipu_writes;
/* Driver attached timestamp (in ns) */
uint64_t system_clock_counter;
@ -1104,6 +1108,8 @@ struct gpu_metrics_v3_0 {
uint32_t average_all_core_power;
/* calculated core power [mW] */
uint16_t average_core_power[16];
/* time filtered total system power [mW] */
uint16_t average_sys_power;
/* maximum IRM defined STAPM power limit [mW] */
uint16_t stapm_power_limit;
/* time filtered STAPM power limit [mW] */
@ -1116,6 +1122,8 @@ struct gpu_metrics_v3_0 {
uint16_t average_ipuclk_frequency;
uint16_t average_fclk_frequency;
uint16_t average_vclk_frequency;
uint16_t average_uclk_frequency;
uint16_t average_mpipu_frequency;
/* Current clocks */
/* target core frequency [MHz] */
@ -1125,6 +1133,15 @@ struct gpu_metrics_v3_0 {
/* GFXCLK frequency limit enforced on GFX [MHz] */
uint16_t current_gfx_maxfreq;
/* Throttle Residency (ASIC dependent) */
uint32_t throttle_residency_prochot;
uint32_t throttle_residency_spl;
uint32_t throttle_residency_fppt;
uint32_t throttle_residency_sppt;
uint32_t throttle_residency_thm_core;
uint32_t throttle_residency_thm_gfx;
uint32_t throttle_residency_thm_soc;
/* Metrics table alpha filter time constant [us] */
uint32_t time_filter_alphavalue;
};


@ -1408,6 +1408,16 @@ typedef enum {
METRICS_PCIE_WIDTH,
METRICS_CURR_FANPWM,
METRICS_CURR_SOCKETPOWER,
METRICS_AVERAGE_VPECLK,
METRICS_AVERAGE_IPUCLK,
METRICS_AVERAGE_MPIPUCLK,
METRICS_THROTTLER_RESIDENCY_PROCHOT,
METRICS_THROTTLER_RESIDENCY_SPL,
METRICS_THROTTLER_RESIDENCY_FPPT,
METRICS_THROTTLER_RESIDENCY_SPPT,
METRICS_THROTTLER_RESIDENCY_THM_CORE,
METRICS_THROTTLER_RESIDENCY_THM_GFX,
METRICS_THROTTLER_RESIDENCY_THM_SOC,
} MetricsMember_t;
enum smu_cmn2asic_mapping_type {


@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
#define PMFW_DRIVER_IF_VERSION 6
#define PMFW_DRIVER_IF_VERSION 7
typedef struct {
int32_t value;
@ -150,37 +150,50 @@ typedef struct {
} DpmClocks_t;
typedef struct {
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
uint16_t CorePower[16]; //CAC calculated core power [mW]
uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C]
uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C]
uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C]
uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW]
uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW]
uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz]
uint16_t GfxActivity; //Time filtered GFX busy % [0-100]
uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz]
uint16_t VcnActivity; //Time filtered VCN busy % [0-100]
uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100]
uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec]
uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec]
uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100]
uint16_t IpuPower; //Time filtered IPU power [mW]
uint32_t ApuPower; //Time filtered APU power [mW]
uint32_t GfxPower; //Time filtered GFX power [mW]
uint32_t dGpuPower; //Time filtered dGPU power [mW]
uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW]
uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
uint32_t spare[16];
uint16_t CoreFrequency[16]; //Target core frequency [MHz]
uint16_t CorePower[16]; //CAC calculated core power [mW]
uint16_t CoreTemperature[16]; //TSEN measured core temperature [centi-C]
uint16_t GfxTemperature; //TSEN measured GFX temperature [centi-C]
uint16_t SocTemperature; //TSEN measured SOC temperature [centi-C]
uint16_t StapmOpnLimit; //Maximum IRM defined STAPM power limit [mW]
uint16_t StapmCurrentLimit; //Time filtered STAPM power limit [mW]
uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
uint16_t SkinTemp; //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
uint16_t GfxclkFrequency; //Time filtered target GFXCLK frequency [MHz]
uint16_t FclkFrequency; //Time filtered target FCLK frequency [MHz]
uint16_t GfxActivity; //Time filtered GFX busy % [0-100]
uint16_t SocclkFrequency; //Time filtered target SOCCLK frequency [MHz]
uint16_t VclkFrequency; //Time filtered target VCLK frequency [MHz]
uint16_t VcnActivity; //Time filtered VCN busy % [0-100]
uint16_t VpeclkFrequency; //Time filtered target VPECLK frequency [MHz]
uint16_t IpuclkFrequency; //Time filtered target IPUCLK frequency [MHz]
uint16_t IpuBusy[8]; //Time filtered IPU per-column busy % [0-100]
uint16_t DRAMReads; //Time filtered DRAM read bandwidth [MB/sec]
uint16_t DRAMWrites; //Time filtered DRAM write bandwidth [MB/sec]
uint16_t CoreC0Residency[16]; //Time filtered per-core C0 residency % [0-100]
uint16_t IpuPower; //Time filtered IPU power [mW]
uint32_t ApuPower; //Time filtered APU power [mW]
uint32_t GfxPower; //Time filtered GFX power [mW]
uint32_t dGpuPower; //Time filtered dGPU power [mW]
uint32_t SocketPower; //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
uint32_t AllCorePower; //Time filtered sum of core power across all cores in the socket [mW]
uint32_t FilterAlphaValue; //Metrics table alpha filter time constant [us]
uint32_t MetricsCounter; //Counter that is incremented on every metrics table update [PM_TIMER cycles]
uint16_t MemclkFrequency; //Time filtered target MEMCLK frequency [MHz]
uint16_t MpipuclkFrequency; //Time filtered target MPIPUCLK frequency [MHz]
uint16_t IpuReads; //Time filtered IPU read bandwidth [MB/sec]
uint16_t IpuWrites; //Time filtered IPU write bandwidth [MB/sec]
uint32_t ThrottleResidency_PROCHOT; //Counter that is incremented on every metrics table update when PROCHOT was engaged [PM_TIMER cycles]
uint32_t ThrottleResidency_SPL; //Counter that is incremented on every metrics table update when SPL was engaged [PM_TIMER cycles]
uint32_t ThrottleResidency_FPPT; //Counter that is incremented on every metrics table update when fast PPT was engaged [PM_TIMER cycles]
uint32_t ThrottleResidency_SPPT; //Counter that is incremented on every metrics table update when slow PPT was engaged [PM_TIMER cycles]
uint32_t ThrottleResidency_THM_CORE; //Counter that is incremented on every metrics table update when CORE thermal throttling was engaged [PM_TIMER cycles]
uint32_t ThrottleResidency_THM_GFX; //Counter that is incremented on every metrics table update when GFX thermal throttling was engaged [PM_TIMER cycles]
uint32_t ThrottleResidency_THM_SOC; //Counter that is incremented on every metrics table update when SOC thermal throttling was engaged [PM_TIMER cycles]
uint16_t Psys; //Time filtered Psys power [mW]
uint16_t spare1;
uint32_t spare[6];
} SmuMetrics_t;
//ISP tile definitions


@ -2593,13 +2593,20 @@ static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct
static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
{
struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t errcode, instlo;
instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
if (instlo != 0x03b30400)
return false;
errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
errcode &= 0xff;
} else {
errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
}
return mca_smu_check_error_code(adev, mca_ras, errcode);
}


@ -246,11 +246,20 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = 0;
break;
case METRICS_AVERAGE_UCLK:
*value = 0;
*value = metrics->MemclkFrequency;
break;
case METRICS_AVERAGE_FCLK:
*value = metrics->FclkFrequency;
break;
case METRICS_AVERAGE_VPECLK:
*value = metrics->VpeclkFrequency;
break;
case METRICS_AVERAGE_IPUCLK:
*value = metrics->IpuclkFrequency;
break;
case METRICS_AVERAGE_MPIPUCLK:
*value = metrics->MpipuclkFrequency;
break;
case METRICS_AVERAGE_GFXACTIVITY:
*value = metrics->GfxActivity / 100;
break;
@ -270,8 +279,26 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->SocTemperature / 100 *
SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_THROTTLER_STATUS:
*value = 0;
case METRICS_THROTTLER_RESIDENCY_PROCHOT:
*value = metrics->ThrottleResidency_PROCHOT;
break;
case METRICS_THROTTLER_RESIDENCY_SPL:
*value = metrics->ThrottleResidency_SPL;
break;
case METRICS_THROTTLER_RESIDENCY_FPPT:
*value = metrics->ThrottleResidency_FPPT;
break;
case METRICS_THROTTLER_RESIDENCY_SPPT:
*value = metrics->ThrottleResidency_SPPT;
break;
case METRICS_THROTTLER_RESIDENCY_THM_CORE:
*value = metrics->ThrottleResidency_THM_CORE;
break;
case METRICS_THROTTLER_RESIDENCY_THM_GFX:
*value = metrics->ThrottleResidency_THM_GFX;
break;
case METRICS_THROTTLER_RESIDENCY_THM_SOC:
*value = metrics->ThrottleResidency_THM_SOC;
break;
case METRICS_VOLTAGE_VDDGFX:
*value = 0;
@ -498,6 +525,8 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
sizeof(uint16_t) * 16);
gpu_metrics->average_dram_reads = metrics.DRAMReads;
gpu_metrics->average_dram_writes = metrics.DRAMWrites;
gpu_metrics->average_ipu_reads = metrics.IpuReads;
gpu_metrics->average_ipu_writes = metrics.IpuWrites;
gpu_metrics->average_socket_power = metrics.SocketPower;
gpu_metrics->average_ipu_power = metrics.IpuPower;
@ -505,6 +534,7 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_gfx_power = metrics.GfxPower;
gpu_metrics->average_dgpu_power = metrics.dGpuPower;
gpu_metrics->average_all_core_power = metrics.AllCorePower;
gpu_metrics->average_sys_power = metrics.Psys;
memcpy(&gpu_metrics->average_core_power[0],
&metrics.CorePower[0],
sizeof(uint16_t) * 16);
@ -515,6 +545,8 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
gpu_metrics->average_mpipu_frequency = metrics.MpipuclkFrequency;
memcpy(&gpu_metrics->current_coreclk[0],
&metrics.CoreFrequency[0],
@ -522,6 +554,14 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_core_maxfreq = metrics.InfrastructureCpuMaxFreq;
gpu_metrics->current_gfx_maxfreq = metrics.InfrastructureGfxMaxFreq;
gpu_metrics->throttle_residency_prochot = metrics.ThrottleResidency_PROCHOT;
gpu_metrics->throttle_residency_spl = metrics.ThrottleResidency_SPL;
gpu_metrics->throttle_residency_fppt = metrics.ThrottleResidency_FPPT;
gpu_metrics->throttle_residency_sppt = metrics.ThrottleResidency_SPPT;
gpu_metrics->throttle_residency_thm_core = metrics.ThrottleResidency_THM_CORE;
gpu_metrics->throttle_residency_thm_gfx = metrics.ThrottleResidency_THM_GFX;
gpu_metrics->throttle_residency_thm_soc = metrics.ThrottleResidency_THM_SOC;
gpu_metrics->time_filter_alphavalue = metrics.FilterAlphaValue;
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();


@ -313,6 +313,7 @@ config DRM_TOSHIBA_TC358768
select REGMAP_I2C
select DRM_PANEL
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
help
Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.


@ -2012,7 +2012,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
return ret;
drm_atomic_helper_async_commit(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_unprepare_planes(dev, state);
return 0;
}
@ -2072,7 +2072,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
return 0;
err:
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_unprepare_planes(dev, state);
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit);
@ -2650,6 +2650,39 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
/**
* drm_atomic_helper_unprepare_planes - release plane resources on aborts
* @dev: DRM device
* @state: atomic state object with old state structures
*
* This function cleans up plane state, specifically framebuffers, from the
* atomic state. It undoes the effects of drm_atomic_helper_prepare_planes()
* when aborting an atomic commit. For cleaning up after a successful commit
* use drm_atomic_helper_cleanup_planes().
*/
void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
int i;
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
funcs->end_fb_access(plane, new_plane_state);
}
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, new_plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_unprepare_planes);
static bool plane_crtc_active(const struct drm_plane_state *state)
{
return state->crtc && state->crtc->state->active;
@ -2784,6 +2817,17 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_flush(crtc, old_state);
}
/*
* Signal end of framebuffer access here before hw_done. After hw_done,
* a later commit might have already released the plane state.
*/
for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
funcs->end_fb_access(plane, old_plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@ -2911,40 +2955,22 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_planes_on_crtc);
 * configuration. Hence the old configuration must be preserved in @old_state to
* be able to call this function.
*
* This function must also be called on the new state when the atomic update
* fails at any point after calling drm_atomic_helper_prepare_planes().
* This function may not be called on the new state when the atomic update
* fails at any point after calling drm_atomic_helper_prepare_planes(). Use
* drm_atomic_helper_unprepare_planes() in this case.
*/
void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_plane_state *old_plane_state;
int i;
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
const struct drm_plane_helper_funcs *funcs = plane->helper_private;
if (funcs->end_fb_access)
funcs->end_fb_access(plane, new_plane_state);
}
for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
struct drm_plane_state *plane_state;
/*
* This might be called before swapping when commit is aborted,
* in which case we have to cleanup the new state.
*/
if (old_plane_state == plane->state)
plane_state = new_plane_state;
else
plane_state = old_plane_state;
funcs = plane->helper_private;
if (funcs->cleanup_fb)
funcs->cleanup_fb(plane, plane_state);
funcs->cleanup_fb(plane, old_plane_state);
}
}
EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);


@ -107,18 +107,16 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
return 0;
if (!priv->mapping) {
void *mapping;
void *mapping = NULL;
if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
mapping = arm_iommu_create_mapping(&platform_bus_type,
EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
else if (IS_ENABLED(CONFIG_IOMMU_DMA))
mapping = iommu_get_domain_for_dev(priv->dma_dev);
else
mapping = ERR_PTR(-ENODEV);
if (IS_ERR(mapping))
return PTR_ERR(mapping);
if (!mapping)
return -ENODEV;
priv->mapping = mapping;
}


@ -1861,6 +1861,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
return ret;
crtc = exynos_drm_crtc_get_by_type(drm_dev, EXYNOS_DISPLAY_TYPE_HDMI);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
crtc->pipe_clk = &hdata->phy_clk;
ret = hdmi_create_connector(encoder);


@ -1440,6 +1440,13 @@ static void gen11_dsi_post_disable(struct intel_atomic_state *state,
static enum drm_mode_status gen11_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
/* FIXME: DSC? */
return intel_dsi_mode_valid(connector, mode);
}


@ -348,8 +348,13 @@ intel_crt_mode_valid(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
int max_dotclk = dev_priv->max_dotclk_freq;
enum drm_mode_status status;
int max_clock;
status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
if (status != MODE_OK)
return status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;


@ -906,12 +906,18 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
if (!new_crtc_state->hw.active)
return false;
return is_enabling(active_planes, old_crtc_state, new_crtc_state);
}
static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
if (!old_crtc_state->hw.active)
return false;
return is_disabling(active_planes, old_crtc_state, new_crtc_state);
}
@ -928,6 +934,9 @@ static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state,
static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
if (!new_crtc_state->hw.active)
return false;
return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
(new_crtc_state->vrr.enable &&
(new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
@ -937,6 +946,9 @@ static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
if (!old_crtc_state->hw.active)
return false;
return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
(old_crtc_state->vrr.enable &&
(new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
@ -7476,7 +7488,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_color_cleanup_commit(new_crtc_state);
drm_atomic_helper_cleanup_planes(dev, &state->base);
drm_atomic_helper_unprepare_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
@ -7857,6 +7869,16 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
return MODE_OK;
}
enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *dev_priv,
const struct drm_display_mode *mode)
{
/*
* Additional transcoder timing limits,
* excluding BXT/GLK DSI transcoders.
*/
if (DISPLAY_VER(dev_priv) >= 5) {
if (mode->hdisplay < 64 ||
mode->htotal - mode->hdisplay < 32)


@ -403,6 +403,9 @@ enum drm_mode_status
intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
const struct drm_display_mode *mode,
bool bigjoiner);
enum drm_mode_status
intel_cpu_transcoder_mode_valid(struct drm_i915_private *i915,
const struct drm_display_mode *mode);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
bool is_trans_port_sync_master(const struct intel_crtc_state *state);


@ -1172,6 +1172,10 @@ intel_dp_mode_valid(struct drm_connector *_connector,
enum drm_mode_status status;
bool dsc = false, bigjoiner = false;
status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
if (status != MODE_OK)
return status;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;


@ -959,6 +959,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
*status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
if (*status != MODE_OK)
return 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) {
*status = MODE_NO_DBLESCAN;
return 0;
@ -993,6 +997,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
/* TODO: add support for bigjoiner */
*status = MODE_CLOCK_HIGH;
return 0;
}
if (DISPLAY_VER(dev_priv) >= 10 &&
@ -1027,11 +1035,15 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
* Big joiner configuration needs DSC for TGL which is not true for
* XE_LPD where uncompressed joiner is supported.
*/
if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
return MODE_CLOCK_HIGH;
if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
if (mode_rate > max_rate && !dsc)
return MODE_CLOCK_HIGH;
if (mode_rate > max_rate && !dsc) {
*status = MODE_CLOCK_HIGH;
return 0;
}
*status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
return 0;


@ -340,7 +340,7 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
}
static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
unsigned int dewake_scanline)
int dewake_scanline)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);


@ -217,11 +217,17 @@ intel_dvo_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
int target_clock = mode->clock;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;


@ -1983,6 +1983,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool ycbcr_420_only;
enum intel_output_format sink_format;
status = intel_cpu_transcoder_mode_valid(dev_priv, mode);
if (status != MODE_OK)
return status;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;


@ -389,11 +389,16 @@ intel_lvds_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
struct intel_connector *connector = to_intel_connector(_connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *fixed_mode =
intel_panel_fixed_mode(connector, mode);
int max_pixclk = to_i915(connector->base.dev)->max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;


@ -1921,13 +1921,19 @@ static enum drm_mode_status
intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
bool has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo_connector, connector->state);
int max_dotclk = i915->max_dotclk_freq;
enum drm_mode_status status;
int clock = mode->clock;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;


@ -958,8 +958,14 @@ static enum drm_mode_status
intel_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int max_dotclk = i915->max_dotclk_freq;
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;


@ -1541,9 +1541,25 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
.destroy = intel_dsi_encoder_destroy,
};
static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_i915_private *i915 = to_i915(connector->dev);
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
enum drm_mode_status status;
status = intel_cpu_transcoder_mode_valid(i915, mode);
if (status != MODE_OK)
return status;
}
return intel_dsi_mode_valid(connector, mode);
}
static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
.get_modes = intel_dsi_get_modes,
.mode_valid = intel_dsi_mode_valid,
.mode_valid = vlv_dsi_mode_valid,
.atomic_check = intel_digital_connector_atomic_check,
};


@ -2474,7 +2474,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
err_cleanup:
if (ret)
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_unprepare_planes(dev, state);
done:
pm_runtime_put_autosuspend(dev->dev);
return ret;


@ -26,6 +26,49 @@
* DEALINGS IN THE SOFTWARE.
*/
/**
* msgqTxHeader -- TX queue data structure
* @version: the version of this structure, must be 0
* @size: the size of the entire queue, including this header
* @msgSize: the padded size of queue element, 16 is minimum
* @msgCount: the number of elements in this queue
* @writePtr: head index of this queue
* @flags: 1 = swap the RX pointers
* @rxHdrOff: offset of readPtr in this structure
* @entryOff: offset of beginning of queue (msgqRxHeader), relative to
* beginning of this structure
*
* The command queue is a queue of RPCs that are sent from the driver to the
* GSP. The status queue is a queue of messages/responses from GSP-RM to the
* driver. Although the driver allocates memory for both queues, the command
* queue is owned by the driver and the status queue is owned by GSP-RM. In
* addition, the headers of the two queues must not share the same 4K page.
*
* Each queue is prefixed with this data structure. The idea is that a queue
* and its header are written to only by their owner. That is, only the
* driver writes to the command queue and command queue header, and only the
* GSP writes to the status (receive) queue and its header.
*
* This is enforced by the concept of "swapping" the RX pointers. This is
* why the 'flags' field must be set to 1. 'rxHdrOff' is how the GSP knows
 * where the tail pointer of its status queue is.
*
* When the driver writes a new RPC to the command queue, it updates writePtr.
* When it reads a new message from the status queue, it updates readPtr. In
* this way, the GSP knows when a new command is in the queue (it polls
* writePtr) and it knows how much free space is in the status queue (it
* checks readPtr). The driver never cares about how much free space is in
* the status queue.
*
* As usual, producers write to the head pointer, and consumers read from the
* tail pointer. When head == tail, the queue is empty.
*
* So to summarize:
* command.writePtr = head of command queue
* command.readPtr = tail of status queue
* status.writePtr = head of status queue
* status.readPtr = tail of command queue
*/
typedef struct
{
NvU32 version; // queue version
@ -38,6 +81,14 @@ typedef struct
NvU32 entryOff; // Offset of entries from start of backing store.
} msgqTxHeader;
/**
* msgqRxHeader - RX queue data structure
* @readPtr: tail index of the other queue
*
* Although this is a separate struct, it could easily be merged into
* msgqTxHeader. msgqTxHeader.rxHdrOff is simply the offset of readPtr
* from the beginning of msgqTxHeader.
*/
typedef struct
{
NvU32 readPtr; // message id of last message read


@ -1377,6 +1377,13 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
return 0;
}
/**
* r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
*
* The GSP sequencer is a list of I/O commands that the GSP can send to
* the driver to perform for various purposes. The most common usage is to
* perform a special mid-initialization reset.
*/
static int
r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
{
@ -1716,6 +1723,23 @@ r535_gsp_libos_id8(const char *name)
return id;
}
/**
* create_pte_array() - creates a PTE array of a physically contiguous buffer
* @ptes: pointer to the array
* @addr: base address of physically contiguous buffer (GSP_PAGE_SIZE aligned)
* @size: size of the buffer
*
* GSP-RM sometimes expects physically-contiguous buffers to have an array of
* "PTEs" for each page in that buffer. Although in theory that allows for
* the buffer to be physically discontiguous, GSP-RM does not currently
* support that.
*
* In this case, the PTEs are DMA addresses of each page of the buffer. Since
* the buffer is physically contiguous, calculating all the PTEs is simple
* math.
*
* See memdescGetPhysAddrsForGpu()
*/
static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
{
unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
@ -1725,6 +1749,35 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
}
/**
* r535_gsp_libos_init() -- create the libos arguments structure
*
* The logging buffers are byte queues that contain encoded printf-like
* messages from GSP-RM. They need to be decoded by a special application
* that can parse the buffers.
*
* The 'loginit' buffer contains logs from early GSP-RM init and
* exception dumps. The 'logrm' buffer contains the subsequent logs. Both are
* written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
*
* The physical address map for the log buffer is stored in the buffer
* itself, starting with offset 1. Offset 0 contains the "put" pointer.
*
* The GSP only understands 4K pages (GSP_PAGE_SIZE), so even if the kernel is
* configured for a larger page size (e.g. 64K pages), we need to give
* the GSP an array of 4K pages. Fortunately, since the buffer is
* physically contiguous, it's simple math to calculate the addresses.
*
* The buffers must be a multiple of GSP_PAGE_SIZE. GSP-RM also currently
* ignores the @kind field for LOGINIT, LOGINTR, and LOGRM, but expects the
* buffers to be physically contiguous anyway.
*
* The memory allocated for the arguments must remain until the GSP sends the
* init_done RPC.
*
* See _kgspInitLibosLoggingStructures (allocates memory for buffers)
* See kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
*/
static int
r535_gsp_libos_init(struct nvkm_gsp *gsp)
{
@ -1835,6 +1888,35 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
}
/**
* nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
*
* The GSP uses a three-level page table, called radix3, to map the firmware.
* Each 64-bit "pointer" in the table is either the bus address of an entry in
* the next table (for levels 0 and 1) or the bus address of the next page in
* the GSP firmware image itself.
*
* Level 0 contains a single entry in one page that points to the first page
* of level 1.
*
* Level 1, since it's also only one page in size, contains up to 512 entries,
* one for each page in Level 2.
*
* Level 2 can be up to 512 pages in size, and each of those entries points to
* the next page of the firmware image. Since there can be up to 512*512
* pages, that limits the size of the firmware to 512*512*GSP_PAGE_SIZE = 1GB.
*
* Internally, the GSP has its window into system memory, but the base
* physical address of the aperture is not 0. In fact, it varies depending on
* the GPU architecture. Since the GPU is a PCI device, this window is
* accessed via DMA and is therefore bound by IOMMU translation. The end
* result is that GSP-RM must translate the bus addresses in the table to GSP
* physical addresses. All this should happen transparently.
*
* Returns 0 on success, or negative error code
*
* See kgspCreateRadix3_IMPL
*/
static int
nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
struct nvkm_gsp_radix3 *rx3)


@ -31,7 +31,7 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
type |= 0x00000006; /* HUB_ONLY | ALL PDB (hack) */
mutex_lock(&vmm->mmu->mutex);


@ -29,14 +29,20 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct panfrost_device *ptdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
return dev_pm_opp_set_rate(dev, *freq);
err = dev_pm_opp_set_rate(dev, *freq);
if (!err)
ptdev->pfdevfreq.current_frequency = *freq;
return err;
}
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
@ -58,7 +64,6 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
spin_lock_irqsave(&pfdevfreq->lock, irqflags);
panfrost_devfreq_update_utilization(pfdevfreq);
pfdevfreq->current_frequency = status->current_frequency;
status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
pfdevfreq->idle_time));
@ -164,6 +169,14 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_profile.initial_freq = cur_freq;
/*
* We could wait until panfrost_devfreq_target() to set this value, but
* since the simple_ondemand governor works asynchronously, there's a
* chance by the time someone opens the device's fdinfo file, current
* frequency hasn't been updated yet, so let's just do an early set.
*/
pfdevfreq->current_frequency = cur_freq;
/*
* Set the recommend OPP this will enable and configure the regulator
* if any and will avoid a switch off by regulator_late_cleanup()


@ -200,7 +200,7 @@ static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
enum drm_gem_object_status res = 0;
if (bo->base.pages)
if (bo->base.base.import_attach || bo->base.pages)
res |= DRM_GEM_OBJECT_RESIDENT;
if (bo->base.madv == PANFROST_MADV_DONTNEED)


@ -97,6 +97,8 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_unprepare_planes(struct drm_device *dev,
struct drm_atomic_state *state);
#define DRM_PLANE_COMMIT_ACTIVE_ONLY BIT(0)
#define DRM_PLANE_COMMIT_NO_DISABLE_AFTER_MODESET BIT(1)