Merge tag 'drm-msm-next-2025-01-07' of gitlab.freedesktop.org:drm/msm into drm-next

Updates for v6.14

MDSS:
- properly described UBWC registers
- added SM6150 (aka QCS615) support

MDP4:
- several small fixes

DPU:
- added SM6150 (aka QCS615) support
- enabled wide planes if virtual planes are enabled (by using two SSPPs for a single plane)
- fixed mode filtering for platforms without 3DMux
- fixed DSPP_2 / DSPP_3 links on several platforms
- corrected DSPP definitions on SDM670
- added CWB hardware blocks support
- added VBIF to DPU snapshots
- dropped struct dpu_rm_requirements

DP:
- reworked DP audio support

DSI:
- added SM6150 (aka QCS615) support

GPU:
- Print GMU core fw version
- GMU bandwidth voting for a740 and a750
- Expose uche trap base via uapi
- UAPI error reporting

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGsutUu4ff6OpXNXxqf1xaV0rV6oV23VXNRiF0_OEfe72Q@mail.gmail.com
Committed by Dave Airlie on 2025-01-13 11:14:06 +10:00 in commit 24c61d5533.
73 changed files with 2137 additions and 704 deletions

View File

@ -8,6 +8,7 @@ title: MSM Display Port Controller
maintainers:
- Kuogee Hsieh <quic_khsieh@quicinc.com>
- Abhinav Kumar <quic_abhinavk@quicinc.com>
description: |
Device tree bindings for DisplayPort host controller for MSM targets

View File

@ -30,6 +30,7 @@ properties:
- qcom,sdm845-dsi-ctrl
- qcom,sm6115-dsi-ctrl
- qcom,sm6125-dsi-ctrl
- qcom,sm6150-dsi-ctrl
- qcom,sm6350-dsi-ctrl
- qcom,sm6375-dsi-ctrl
- qcom,sm7150-dsi-ctrl
@ -349,6 +350,7 @@ allOf:
enum:
- qcom,sc7180-dsi-ctrl
- qcom,sc7280-dsi-ctrl
- qcom,sm6150-dsi-ctrl
- qcom,sm7150-dsi-ctrl
- qcom,sm8150-dsi-ctrl
- qcom,sm8250-dsi-ctrl

View File

@ -20,6 +20,7 @@ properties:
- qcom,dsi-phy-14nm-660
- qcom,dsi-phy-14nm-8953
- qcom,sm6125-dsi-phy-14nm
- qcom,sm6150-dsi-phy-14nm
reg:
items:

View File

@ -168,7 +168,8 @@ examples:
reg = <0xaf54000 0x104>,
<0xaf54200 0x0c0>,
<0xaf55000 0x770>,
<0xaf56000 0x09c>;
<0xaf56000 0x09c>,
<0xaf57000 0x09c>;
interrupt-parent = <&mdss0>;
interrupts = <12>;

View File

@ -0,0 +1,108 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/qcom,sm6150-dpu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm SM6150 Display DPU
maintainers:
- Abhinav Kumar <quic_abhinavk@quicinc.com>
- Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
$ref: /schemas/display/msm/dpu-common.yaml#
properties:
compatible:
const: qcom,sm6150-dpu
reg:
items:
- description: Address offset and size for mdp register set
- description: Address offset and size for vbif register set
reg-names:
items:
- const: mdp
- const: vbif
clocks:
items:
- description: Display ahb clock
- description: Display hf axi clock
- description: Display core clock
- description: Display vsync clock
clock-names:
items:
- const: iface
- const: bus
- const: core
- const: vsync
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
display-controller@ae01000 {
compatible = "qcom,sm6150-dpu";
reg = <0x0ae01000 0x8f000>,
<0x0aeb0000 0x2008>;
reg-names = "mdp", "vbif";
clocks = <&dispcc_mdss_ahb_clk>,
<&gcc_disp_hf_axi_clk>,
<&dispcc_mdss_mdp_clk>,
<&dispcc_mdss_vsync_clk>;
clock-names = "iface", "bus", "core", "vsync";
assigned-clocks = <&dispcc_mdss_vsync_clk>;
assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmhpd RPMHPD_CX>;
interrupt-parent = <&mdss>;
interrupts = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf0_out: endpoint {
};
};
port@1 {
reg = <1>;
dpu_intf1_out: endpoint {
remote-endpoint = <&mdss_dsi0_in>;
};
};
};
mdp_opp_table: opp-table {
compatible = "operating-points-v2";
opp-19200000 {
opp-hz = /bits/ 64 <19200000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-25600000 {
opp-hz = /bits/ 64 <25600000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-307200000 {
opp-hz = /bits/ 64 <307200000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
...

View File

@ -0,0 +1,245 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/qcom,sm6150-mdss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm SM6150 Display MDSS
maintainers:
- Abhinav Kumar <quic_abhinavk@quicinc.com>
- Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
description:
Device tree bindings for MSM Mobile Display Subsystem (MDSS) that encapsulates
sub-blocks like DPU display controller, DSI and DP interfaces etc. Device tree
bindings of MDSS are mentioned for SM6150 target.
$ref: /schemas/display/msm/mdss-common.yaml#
properties:
compatible:
items:
- const: qcom,sm6150-mdss
clocks:
items:
- description: Display AHB clock from gcc
- description: Display hf axi clock
- description: Display core clock
clock-names:
items:
- const: iface
- const: bus
- const: core
iommus:
maxItems: 1
interconnects:
maxItems: 2
interconnect-names:
maxItems: 2
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm6150-dpu
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
- const: qcom,sm6150-dsi-ctrl
- const: qcom,mdss-dsi-ctrl
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm6150-dsi-phy-14nm
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,qcs615-rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
display-subsystem@ae00000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "qcom,sm6150-mdss";
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
interconnects = <&mmss_noc MASTER_MDP0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
&config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "mdp0-mem", "cpu-cfg";
power-domains = <&dispcc_mdss_gdsc>;
clocks = <&dispcc_mdss_ahb_clk>,
<&gcc_disp_hf_axi_clk>,
<&dispcc_mdss_mdp_clk>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <1>;
iommus = <&apps_smmu 0x800 0x0>;
ranges;
display-controller@ae01000 {
compatible = "qcom,sm6150-dpu";
reg = <0x0ae01000 0x8f000>,
<0x0aeb0000 0x2008>;
reg-names = "mdp", "vbif";
clocks = <&dispcc_mdss_ahb_clk>,
<&gcc_disp_hf_axi_clk>,
<&dispcc_mdss_mdp_clk>,
<&dispcc_mdss_vsync_clk>;
clock-names = "iface", "bus", "core", "vsync";
assigned-clocks = <&dispcc_mdss_vsync_clk>;
assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmhpd RPMHPD_CX>;
interrupt-parent = <&mdss>;
interrupts = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf0_out: endpoint {
};
};
port@1 {
reg = <1>;
dpu_intf1_out: endpoint {
remote-endpoint = <&mdss_dsi0_in>;
};
};
};
mdp_opp_table: opp-table {
compatible = "operating-points-v2";
opp-19200000 {
opp-hz = /bits/ 64 <19200000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-25600000 {
opp-hz = /bits/ 64 <25600000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-307200000 {
opp-hz = /bits/ 64 <307200000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
dsi@ae94000 {
compatible = "qcom,sm6150-dsi-ctrl",
"qcom,mdss-dsi-ctrl";
reg = <0x0ae94000 0x400>;
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
interrupts = <4>;
clocks = <&dispcc_mdss_byte0_clk>,
<&dispcc_mdss_byte0_intf_clk>,
<&dispcc_mdss_pclk0_clk>,
<&dispcc_mdss_esc0_clk>,
<&dispcc_mdss_ahb_clk>,
<&gcc_disp_hf_axi_clk>;
clock-names = "byte",
"byte_intf",
"pixel",
"core",
"iface",
"bus";
assigned-clocks = <&dispcc_mdss_byte0_clk_src>,
<&dispcc_mdss_pclk0_clk_src>;
assigned-clock-parents = <&mdss_dsi0_phy 0>,
<&mdss_dsi0_phy 1>;
operating-points-v2 = <&dsi0_opp_table>;
phys = <&mdss_dsi0_phy>;
#address-cells = <1>;
#size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mdss_dsi0_in: endpoint {
remote-endpoint = <&dpu_intf1_out>;
};
};
port@1 {
reg = <1>;
mdss_dsi0_out: endpoint {
};
};
};
dsi0_opp_table: opp-table {
compatible = "operating-points-v2";
opp-164000000 {
opp-hz = /bits/ 64 <164000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
};
};
mdss_dsi0_phy: phy@ae94400 {
compatible = "qcom,sm6150-dsi-phy-14nm";
reg = <0x0ae94400 0x100>,
<0x0ae94500 0x300>,
<0x0ae94800 0x188>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
#clock-cells = <1>;
#phy-cells = <0>;
clocks = <&dispcc_mdss_ahb_clk>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";
};
};
...

View File

@ -78,6 +78,7 @@ msm-display-$(CONFIG_DRM_MSM_DPU) += \
disp/dpu1/dpu_hw_catalog.o \
disp/dpu1/dpu_hw_cdm.o \
disp/dpu1/dpu_hw_ctl.o \
disp/dpu1/dpu_hw_cwb.o \
disp/dpu1/dpu_hw_dsc.o \
disp/dpu1/dpu_hw_dsc_1_2.o \
disp/dpu1/dpu_hw_interrupts.o \

View File

@ -251,8 +251,8 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
/* Disable L2 bypass to avoid UCHE out of bounds errors */
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
@ -693,6 +693,8 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
if (ret)
goto fail;
adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull;
if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required

View File

@ -750,10 +750,10 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
/* Disable L2 bypass in the UCHE */
gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, lower_32_bits(adreno_gpu->uche_trap_base));
gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, upper_32_bits(adreno_gpu->uche_trap_base));
/* Set the GMEM VA range (0 to gpu->gmem) */
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO, 0x00100000);
@ -1760,11 +1760,6 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
unsigned int nr_rings;
int ret;
if (!pdev) {
DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
return ERR_PTR(-ENXIO);
}
a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
if (!a5xx_gpu)
return ERR_PTR(-ENOMEM);
@ -1805,5 +1800,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
adreno_gpu->ubwc_config.macrotile_mode = 0;
adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
adreno_gpu->uche_trap_base = 0x0001ffffffff0000ull;
return gpu;
}

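A quick sanity check on the constant introduced above: lower_32_bits(0x0001ffffffff0000) is 0xffff0000 and upper_32_bits() of it is 0x0001ffff, i.e. exactly the values the A5xx TRAP_BASE/WRITE_THRU_BASE writes used to hard-code, and the A4xx value 0xffff0000ffff0000 likewise splits back into the old 0xffff0000 pair. Storing the address in adreno_gpu->uche_trap_base therefore changes no register programming; it only makes the same address available to the new UAPI param added later in this series.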
View File

@ -1388,6 +1388,17 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
{ .name = "SH0", .buswidth = 16 },
{ .name = "MC0", .buswidth = 4 },
{
.name = "ACV",
.fixed = true,
.perfmode = BIT(3),
.perfmode_bw = 16500000,
},
{ /* sentinel */ },
},
},
.address_space_size = SZ_16G,
.preempt_record_size = 4192 * SZ_1K,
@ -1432,6 +1443,17 @@ static const struct adreno_info a7xx_gpus[] = {
.pwrup_reglist = &a7xx_pwrup_reglist,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
{ .name = "SH0", .buswidth = 16 },
{ .name = "MC0", .buswidth = 4 },
{
.name = "ACV",
.fixed = true,
.perfmode = BIT(2),
.perfmode_bw = 10687500,
},
{ /* sentinel */ },
},
},
.address_space_size = SZ_16G,
.preempt_record_size = 3572 * SZ_1K,

View File

@ -9,6 +9,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include <drm/drm_gem.h>
#include "a6xx_gpu.h"
@ -109,9 +110,11 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
bool suspended)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 perf_index;
u32 bw_index = 0;
unsigned long gpu_freq;
int ret = 0;
@ -124,6 +127,37 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
if (gpu_freq == gmu->gpu_freqs[perf_index])
break;
/* If enabled, find the corresponding DDR bandwidth index */
if (info->bcms && gmu->nr_gpu_bws > 1) {
unsigned int bw = dev_pm_opp_get_bw(opp, true, 0);
for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) {
if (bw == gmu->gpu_bw_table[bw_index])
break;
}
/* Vote AB as a fraction of the max bandwidth, starting from A750 */
if (bw && adreno_is_a750_family(adreno_gpu)) {
u64 tmp;
/* For now, vote for 25% of the bandwidth */
tmp = bw * 25;
do_div(tmp, 100);
/*
* The AB vote consists of a 16 bit wide quantized level
* against the maximum supported bandwidth.
* Quantization can be calculated as below:
* vote = (bandwidth * 2^16) / max bandwidth
*/
tmp *= MAX_AB_VOTE;
do_div(tmp, gmu->gpu_bw_table[gmu->nr_gpu_bws - 1]);
bw_index |= AB_VOTE(clamp(tmp, 1, MAX_AB_VOTE));
bw_index |= AB_VOTE_ENABLE;
}
}
gmu->current_perf_index = perf_index;
gmu->freq = gmu->gpu_freqs[perf_index];
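The AB vote packing above reduces to a few lines of integer math. A minimal sketch, reusing the 25% fraction from this hunk and the MAX_AB_VOTE/AB_VOTE()/AB_VOTE_ENABLE definitions this series adds to a6xx_hfi.h (the helper name itself is illustrative):

static u32 a6xx_ab_vote_sketch(u64 bw_kbps, u64 max_bw_kbps)
{
        u64 tmp = bw_kbps * 25;         /* for now, vote for 25% of the bandwidth */

        do_div(tmp, 100);

        /* quantize to a 16-bit level against the maximum supported bandwidth */
        tmp *= MAX_AB_VOTE;
        do_div(tmp, max_bw_kbps);

        /* caller ORs this into bw_index on top of the low-byte table index */
        return AB_VOTE(clamp(tmp, 1, MAX_AB_VOTE)) | AB_VOTE_ENABLE;
}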
@ -139,8 +173,10 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
return;
if (!gmu->legacy) {
a6xx_hfi_set_freq(gmu, perf_index);
dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
a6xx_hfi_set_freq(gmu, perf_index, bw_index);
/* With Bandwidth voting, we now vote for all resources, so skip OPP set */
if (!bw_index)
dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
return;
}
@ -729,6 +765,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
const struct block_header *blk;
u32 reg_offset;
u32 ver;
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
@ -775,6 +812,12 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
}
}
ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
DRM_INFO("Loaded GMU firmware v%u.%u.%u\n",
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
return 0;
}
@ -1265,7 +1308,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
msm_gem_object_set_name(bo->obj, name);
msm_gem_object_set_name(bo->obj, "%s", name);
return 0;
}
@ -1287,6 +1330,104 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return 0;
}
/**
* struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM)
* @unit: divisor used to convert bytes/sec bw value to an RPMh msg
* @width: multiplier used to convert bytes/sec bw value to an RPMh msg
* @vcd: virtual clock domain that this bcm belongs to
* @reserved: reserved field
*/
struct bcm_db {
__le32 unit;
__le16 width;
u8 vcd;
u8 reserved;
};
static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
const struct a6xx_info *info,
struct a6xx_gmu *gmu)
{
const struct bcm_db *bcm_data[GMU_MAX_BCMS] = { 0 };
unsigned int bcm_index, bw_index, bcm_count = 0;
/* Retrieve BCM data from cmd-db */
for (bcm_index = 0; bcm_index < GMU_MAX_BCMS; bcm_index++) {
const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
size_t count;
/* Stop at NULL terminated bcm entry */
if (!bcm->name)
break;
bcm_data[bcm_index] = cmd_db_read_aux_data(bcm->name, &count);
if (IS_ERR(bcm_data[bcm_index]))
return PTR_ERR(bcm_data[bcm_index]);
if (!count) {
dev_err(gmu->dev, "invalid BCM '%s' aux data size\n",
bcm->name);
return -EINVAL;
}
bcm_count++;
}
/* Generate BCM votes values for each bandwidth & BCM */
for (bw_index = 0; bw_index < gmu->nr_gpu_bws; bw_index++) {
u32 *data = gmu->gpu_ib_votes[bw_index];
u32 bw = gmu->gpu_bw_table[bw_index];
/* Calculations loosely copied from bcm_aggregate() & tcs_cmd_gen() */
for (bcm_index = 0; bcm_index < bcm_count; bcm_index++) {
const struct a6xx_bcm *bcm = &info->bcms[bcm_index];
bool commit = false;
u64 peak;
u32 vote;
if (bcm_index == bcm_count - 1 ||
(bcm_data[bcm_index + 1] &&
bcm_data[bcm_index]->vcd != bcm_data[bcm_index + 1]->vcd))
commit = true;
if (!bw) {
data[bcm_index] = BCM_TCS_CMD(commit, false, 0, 0);
continue;
}
if (bcm->fixed) {
u32 perfmode = 0;
/* GMU on A6xx votes perfmode on all valid bandwidth */
if (!adreno_is_a7xx(adreno_gpu) ||
(bcm->perfmode_bw && bw >= bcm->perfmode_bw))
perfmode = bcm->perfmode;
data[bcm_index] = BCM_TCS_CMD(commit, true, 0, perfmode);
continue;
}
/* Multiply the bandwidth by the width of the connection */
peak = (u64)bw * le16_to_cpu(bcm_data[bcm_index]->width);
do_div(peak, bcm->buswidth);
/* Input bandwidth value is in KBps, scale the value to BCM unit */
peak *= 1000;
do_div(peak, le32_to_cpu(bcm_data[bcm_index]->unit));
vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
/* GMUs on A7xx vote on both x & y */
if (adreno_is_a7xx(adreno_gpu))
data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote);
else
data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote);
}
}
return 0;
}
/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
unsigned long freq)
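For reference, the per-BCM arithmetic inside a6xx_gmu_rpmh_bw_votes_init() can be read in isolation as below; this is only a sketch (the helper name is made up), with bcm_db coming from cmd-db and BCM_TCS_CMD()/BCM_TCS_CMD_VOTE_MASK from soc/qcom/tcs.h, both as used in the hunk above:

static u32 a6xx_one_bcm_vote(u32 bw_kbps, const struct bcm_db *db,
                             const struct a6xx_bcm *bcm,
                             bool commit, bool is_a7xx)
{
        u64 peak;
        u32 vote;

        /* Multiply the bandwidth by the width of the connection */
        peak = (u64)bw_kbps * le16_to_cpu(db->width);
        do_div(peak, bcm->buswidth);

        /* Input bandwidth value is in KBps, scale the value to BCM unit */
        peak *= 1000;
        do_div(peak, le32_to_cpu(db->unit));

        vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);

        /* GMUs on A7xx vote on both x & y, earlier GMUs only on y */
        return is_a7xx ? BCM_TCS_CMD(commit, true, vote, vote)
                       : BCM_TCS_CMD(commit, true, 0, vote);
}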
@ -1390,12 +1531,15 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
* The GMU votes with the RPMh for itself and on behalf of the GPU but we need
* to construct the list of votes on the CPU and send it over. Query the RPMh
* voltage levels and build the votes
* The GMU can also vote for DDR interconnects, use the OPP bandwidth entries
* and BCM parameters to build the votes.
*/
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret;
@ -1407,6 +1551,10 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
/* Build the interconnect votes */
if (info->bcms && gmu->nr_gpu_bws > 1)
ret |= a6xx_gmu_rpmh_bw_votes_init(adreno_gpu, info, gmu);
return ret;
}
@ -1442,10 +1590,43 @@ static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
return index;
}
static int a6xx_gmu_build_bw_table(struct device *dev, unsigned long *bandwidths,
u32 size)
{
int count = dev_pm_opp_get_opp_count(dev);
struct dev_pm_opp *opp;
int i, index = 0;
unsigned int bandwidth = 1;
/*
* The OPP table doesn't contain the "off" bandwidth level so we need to
* add 1 to the table size to account for it
*/
if (WARN(count + 1 > size,
"The GMU bandwidth table is being truncated\n"))
count = size - 1;
/* Set the "off" bandwidth */
bandwidths[index++] = 0;
for (i = 0; i < count; i++) {
opp = dev_pm_opp_find_bw_ceil(dev, &bandwidth, 0);
if (IS_ERR(opp))
break;
dev_pm_opp_put(opp);
bandwidths[index++] = bandwidth++;
}
return index;
}
static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
int ret = 0;
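a6xx_gmu_build_bw_table() prepends an explicit "off" level and then walks the GPU OPP table in ascending bandwidth order. With a hypothetical OPP table advertising peak bandwidths of 2188000, 6832000 and 8532000 KBps, gpu_bw_table would end up as { 0, 2188000, 6832000, 8532000 } with nr_gpu_bws = 4; index 0 is what a6xx_gmu_set_freq() lands on when an OPP carries no bandwidth, which leaves bw_index at 0 and keeps the legacy dev_pm_opp_set_opp() path.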
@ -1472,6 +1653,14 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
gmu->current_perf_index = gmu->nr_gpu_freqs - 1;
/*
* The GMU also handles GPU Interconnect Votes so build a list
* of DDR bandwidths from the GPU OPP table
*/
if (info->bcms)
gmu->nr_gpu_bws = a6xx_gmu_build_bw_table(&gpu->pdev->dev,
gmu->gpu_bw_table, ARRAY_SIZE(gmu->gpu_bw_table));
/* Build the list of RPMh votes that we'll send to the GMU */
return a6xx_gmu_rpmh_votes_init(gmu);
}
@ -1603,7 +1792,9 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true);
ret = of_dma_configure(gmu->dev, node, true);
if (ret)
return ret;
pm_runtime_enable(gmu->dev);
@ -1668,7 +1859,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
of_dma_configure(gmu->dev, node, true);
ret = of_dma_configure(gmu->dev, node, true);
if (ret)
return ret;
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

View File

@ -19,6 +19,18 @@ struct a6xx_gmu_bo {
u64 iova;
};
#define GMU_MAX_GX_FREQS 16
#define GMU_MAX_CX_FREQS 4
#define GMU_MAX_BCMS 3
struct a6xx_bcm {
char *name;
unsigned int buswidth;
bool fixed;
unsigned int perfmode;
unsigned int perfmode_bw;
};
/*
* These define the different GMU wake up options - these define how both the
* CPU and the GMU bring up the hardware
@ -79,12 +91,16 @@ struct a6xx_gmu {
int current_perf_index;
int nr_gpu_freqs;
unsigned long gpu_freqs[16];
u32 gx_arc_votes[16];
unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
u32 gx_arc_votes[GMU_MAX_GX_FREQS];
int nr_gpu_bws;
unsigned long gpu_bw_table[GMU_MAX_GX_FREQS];
u32 gpu_ib_votes[GMU_MAX_GX_FREQS][GMU_MAX_BCMS];
int nr_gmu_freqs;
unsigned long gmu_freqs[4];
u32 cx_arc_votes[4];
unsigned long gmu_freqs[GMU_MAX_CX_FREQS];
u32 cx_arc_votes[GMU_MAX_CX_FREQS];
unsigned long freq;
@ -193,7 +209,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 perf_index, u32 bw_index);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);

View File

@ -1123,12 +1123,12 @@ static int hw_init(struct msm_gpu *gpu)
/* Disable L2 bypass in the UCHE */
if (adreno_is_a7xx(adreno_gpu)) {
gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
} else {
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, 0x0001ffffffffffc0llu);
gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, 0x0001fffffffff000llu);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, 0x0001fffffffff000llu);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX, adreno_gpu->uche_trap_base + 0xfc0);
gpu_write64(gpu, REG_A6XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
gpu_write64(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
}
if (!(adreno_is_a650_family(adreno_gpu) ||
@ -2533,6 +2533,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
}
}
adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
if (gpu->aspace)
msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
a6xx_fault_handler);

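The same refactor as on A4xx/A5xx applies here: 0x1fffffffff000 is the old 0x0001fffffffff000 constant, and the pre-A7xx WRITE_RANGE_MAX value 0x0001ffffffffffc0 is exactly uche_trap_base + 0xfc0, so the register programming is unchanged while the base becomes queryable through the new UAPI param.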
View File

@ -44,6 +44,7 @@ struct a6xx_info {
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
const struct a6xx_bcm *bcms;
};
struct a6xx_gpu {

View File

@ -6,6 +6,7 @@
#include <linux/list.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/tcs.h>
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
@ -259,6 +260,48 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
NULL, 0);
}
static void a6xx_generate_bw_table(const struct a6xx_info *info, struct a6xx_gmu *gmu,
struct a6xx_hfi_msg_bw_table *msg)
{
unsigned int i, j;
for (i = 0; i < GMU_MAX_BCMS; i++) {
if (!info->bcms[i].name)
break;
msg->ddr_cmds_addrs[i] = cmd_db_read_addr(info->bcms[i].name);
}
msg->ddr_cmds_num = i;
for (i = 0; i < gmu->nr_gpu_bws; ++i)
for (j = 0; j < msg->ddr_cmds_num; j++)
msg->ddr_cmds_data[i][j] = gmu->gpu_ib_votes[i][j];
msg->bw_level_num = gmu->nr_gpu_bws;
/* Compute the wait bitmask with each BCM having the commit bit */
msg->ddr_wait_bitmask = 0;
for (j = 0; j < msg->ddr_cmds_num; j++)
if (msg->ddr_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
msg->ddr_wait_bitmask |= BIT(j);
/*
* These are the CX (CNOC) votes - these are used by the GMU
* The 'CN0' BCM is used on all targets, and votes are basically
* 'off' and 'on' states with first bit to enable the path.
*/
msg->cnoc_cmds_addrs[0] = cmd_db_read_addr("CN0");
msg->cnoc_cmds_num = 1;
msg->cnoc_cmds_data[0][0] = BCM_TCS_CMD(true, false, 0, 0);
msg->cnoc_cmds_data[1][0] = BCM_TCS_CMD(true, true, 0, BIT(0));
/* Compute the wait bitmask with each BCM having the commit bit */
msg->cnoc_wait_bitmask = 0;
for (j = 0; j < msg->cnoc_cmds_num; j++)
if (msg->cnoc_cmds_data[0][j] & BCM_TCS_CMD_COMMIT_MASK)
msg->cnoc_wait_bitmask |= BIT(j);
}
static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
/* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
@ -664,6 +707,7 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
struct a6xx_hfi_msg_bw_table *msg;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
const struct a6xx_info *info = adreno_gpu->info->a6xx;
if (gmu->bw_table)
goto send;
@ -672,7 +716,9 @@ static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
if (!msg)
return -ENOMEM;
if (adreno_is_a618(adreno_gpu))
if (info->bcms && gmu->nr_gpu_bws > 1)
a6xx_generate_bw_table(info, gmu, msg);
else if (adreno_is_a618(adreno_gpu))
a618_build_bw_table(msg);
else if (adreno_is_a619(adreno_gpu))
a619_build_bw_table(msg);
@ -726,13 +772,13 @@ static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
sizeof(msg), NULL, 0);
}
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, u32 freq_index, u32 bw_index)
{
struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
msg.ack_type = 1; /* blocking */
msg.freq = index;
msg.bw = 0; /* TODO: bus scaling */
msg.freq = freq_index;
msg.bw = bw_index;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
sizeof(msg), NULL, 0);

View File

@ -173,6 +173,11 @@ struct a6xx_hfi_gx_bw_perf_vote_cmd {
u32 bw;
};
#define AB_VOTE_MASK GENMASK(31, 16)
#define MAX_AB_VOTE (FIELD_MAX(AB_VOTE_MASK) - 1)
#define AB_VOTE(vote) FIELD_PREP(AB_VOTE_MASK, (vote))
#define AB_VOTE_ENABLE BIT(8)
#define HFI_H2F_MSG_PREPARE_SLUMBER 33
struct a6xx_hfi_prep_slumber_cmd {

View File

@ -310,10 +310,11 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *drm = gpu->dev;
/* No pointer params yet */
if (*len != 0)
return -EINVAL;
return UERR(EINVAL, drm, "invalid len");
switch (param) {
case MSM_PARAM_GPU_ID:
@ -365,12 +366,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
return 0;
case MSM_PARAM_VA_START:
if (ctx->aspace == gpu->aspace)
return -EINVAL;
return UERR(EINVAL, drm, "requires per-process pgtables");
*value = ctx->aspace->va_start;
return 0;
case MSM_PARAM_VA_SIZE:
if (ctx->aspace == gpu->aspace)
return -EINVAL;
return UERR(EINVAL, drm, "requires per-process pgtables");
*value = ctx->aspace->va_size;
return 0;
case MSM_PARAM_HIGHEST_BANK_BIT:
@ -385,15 +386,19 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_MACROTILE_MODE:
*value = adreno_gpu->ubwc_config.macrotile_mode;
return 0;
case MSM_PARAM_UCHE_TRAP_BASE:
*value = adreno_gpu->uche_trap_base;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}
int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
uint32_t param, uint64_t value, uint32_t len)
{
struct drm_device *drm = gpu->dev;
switch (param) {
case MSM_PARAM_COMM:
case MSM_PARAM_CMDLINE:
@ -401,11 +406,11 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
* that should be a reasonable upper bound
*/
if (len > PAGE_SIZE)
return -EINVAL;
return UERR(EINVAL, drm, "invalid len");
break;
default:
if (len != 0)
return -EINVAL;
return UERR(EINVAL, drm, "invalid len");
}
switch (param) {
@ -434,11 +439,10 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
}
case MSM_PARAM_SYSPROF:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return UERR(EPERM, drm, "invalid permissions");
return msm_file_private_set_sysprof(ctx, gpu, value);
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
}
}

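The newly exposed MSM_PARAM_UCHE_TRAP_BASE is read through the existing GET_PARAM path; a small userspace sketch, assuming the usual libdrm drmCommandWriteRead() helper and the struct drm_msm_param layout from include/uapi/drm/msm_drm.h:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>

static int get_uche_trap_base(int fd, uint64_t *value)
{
        struct drm_msm_param req = {
                .pipe  = MSM_PIPE_3D0,
                .param = MSM_PARAM_UCHE_TRAP_BASE,
        };
        int ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));

        if (!ret)
                *value = req.value;

        return ret;
}

With the UERR() conversion above, invalid requests are now reported with a descriptive reason string (e.g. "invalid len") rather than a silent -EINVAL.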
View File

@ -253,6 +253,8 @@ struct adreno_gpu {
bool gmu_is_wrapper;
bool has_ray_tracing;
u64 uche_trap_base;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@ -559,6 +561,11 @@ static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
gpu->info->family == ADRENO_7XX_GEN3;
}
static inline int adreno_is_a750_family(struct adreno_gpu *gpu)
{
return gpu->info->family == ADRENO_7XX_GEN3;
}
static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
{
/* Update with non-fake (i.e. non-A702) Gen 7 GPUs */

View File

@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x400,
@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8650_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x400,
@ -252,25 +254,25 @@ static const struct dpu_pingpong_cfg sm8650_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_6", .id = PINGPONG_6,
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_7", .id = PINGPONG_7,
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_8", .id = PINGPONG_8,
.name = "pingpong_cwb_2", .id = PINGPONG_CWB_2,
.base = 0x7e000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_4,
}, {
.name = "pingpong_9", .id = PINGPONG_9,
.name = "pingpong_cwb_3", .id = PINGPONG_CWB_3,
.base = 0x7e400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
@ -350,6 +352,25 @@ static const struct dpu_wb_cfg sm8650_wb[] = {
},
};
static const struct dpu_cwb_cfg sm8650_cwb[] = {
{
.name = "cwb_0", .id = CWB_0,
.base = 0x66200, .len = 0x8,
},
{
.name = "cwb_1", .id = CWB_1,
.base = 0x66600, .len = 0x8,
},
{
.name = "cwb_2", .id = CWB_2,
.base = 0x7E200, .len = 0x8,
},
{
.name = "cwb_3", .id = CWB_3,
.base = 0x7E600, .len = 0x8,
},
};
static const struct dpu_intf_cfg sm8650_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@ -447,6 +468,8 @@ const struct dpu_mdss_cfg dpu_sm8650_cfg = {
.merge_3d = sm8650_merge_3d,
.wb_count = ARRAY_SIZE(sm8650_wb),
.wb = sm8650_wb,
.cwb_count = ARRAY_SIZE(sm8650_cwb),
.cwb = sm8650_cwb,
.intf_count = ARRAY_SIZE(sm8650_intf),
.intf = sm8650_intf,
.vbif_count = ARRAY_SIZE(sm8650_vbif),

View File

@ -65,6 +65,54 @@ static const struct dpu_sspp_cfg sdm670_sspp[] = {
},
};
static const struct dpu_lm_cfg sdm670_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
.dspp = DSPP_1,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_2,
}, {
.name = "lm_5", .id = LM_5,
.base = 0x49000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
},
};
static const struct dpu_dspp_cfg sdm670_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
.features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
.features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
static const struct dpu_dsc_cfg sdm670_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
@ -88,8 +136,10 @@ const struct dpu_mdss_cfg dpu_sdm670_cfg = {
.ctl = sdm845_ctl,
.sspp_count = ARRAY_SIZE(sdm670_sspp),
.sspp = sdm670_sspp,
.mixer_count = ARRAY_SIZE(sdm845_lm),
.mixer = sdm845_lm,
.mixer_count = ARRAY_SIZE(sdm670_lm),
.mixer = sdm670_lm,
.dspp_count = ARRAY_SIZE(sdm670_dspp),
.dspp = sdm670_dspp,
.pingpong_count = ARRAY_SIZE(sdm845_pp),
.pingpong = sdm845_pp,
.dsc_count = ARRAY_SIZE(sdm670_dsc),

View File

@ -164,6 +164,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@ -171,6 +172,7 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,

View File

@ -163,6 +163,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@ -170,6 +171,7 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,

View File

@ -0,0 +1,254 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DPU_5_3_SM6150_H
#define _DPU_5_3_SM6150_H
static const struct dpu_caps sm6150_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x9,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
.max_hdeci_exp = MAX_HORZ_DECIMATION,
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
static const struct dpu_mdp_cfg sm6150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
.features = 0,
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
},
};
static const struct dpu_ctl_cfg sm6150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sm6150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
.features = VIG_SDM845_MASK_SDMA,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_8", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
}, {
.name = "sspp_11", .id = SSPP_DMA3,
.base = 0x2a000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK_SDMA,
.sblk = &dpu_dma_sblk,
.xin_id = 13,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA3,
},
};
static const struct dpu_lm_cfg sm6150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
.features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
.lm_pair = LM_1,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
.features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
.lm_pair = LM_0,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
.features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_2,
},
};
static const struct dpu_dspp_cfg sm6150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
.features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
static const struct dpu_pingpong_cfg sm6150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
.features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
.features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
.features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
},
};
static const struct dpu_intf_cfg sm6150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
.features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2c0,
.features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
.intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
.features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
},
};
static const struct dpu_perf_cfg sm6150_perf_data = {
.max_bw_low = 4800000,
.max_bw_high = 4800000,
.min_core_ib = 2400000,
.min_llcc_ib = 0,
.min_dram_ib = 800000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
.qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sm8150_qos_linear),
.entries = sm8150_qos_linear
},
{.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
.entries = sc7180_qos_macrotile
},
{.nentry = ARRAY_SIZE(sc7180_qos_nrt),
.entries = sc7180_qos_nrt
},
/* TODO: macrotile-qseed is different from macrotile */
},
.cdp_cfg = {
{.rd_enable = 1, .wr_enable = 1},
{.rd_enable = 1, .wr_enable = 0}
},
.clk_inefficiency_factor = 105,
.bw_inefficiency_factor = 120,
};
static const struct dpu_mdss_version sm6150_mdss_ver = {
.core_major_ver = 5,
.core_minor_ver = 3,
};
const struct dpu_mdss_cfg dpu_sm6150_cfg = {
.mdss_ver = &sm6150_mdss_ver,
.caps = &sm6150_dpu_caps,
.mdp = &sm6150_mdp,
.ctl_count = ARRAY_SIZE(sm6150_ctl),
.ctl = sm6150_ctl,
.sspp_count = ARRAY_SIZE(sm6150_sspp),
.sspp = sm6150_sspp,
.mixer_count = ARRAY_SIZE(sm6150_lm),
.mixer = sm6150_lm,
.dspp_count = ARRAY_SIZE(sm6150_dspp),
.dspp = sm6150_dspp,
.pingpong_count = ARRAY_SIZE(sm6150_pp),
.pingpong = sm6150_pp,
.intf_count = ARRAY_SIZE(sm6150_intf),
.intf = sm6150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sm6150_perf_data,
};
#endif

View File

@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8250_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,

View File

@ -162,6 +162,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@ -169,6 +170,7 @@ static const struct dpu_lm_cfg sm8350_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,

View File

@ -257,13 +257,13 @@ static const struct dpu_pingpong_cfg sm8450_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_6", .id = PINGPONG_6,
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_7", .id = PINGPONG_7,
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,

View File

@ -256,13 +256,13 @@ static const struct dpu_pingpong_cfg sa8775p_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_6", .id = PINGPONG_6,
.name = "pingpong_6", .id = PINGPONG_CWB_0,
.base = 0x65800, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_7", .id = PINGPONG_7,
.name = "pingpong_7", .id = PINGPONG_CWB_1,
.base = 0x65c00, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,

View File

@ -160,6 +160,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@ -167,6 +168,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@ -251,13 +253,13 @@ static const struct dpu_pingpong_cfg sm8550_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_6", .id = PINGPONG_6,
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_7", .id = PINGPONG_7,
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,

View File

@ -159,6 +159,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
@ -166,6 +167,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
.base = 0x48000, .len = 0x320,
@ -251,13 +253,13 @@ static const struct dpu_pingpong_cfg x1e80100_pp[] = {
.merge_3d = MERGE_3D_2,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
}, {
.name = "pingpong_6", .id = PINGPONG_6,
.name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
.base = 0x66000, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
.merge_3d = MERGE_3D_3,
}, {
.name = "pingpong_7", .id = PINGPONG_7,
.name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
.base = 0x66400, .len = 0,
.features = BIT(DPU_PINGPONG_DITHER),
.sblk = &sc7280_pp_sblk,
@ -389,8 +391,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_2,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,

View File

@ -732,6 +732,13 @@ static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
int i;
/* if we cannot merge 2 LMs (no 3d mux) better to fail earlier
* before even checking the width after the split
*/
if (!dpu_kms->catalog->caps->has_3d_merge &&
adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
return -E2BIG;
for (i = 0; i < cstate->num_mixers; i++) {
struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
@ -1182,6 +1189,49 @@ static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
return false;
}
static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state)
{
int total_planes = crtc->dev->mode_config.num_total_plane;
struct drm_atomic_state *state = crtc_state->state;
struct dpu_global_state *global_state;
struct drm_plane_state **states;
struct drm_plane *plane;
int ret;
global_state = dpu_kms_get_global_state(crtc_state->state);
if (IS_ERR(global_state))
return PTR_ERR(global_state);
dpu_rm_release_all_sspp(global_state, crtc);
if (!crtc_state->enable)
return 0;
states = kcalloc(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
return -ENOMEM;
drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
goto done;
}
states[plane_state->normalized_zpos] = plane_state;
}
ret = dpu_assign_plane_resources(global_state, state, crtc, states, total_planes);
done:
kfree(states);
return ret;
}
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
@ -1197,6 +1247,13 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
if (dpu_use_virtual_planes &&
(crtc_state->planes_changed || crtc_state->zpos_changed)) {
rc = dpu_crtc_reassign_planes(crtc, crtc_state);
if (rc < 0)
return rc;
}
if (!crtc_state->enable || !drm_atomic_crtc_effectively_active(crtc_state)) {
DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
crtc->base.id, crtc_state->enable,
@ -1251,6 +1308,12 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
{
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
/* if there is no 3d_mux block we cannot merge LMs so we cannot
* split the large layer into 2 LMs, filter out such modes
*/
if (!dpu_kms->catalog->caps->has_3d_merge &&
mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
return MODE_BAD_HVALUE;
/*
* max crtc width is equal to the max mixer width * 2 and max height is 4K
*/

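Concretely: on a catalog without has_3d_merge and with max_mixer_width of 2560 (DEFAULT_DPU_OUTPUT_LINE_WIDTH), a 3840x2160 mode is now filtered out with MODE_BAD_HVALUE at mode_valid time (and would fail atomic_check with -E2BIG), while a platform with a 3DMux still accepts it by splitting the CRTC across a pair of mixers.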
View File

@ -800,7 +800,7 @@ static int dpu_encoder_virt_atomic_check(
if (!crtc_state->active_changed || crtc_state->enable)
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
drm_enc, crtc_state, topology);
drm_enc, crtc_state, &topology);
if (!ret)
dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
global_state, crtc_state);

View File

@ -765,6 +765,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
#include "catalog/dpu_5_2_sm7150.h"
#include "catalog/dpu_5_3_sm6150.h"
#include "catalog/dpu_5_4_sm6125.h"
#include "catalog/dpu_6_0_sm8250.h"

View File

@ -613,6 +613,16 @@ struct dpu_wb_cfg {
enum dpu_clk_ctrl_type clk_ctrl;
};
/*
* struct dpu_cwb_cfg : MDP CWB mux instance info
* @id: enum identifying this block
* @base: register base offset to mdss
* @features bit mask identifying sub-blocks/features
*/
struct dpu_cwb_cfg {
DPU_HW_BLK_INFO;
};
/**
* struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
* @pps pixel per seconds
@ -815,6 +825,9 @@ struct dpu_mdss_cfg {
u32 dspp_count;
const struct dpu_dspp_cfg *dspp;
u32 cwb_count;
const struct dpu_cwb_cfg *cwb;
/* Add additional block data structures here */
const struct dpu_perf_cfg *perf;
@ -839,6 +852,7 @@ extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
extern const struct dpu_mdss_cfg dpu_sc7180_cfg;
extern const struct dpu_mdss_cfg dpu_sm6115_cfg;
extern const struct dpu_mdss_cfg dpu_sm6125_cfg;
extern const struct dpu_mdss_cfg dpu_sm6150_cfg;
extern const struct dpu_mdss_cfg dpu_sm6350_cfg;
extern const struct dpu_mdss_cfg dpu_qcm2290_cfg;
extern const struct dpu_mdss_cfg dpu_sm6375_cfg;

View File

@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
*/
#include <drm/drm_managed.h>
#include "dpu_hw_cwb.h"
#include <linux/bitfield.h>
#define CWB_MUX 0x000
#define CWB_MODE 0x004
/* CWB mux block bit definitions */
#define CWB_MUX_MASK GENMASK(3, 0)
#define CWB_MODE_MASK GENMASK(2, 0)
static void dpu_hw_cwb_config(struct dpu_hw_cwb *ctx,
struct dpu_hw_cwb_setup_cfg *cwb_cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
int cwb_mux_cfg = 0xF;
enum dpu_pingpong pp;
enum cwb_mode_input input;
if (!cwb_cfg)
return;
input = cwb_cfg->input;
pp = cwb_cfg->pp_idx;
if (input >= INPUT_MODE_MAX)
return;
/*
* The CWB_MUX register takes the pingpong index for the real-time
* display
*/
if ((pp != PINGPONG_NONE) && (pp < PINGPONG_MAX))
cwb_mux_cfg = FIELD_PREP(CWB_MUX_MASK, pp - PINGPONG_0);
input = FIELD_PREP(CWB_MODE_MASK, input);
DPU_REG_WRITE(c, CWB_MUX, cwb_mux_cfg);
DPU_REG_WRITE(c, CWB_MODE, input);
}
/**
* dpu_hw_cwb_init() - Initializes the CWB mux hw driver object.
* @dev: Corresponding device for devres management
* @cfg: CWB catalog entry for which driver object is required
* @addr: mapped register io address of MDP
* Return: Error code or allocated dpu_hw_cwb context
*/
struct dpu_hw_cwb *dpu_hw_cwb_init(struct drm_device *dev,
const struct dpu_cwb_cfg *cfg,
void __iomem *addr)
{
struct dpu_hw_cwb *c;
if (!addr)
return ERR_PTR(-EINVAL);
c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
if (!c)
return ERR_PTR(-ENOMEM);
c->hw.blk_addr = addr + cfg->base;
c->hw.log_mask = DPU_DBG_MASK_CWB;
c->idx = cfg->id;
c->ops.config_cwb = dpu_hw_cwb_config;
return c;
}

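A minimal usage sketch for the new CWB mux driver; the dev/catalog/mmio variables here are illustrative stand-ins for whatever the eventual caller has at hand:

struct dpu_hw_cwb_setup_cfg cwb_cfg = {
        .pp_idx = PINGPONG_2,           /* real-time pingpong feeding the mux */
        .input = INPUT_MODE_DSPP_OUT,   /* tap the post-DSPP output */
};
struct dpu_hw_cwb *cwb;

cwb = dpu_hw_cwb_init(dev, &catalog->cwb[0], dpu_kms->mmio);
if (IS_ERR(cwb))
        return PTR_ERR(cwb);

cwb->ops.config_cwb(cwb, &cwb_cfg);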
View File

@ -0,0 +1,70 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
*/
#ifndef _DPU_HW_CWB_H
#define _DPU_HW_CWB_H
#include "dpu_hw_util.h"
struct dpu_hw_cwb;
enum cwb_mode_input {
INPUT_MODE_LM_OUT,
INPUT_MODE_DSPP_OUT,
INPUT_MODE_MAX
};
/**
* struct dpu_hw_cwb_setup_cfg : Describes configuration for CWB mux
* @pp_idx: Index of the real-time pingpong that will feed the CWB mux
* @input: Input tap point
*/
struct dpu_hw_cwb_setup_cfg {
enum dpu_pingpong pp_idx;
enum cwb_mode_input input;
};
/**
*
* struct dpu_hw_cwb_ops : Interface to the cwb hw driver functions
* @config_cwb: configure CWB mux
*/
struct dpu_hw_cwb_ops {
void (*config_cwb)(struct dpu_hw_cwb *ctx,
struct dpu_hw_cwb_setup_cfg *cwb_cfg);
};
/**
* struct dpu_hw_cwb : CWB mux driver object
* @base: Hardware block base structure
* @hw: Block hardware details
* @idx: CWB index
* @ops: handle to operations possible for this CWB
*/
struct dpu_hw_cwb {
struct dpu_hw_blk base;
struct dpu_hw_blk_reg_map hw;
enum dpu_cwb idx;
struct dpu_hw_cwb_ops ops;
};
/**
* to_dpu_hw_cwb - convert base object dpu_hw_blk to container
* @hw: Pointer to base hardware block
* return: Pointer to hardware block container
*/
static inline struct dpu_hw_cwb *to_dpu_hw_cwb(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_cwb, base);
}
struct dpu_hw_cwb *dpu_hw_cwb_init(struct drm_device *dev,
const struct dpu_cwb_cfg *cfg,
void __iomem *addr);
#endif /*_DPU_HW_CWB_H */

View File

@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_MDSS_H
@ -181,10 +183,10 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
PINGPONG_6,
PINGPONG_7,
PINGPONG_8,
PINGPONG_9,
PINGPONG_CWB_0,
PINGPONG_CWB_1,
PINGPONG_CWB_2,
PINGPONG_CWB_3,
PINGPONG_S0,
PINGPONG_MAX
};
@ -350,6 +352,7 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_DSPP (1 << 10)
#define DPU_DBG_MASK_DSC (1 << 11)
#define DPU_DBG_MASK_CDM (1 << 12)
#define DPU_DBG_MASK_CWB (1 << 13)
/**
* struct dpu_hw_tear_check - Struct contains parameters to configure

View File

@ -173,7 +173,9 @@ static void dpu_hw_wb_bind_pingpong_blk(
mux_cfg = DPU_REG_READ(c, WB_MUX);
mux_cfg &= ~0xf;
if (pp)
if (pp >= PINGPONG_CWB_0)
mux_cfg |= (pp < PINGPONG_CWB_2) ? 0xd : 0xb;
else if (pp)
mux_cfg |= (pp - PINGPONG_0) & 0x7;
else
mux_cfg |= 0xf;

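For reference, the WB_MUX low nibble produced by the branch above:

PINGPONG_0 .. PINGPONG_5        -> 0x0 .. 0x5 (real-time pingpong)
PINGPONG_CWB_0 / PINGPONG_CWB_1 -> 0xd
PINGPONG_CWB_2 / PINGPONG_CWB_3 -> 0xb
PINGPONG_NONE                   -> 0xf (writeback not fed by a pingpong)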
View File

@ -51,6 +51,9 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
bool dpu_use_virtual_planes;
module_param(dpu_use_virtual_planes, bool, 0);
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
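dpu_use_virtual_planes is a plain module parameter with permissions 0, so it is read-only at runtime and off by default; assuming the driver is built as the usual msm module, virtual planes are enabled with msm.dpu_use_virtual_planes=1 on the kernel command line or dpu_use_virtual_planes=1 as a modprobe option.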
@ -829,8 +832,11 @@ static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
type, catalog->sspp[i].features,
catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
(1UL << max_crtc_count) - 1);
if (dpu_use_virtual_planes)
plane = dpu_plane_init_virtual(dev, type, (1UL << max_crtc_count) - 1);
else
plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
(1UL << max_crtc_count) - 1);
if (IS_ERR(plane)) {
DPU_ERROR("dpu_plane_init failed\n");
ret = PTR_ERR(plane);
@ -932,12 +938,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump CTL sub-blocks HW regs info */
for (i = 0; i < cat->ctl_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->ctl[i].len,
dpu_kms->mmio + cat->ctl[i].base, cat->ctl[i].name);
dpu_kms->mmio + cat->ctl[i].base, "%s",
cat->ctl[i].name);
/* dump DSPP sub-blocks HW regs info */
for (i = 0; i < cat->dspp_count; i++) {
base = dpu_kms->mmio + cat->dspp[i].base;
msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base, cat->dspp[i].name);
msm_disp_snapshot_add_block(disp_state, cat->dspp[i].len, base,
"%s", cat->dspp[i].name);
if (cat->dspp[i].sblk && cat->dspp[i].sblk->pcc.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->dspp[i].sblk->pcc.len,
@ -949,13 +957,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump INTF sub-blocks HW regs info */
for (i = 0; i < cat->intf_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->intf[i].len,
dpu_kms->mmio + cat->intf[i].base, cat->intf[i].name);
dpu_kms->mmio + cat->intf[i].base, "%s",
cat->intf[i].name);
/* dump PP sub-blocks HW regs info */
for (i = 0; i < cat->pingpong_count; i++) {
base = dpu_kms->mmio + cat->pingpong[i].base;
msm_disp_snapshot_add_block(disp_state, cat->pingpong[i].len, base,
cat->pingpong[i].name);
"%s", cat->pingpong[i].name);
/* TE2 sub-block has length of 0, so will not print it */
@ -969,7 +978,8 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump SSPP sub-blocks HW regs info */
for (i = 0; i < cat->sspp_count; i++) {
base = dpu_kms->mmio + cat->sspp[i].base;
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base, cat->sspp[i].name);
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].len, base,
"%s", cat->sspp[i].name);
if (cat->sspp[i].sblk && cat->sspp[i].sblk->scaler_blk.len > 0)
msm_disp_snapshot_add_block(disp_state, cat->sspp[i].sblk->scaler_blk.len,
@ -987,12 +997,14 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
/* dump LM sub-blocks HW regs info */
for (i = 0; i < cat->mixer_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->mixer[i].len,
dpu_kms->mmio + cat->mixer[i].base, cat->mixer[i].name);
dpu_kms->mmio + cat->mixer[i].base,
"%s", cat->mixer[i].name);
/* dump WB sub-blocks HW regs info */
for (i = 0; i < cat->wb_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
dpu_kms->mmio + cat->wb[i].base, cat->wb[i].name);
dpu_kms->mmio + cat->wb[i].base, "%s",
cat->wb[i].name);
if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
@ -1004,10 +1016,16 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
dpu_kms->mmio + cat->mdp[0].base, "top");
}
/* dump CWB sub-blocks HW regs info */
for (i = 0; i < cat->cwb_count; i++)
msm_disp_snapshot_add_block(disp_state, cat->cwb[i].len,
dpu_kms->mmio + cat->cwb[i].base, cat->cwb[i].name);
/* dump DSC sub-blocks HW regs info */
for (i = 0; i < cat->dsc_count; i++) {
base = dpu_kms->mmio + cat->dsc[i].base;
msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base, cat->dsc[i].name);
msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len, base,
"%s", cat->dsc[i].name);
if (cat->dsc[i].features & BIT(DPU_DSC_HW_REV_1_2)) {
struct dpu_dsc_blk enc = cat->dsc[i].sblk->enc;
@ -1022,7 +1040,16 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
if (cat->cdm)
msm_disp_snapshot_add_block(disp_state, cat->cdm->len,
dpu_kms->mmio + cat->cdm->base, cat->cdm->name);
dpu_kms->mmio + cat->cdm->base,
"%s", cat->cdm->name);
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
const struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
msm_disp_snapshot_add_block(disp_state, vbif->len,
dpu_kms->vbif[vbif->id] + vbif->base,
"%s", vbif->name);
}
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
@ -1478,6 +1505,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sc8280xp-dpu", .data = &dpu_sc8280xp_cfg, },
{ .compatible = "qcom,sm6115-dpu", .data = &dpu_sm6115_cfg, },
{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
{ .compatible = "qcom,sm6150-dpu", .data = &dpu_sm6150_cfg, },
{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
{ .compatible = "qcom,sm7150-dpu", .data = &dpu_sm7150_cfg, },

View File

@ -54,6 +54,8 @@
#define ktime_compare_safe(A, B) \
ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
extern bool dpu_use_virtual_planes;
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
@ -128,6 +130,8 @@ struct dpu_global_state {
uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
uint32_t dsc_to_enc_id[DSC_MAX - DSC_0];
uint32_t cdm_to_enc_id;
uint32_t sspp_to_crtc_id[SSPP_MAX - SSPP_NONE];
};
struct dpu_global_state

View File

@ -20,7 +20,6 @@
#include "msm_drv.h"
#include "msm_mdss.h"
#include "dpu_kms.h"
#include "dpu_formats.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_util.h"
#include "dpu_trace.h"
@ -878,7 +877,7 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
drm_rect_rotate_inv(&pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
if (r_pipe_cfg->src_rect.x1 != 0)
if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
@ -888,6 +887,32 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return 0;
}
static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
struct dpu_sw_pipe_cfg *pipe_cfg,
const struct msm_format *fmt,
uint32_t max_linewidth)
{
if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect))
return false;
if (pipe_cfg->rotation & DRM_MODE_ROTATE_90)
return false;
if (MSM_FORMAT_IS_YUV(fmt))
return false;
if (MSM_FORMAT_IS_UBWC(fmt) &&
drm_rect_width(&pipe_cfg->src_rect) > max_linewidth / 2)
return false;
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
return true;
}
static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct drm_atomic_state *state,
const struct drm_crtc_state *crtc_state)
@ -901,7 +926,6 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
const struct msm_format *fmt;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
uint32_t max_linewidth;
uint32_t supported_rotations;
const struct dpu_sspp_cfg *pipe_hw_caps;
const struct dpu_sspp_sub_blks *sblk;
@ -923,8 +947,6 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
fmt = msm_framebuffer_format(new_plane_state->fb);
max_linewidth = pdpu->catalog->caps->max_linewidth;
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0;
if (pipe_hw_caps->features & BIT(DPU_SSPP_INLINE_ROTATION))
@ -940,41 +962,6 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
return ret;
if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
/*
* In parallel multirect case only the half of the usual width
* is supported for tiled formats. If we are here, we know that
* full width is more than max_linewidth, thus each rect is
* wider than allowed.
*/
if (MSM_FORMAT_IS_UBWC(fmt) &&
drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, tiled format\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
}
if (drm_rect_width(&pipe_cfg->src_rect) != drm_rect_width(&pipe_cfg->dst_rect) ||
drm_rect_height(&pipe_cfg->src_rect) != drm_rect_height(&pipe_cfg->dst_rect) ||
(!test_bit(DPU_SSPP_SMART_DMA_V1, &pipe->sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &pipe->sspp->cap->features)) ||
pipe_cfg->rotation & DRM_MODE_ROTATE_90 ||
MSM_FORMAT_IS_YUV(fmt)) {
DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u, can't use split source\n",
DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
return -E2BIG;
}
/*
* Use multirect for wide plane. We do not support dynamic
* assignment of SSPPs, so we know the configuration.
*/
pipe->multirect_index = DPU_SSPP_RECT_0;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
r_pipe->sspp = pipe->sspp;
r_pipe->multirect_index = DPU_SSPP_RECT_1;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
&crtc_state->adjusted_mode);
if (ret)
@ -984,6 +971,36 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
return 0;
}
static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dpu_sw_pipe_cfg *pipe_cfg,
struct dpu_sw_pipe *r_pipe, struct dpu_sw_pipe_cfg *r_pipe_cfg,
struct dpu_hw_sspp *sspp, const struct msm_format *fmt,
uint32_t max_linewidth)
{
r_pipe->sspp = NULL;
pipe->multirect_index = DPU_SSPP_RECT_SOLO;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
if (!dpu_plane_is_multirect_parallel_capable(pipe->sspp, pipe_cfg, fmt, max_linewidth) ||
!dpu_plane_is_multirect_parallel_capable(pipe->sspp, r_pipe_cfg, fmt, max_linewidth))
return false;
r_pipe->sspp = pipe->sspp;
pipe->multirect_index = DPU_SSPP_RECT_0;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
r_pipe->multirect_index = DPU_SSPP_RECT_1;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
}
return true;
}
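dpu_plane_try_multirect_parallel() is the gate that decides whether a wide plane, already split into two rects, can still be served by a single SSPP instead of the two-SSPP fallback used on the virtual-plane path. A minimal standalone sketch of the width arithmetic only, assuming a catalog max_linewidth of 2560 (the value and helper names are assumptions; the real checks above additionally reject scaling, 90-degree rotation, YUV formats and SSPPs without SmartDMA):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: a UBWC rect may use at most half the line width when
 * sharing one SSPP in parallel multirect; linear rects may use the full
 * line width (that bound is enforced by the per-pipe checks).
 */
static bool half_rect_ok(unsigned int half_width, bool ubwc,
			 unsigned int max_linewidth)
{
	unsigned int limit = ubwc ? max_linewidth / 2 : max_linewidth;

	return half_width <= limit;
}

int main(void)
{
	unsigned int half = 3840 / 2;	/* each rect of a split 3840-wide plane */

	printf("linear: %s\n", half_rect_ok(half, false, 2560) ?
	       "parallel multirect on one SSPP" : "falls back to a second SSPP");
	printf("UBWC:   %s\n", half_rect_ok(half, true, 2560) ?
	       "parallel multirect on one SSPP" : "falls back to a second SSPP");
	return 0;
}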
static int dpu_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
@ -995,14 +1012,19 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_sw_pipe *pipe = &pstate->pipe;
struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
const struct drm_crtc_state *crtc_state = NULL;
uint32_t max_linewidth = dpu_kms->catalog->caps->max_linewidth;
if (new_plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
pipe->sspp = dpu_rm_get_sspp(&dpu_kms->rm, pdpu->pipe);
r_pipe->sspp = NULL;
if (!pipe->sspp)
return -EINVAL;
ret = dpu_plane_atomic_check_nosspp(plane, new_plane_state, crtc_state);
if (ret)
@ -1011,14 +1033,155 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
if (!new_plane_state->visible)
return 0;
pipe->multirect_index = DPU_SSPP_RECT_SOLO;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
pipe->sspp,
msm_framebuffer_format(new_plane_state->fb),
max_linewidth)) {
DPU_DEBUG_PLANE(pdpu, "invalid " DRM_RECT_FMT " /" DRM_RECT_FMT
" max_line:%u, can't use split source\n",
DRM_RECT_ARG(&pipe_cfg->src_rect),
DRM_RECT_ARG(&r_pipe_cfg->src_rect),
max_linewidth);
return -E2BIG;
}
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
}
static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
struct drm_plane_state *old_plane_state =
drm_atomic_get_old_plane_state(state, plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
struct drm_crtc_state *crtc_state = NULL;
int ret;
if (plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
plane_state->crtc);
ret = dpu_plane_atomic_check_nosspp(plane, plane_state, crtc_state);
if (ret)
return ret;
if (!plane_state->visible) {
/*
* resources are freed by dpu_crtc_assign_plane_resources(),
* but clean them here.
*/
pstate->pipe.sspp = NULL;
pstate->r_pipe.sspp = NULL;
return 0;
}
/*
* Force resource reallocation if the format of FB or src/dst have
* changed. We might need to allocate different SSPP or SSPPs for this
* plane than the one used previously.
*/
if (!old_plane_state || !old_plane_state->fb ||
old_plane_state->src_w != plane_state->src_w ||
old_plane_state->src_h != plane_state->src_h ||
old_plane_state->crtc_w != plane_state->crtc_w ||
old_plane_state->crtc_h != plane_state->crtc_h ||
msm_framebuffer_format(old_plane_state->fb) !=
msm_framebuffer_format(plane_state->fb))
crtc_state->planes_changed = true;
return 0;
}
static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
struct dpu_global_state *global_state,
struct drm_atomic_state *state,
struct drm_plane_state *plane_state)
{
const struct drm_crtc_state *crtc_state = NULL;
struct drm_plane *plane = plane_state->plane;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_rm_sspp_requirements reqs;
struct dpu_plane_state *pstate;
struct dpu_sw_pipe *pipe;
struct dpu_sw_pipe *r_pipe;
struct dpu_sw_pipe_cfg *pipe_cfg;
struct dpu_sw_pipe_cfg *r_pipe_cfg;
const struct msm_format *fmt;
if (plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
plane_state->crtc);
pstate = to_dpu_plane_state(plane_state);
pipe = &pstate->pipe;
r_pipe = &pstate->r_pipe;
pipe_cfg = &pstate->pipe_cfg;
r_pipe_cfg = &pstate->r_pipe_cfg;
pipe->sspp = NULL;
r_pipe->sspp = NULL;
if (!plane_state->fb)
return -EINVAL;
fmt = msm_framebuffer_format(plane_state->fb);
reqs.yuv = MSM_FORMAT_IS_YUV(fmt);
reqs.scale = (plane_state->src_w >> 16 != plane_state->crtc_w) ||
(plane_state->src_h >> 16 != plane_state->crtc_h);
reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
if (!pipe->sspp)
return -ENODEV;
if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
pipe->sspp,
msm_framebuffer_format(plane_state->fb),
dpu_kms->catalog->caps->max_linewidth)) {
/* multirect is not possible, use two SSPP blocks */
r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
if (!r_pipe->sspp)
return -ENODEV;
pipe->multirect_index = DPU_SSPP_RECT_SOLO;
pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
}
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
}
int dpu_assign_plane_resources(struct dpu_global_state *global_state,
struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_plane_state **states,
unsigned int num_planes)
{
unsigned int i;
int ret = 0;
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
if (!plane_state ||
!plane_state->visible)
continue;
ret = dpu_plane_virtual_assign_resources(crtc, global_state,
state, plane_state);
if (ret)
break;
}
return ret;
}
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
{
const struct msm_format *format =
@ -1335,12 +1498,15 @@ static void dpu_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tstage=%d\n", pstate->stage);
drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
drm_printf(p, "\tmultirect_mode[0]=%s\n", dpu_get_multirect_mode(pipe->multirect_mode));
drm_printf(p, "\tmultirect_index[0]=%s\n",
dpu_get_multirect_index(pipe->multirect_index));
drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
if (pipe->sspp) {
drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
drm_printf(p, "\tmultirect_mode[0]=%s\n",
dpu_get_multirect_mode(pipe->multirect_mode));
drm_printf(p, "\tmultirect_index[0]=%s\n",
dpu_get_multirect_index(pipe->multirect_index));
drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
}
if (r_pipe->sspp) {
drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name);
@ -1433,39 +1599,29 @@ static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
.atomic_update = dpu_plane_atomic_update,
};
/**
* dpu_plane_init - create new dpu plane for the given pipe
* @dev: Pointer to DRM device
* @pipe: dpu hardware pipe identifier
* @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
*
* Initialize the plane.
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs)
static const struct drm_plane_helper_funcs dpu_plane_virtual_helper_funcs = {
.prepare_fb = dpu_plane_prepare_fb,
.cleanup_fb = dpu_plane_cleanup_fb,
.atomic_check = dpu_plane_virtual_atomic_check,
.atomic_update = dpu_plane_atomic_update,
};
/* initialize plane */
static struct drm_plane *dpu_plane_init_common(struct drm_device *dev,
enum drm_plane_type type,
unsigned long possible_crtcs,
bool inline_rotation,
const uint32_t *format_list,
uint32_t num_formats,
enum dpu_sspp pipe)
{
struct drm_plane *plane = NULL;
const uint32_t *format_list;
struct dpu_plane *pdpu;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
struct dpu_hw_sspp *pipe_hw;
uint32_t num_formats;
uint32_t supported_rotations;
int ret;
/* initialize underlying h/w driver */
pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
DPU_ERROR("[%u]SSPP is invalid\n", pipe);
return ERR_PTR(-EINVAL);
}
format_list = pipe_hw->cap->sblk->format_list;
num_formats = pipe_hw->cap->sblk->num_formats;
pdpu = drmm_universal_plane_alloc(dev, struct dpu_plane, base,
0xff, &dpu_plane_funcs,
format_list, num_formats,
@ -1491,7 +1647,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
supported_rotations = DRM_MODE_REFLECT_MASK | DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
if (pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION))
if (inline_rotation)
supported_rotations |= DRM_MODE_ROTATE_MASK;
drm_plane_create_rotation_property(plane,
@ -1499,10 +1655,98 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
drm_plane_enable_fb_damage_clips(plane);
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
return plane;
}
/**
* dpu_plane_init - create new dpu plane for the given pipe
* @dev: Pointer to DRM device
* @pipe: dpu hardware pipe identifier
* @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
*
* Initialize the plane.
*/
struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs)
{
struct drm_plane *plane = NULL;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
struct dpu_hw_sspp *pipe_hw;
/* initialize underlying h/w driver */
pipe_hw = dpu_rm_get_sspp(&kms->rm, pipe);
if (!pipe_hw || !pipe_hw->cap || !pipe_hw->cap->sblk) {
DPU_ERROR("[%u]SSPP is invalid\n", pipe);
return ERR_PTR(-EINVAL);
}
plane = dpu_plane_init_common(dev, type, possible_crtcs,
pipe_hw->cap->features & BIT(DPU_SSPP_INLINE_ROTATION),
pipe_hw->cap->sblk->format_list,
pipe_hw->cap->sblk->num_formats,
pipe);
if (IS_ERR(plane))
return plane;
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
DPU_DEBUG("%s created for pipe:%u id:%u\n", plane->name,
pipe, plane->base.id);
return plane;
}
/**
* dpu_plane_init_virtual - create new virtualized DPU plane
* @dev: Pointer to DRM device
* @type: Plane type - PRIMARY/OVERLAY/CURSOR
* @possible_crtcs: bitmask of crtc that can be attached to the given pipe
*
* Initialize the virtual plane with no backing SSPP / pipe.
*/
struct drm_plane *dpu_plane_init_virtual(struct drm_device *dev,
enum drm_plane_type type,
unsigned long possible_crtcs)
{
struct drm_plane *plane = NULL;
struct msm_drm_private *priv = dev->dev_private;
struct dpu_kms *kms = to_dpu_kms(priv->kms);
bool has_inline_rotation = false;
const u32 *format_list = NULL;
u32 num_formats = 0;
int i;
/* Determine the largest configuration that we can implement */
for (i = 0; i < kms->catalog->sspp_count; i++) {
const struct dpu_sspp_cfg *cfg = &kms->catalog->sspp[i];
if (test_bit(DPU_SSPP_INLINE_ROTATION, &cfg->features))
has_inline_rotation = true;
if (!format_list ||
cfg->sblk->csc_blk.len) {
format_list = cfg->sblk->format_list;
num_formats = cfg->sblk->num_formats;
}
}
plane = dpu_plane_init_common(dev, type, possible_crtcs,
has_inline_rotation,
format_list,
num_formats,
SSPP_NONE);
if (IS_ERR(plane))
return plane;
drm_plane_helper_add(plane, &dpu_plane_virtual_helper_funcs);
DPU_DEBUG("%s created virtual id:%u\n", plane->name, plane->base.id);
return plane;
}

View File

@ -62,10 +62,23 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
uint32_t pipe, enum drm_plane_type type,
unsigned long possible_crtcs);
struct drm_plane *dpu_plane_init_virtual(struct drm_device *dev,
enum drm_plane_type type,
unsigned long possible_crtcs);
int dpu_plane_color_fill(struct drm_plane *plane,
uint32_t color, uint32_t alpha);
#ifdef CONFIG_DEBUG_FS
void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable);
#else
static inline void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable) {}
#endif
int dpu_assign_plane_resources(struct dpu_global_state *global_state,
struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_plane_state **states,
unsigned int num_planes);
#endif /* _DPU_PLANE_H_ */

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
@ -9,6 +9,7 @@
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_cdm.h"
#include "dpu_hw_cwb.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_sspp.h"
#include "dpu_hw_intf.h"
@ -26,14 +27,6 @@ static inline bool reserved_by_other(uint32_t *res_map, int idx,
return res_map[idx] && res_map[idx] != enc_id;
}
/**
* struct dpu_rm_requirements - Reservation requirements parameter bundle
* @topology: selected topology for the display
*/
struct dpu_rm_requirements {
struct msm_display_topology topology;
};
/**
* dpu_rm_init - Read hardware catalog and create reservation tracking objects
* for all HW blocks.
@ -130,6 +123,19 @@ int dpu_rm_init(struct drm_device *dev,
rm->hw_wb[wb->id - WB_0] = hw;
}
for (i = 0; i < cat->cwb_count; i++) {
struct dpu_hw_cwb *hw;
const struct dpu_cwb_cfg *cwb = &cat->cwb[i];
hw = dpu_hw_cwb_init(dev, cwb, mmio);
if (IS_ERR(hw)) {
rc = PTR_ERR(hw);
DPU_ERROR("failed cwb object creation: err %d\n", rc);
goto fail;
}
rm->cwb_blks[cwb->id - CWB_0] = &hw->base;
}
for (i = 0; i < cat->ctl_count; i++) {
struct dpu_hw_ctl *hw;
const struct dpu_ctl_cfg *ctl = &cat->ctl[i];
@ -241,14 +247,13 @@ static int _dpu_rm_get_lm_peer(struct dpu_rm *rm, int primary_idx)
* mixer in rm->pingpong_blks[].
* @dspp_idx: output parameter, index of dspp block attached to the layer
* mixer in rm->dspp_blks[].
* @reqs: input parameter, rm requirements for HW blocks needed in the
* datapath.
* @topology: selected topology for the display
* Return: true if lm matches all requirements, false otherwise
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
struct dpu_rm_requirements *reqs)
struct msm_display_topology *topology)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
@ -273,7 +278,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
}
*pp_idx = idx;
if (!reqs->topology.num_dspp)
if (!topology->num_dspp)
return true;
idx = lm_cfg->dspp - DSPP_0;
@ -295,7 +300,7 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
struct dpu_global_state *global_state,
uint32_t enc_id,
struct dpu_rm_requirements *reqs)
struct msm_display_topology *topology)
{
int lm_idx[MAX_BLOCKS];
@ -303,14 +308,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
int dspp_idx[MAX_BLOCKS] = {0};
int i, lm_count = 0;
if (!reqs->topology.num_lm) {
DPU_ERROR("invalid number of lm: %d\n", reqs->topology.num_lm);
if (!topology->num_lm) {
DPU_ERROR("invalid number of lm: %d\n", topology->num_lm);
return -EINVAL;
}
/* Find a primary mixer */
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
lm_count < reqs->topology.num_lm; i++) {
lm_count < topology->num_lm; i++) {
if (!rm->mixer_blks[i])
continue;
@ -319,14 +324,14 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
enc_id, i, &pp_idx[lm_count],
&dspp_idx[lm_count], reqs)) {
&dspp_idx[lm_count], topology)) {
continue;
}
++lm_count;
/* Valid primary mixer found, find matching peers */
if (lm_count < reqs->topology.num_lm) {
if (lm_count < topology->num_lm) {
int j = _dpu_rm_get_lm_peer(rm, i);
/* ignore the peer if there is an error or if the peer was already processed */
@ -339,7 +344,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
&pp_idx[lm_count], &dspp_idx[lm_count],
reqs)) {
topology)) {
continue;
}
@ -348,7 +353,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
}
}
if (lm_count != reqs->topology.num_lm) {
if (lm_count != topology->num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
@ -357,7 +362,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
global_state->dspp_to_enc_id[dspp_idx[i]] =
reqs->topology.num_dspp ? enc_id : 0;
topology->num_dspp ? enc_id : 0;
trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
@ -594,28 +599,28 @@ static int _dpu_rm_make_reservation(
struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct dpu_rm_requirements *reqs)
struct msm_display_topology *topology)
{
int ret;
ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, reqs);
ret = _dpu_rm_reserve_lms(rm, global_state, enc->base.id, topology);
if (ret) {
DPU_ERROR("unable to find appropriate mixers\n");
return ret;
}
ret = _dpu_rm_reserve_ctls(rm, global_state, enc->base.id,
&reqs->topology);
topology);
if (ret) {
DPU_ERROR("unable to find appropriate CTL\n");
return ret;
}
ret = _dpu_rm_reserve_dsc(rm, global_state, enc, &reqs->topology);
ret = _dpu_rm_reserve_dsc(rm, global_state, enc, topology);
if (ret)
return ret;
if (reqs->topology.needs_cdm) {
if (topology->needs_cdm) {
ret = _dpu_rm_reserve_cdm(rm, global_state, enc);
if (ret) {
DPU_ERROR("unable to find CDM blk\n");
@ -626,20 +631,6 @@ static int _dpu_rm_make_reservation(
return ret;
}
static int _dpu_rm_populate_requirements(
struct drm_encoder *enc,
struct dpu_rm_requirements *reqs,
struct msm_display_topology req_topology)
{
reqs->topology = req_topology;
DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d cdm: %d\n",
reqs->topology.num_lm, reqs->topology.num_dsc,
reqs->topology.num_intf, reqs->topology.needs_cdm);
return 0;
}
static void _dpu_rm_clear_mapping(uint32_t *res_mapping, int cnt,
uint32_t enc_id)
{
@ -693,9 +684,8 @@ int dpu_rm_reserve(
struct dpu_global_state *global_state,
struct drm_encoder *enc,
struct drm_crtc_state *crtc_state,
struct msm_display_topology topology)
struct msm_display_topology *topology)
{
struct dpu_rm_requirements reqs;
int ret;
/* Check if this is just a page-flip */
@ -710,13 +700,11 @@ int dpu_rm_reserve(
DRM_DEBUG_KMS("reserving hw for enc %d crtc %d\n",
enc->base.id, crtc_state->crtc->base.id);
ret = _dpu_rm_populate_requirements(enc, &reqs, topology);
if (ret) {
DPU_ERROR("failed to populate hw requirements\n");
return ret;
}
DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
topology->num_lm, topology->num_dsc,
topology->num_intf);
ret = _dpu_rm_make_reservation(rm, global_state, enc, &reqs);
ret = _dpu_rm_make_reservation(rm, global_state, enc, topology);
if (ret)
DPU_ERROR("failed to reserve hw resources: %d\n", ret);
@ -725,6 +713,88 @@ int dpu_rm_reserve(
return ret;
}
static struct dpu_hw_sspp *dpu_rm_try_sspp(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_crtc *crtc,
struct dpu_rm_sspp_requirements *reqs,
unsigned int type)
{
uint32_t crtc_id = crtc->base.id;
struct dpu_hw_sspp *hw_sspp;
int i;
for (i = 0; i < ARRAY_SIZE(rm->hw_sspp); i++) {
if (!rm->hw_sspp[i])
continue;
if (global_state->sspp_to_crtc_id[i])
continue;
hw_sspp = rm->hw_sspp[i];
if (hw_sspp->cap->type != type)
continue;
if (reqs->scale && !hw_sspp->cap->sblk->scaler_blk.len)
continue;
// TODO: QSEED2 and RGB scalers are not yet supported
if (reqs->scale && !hw_sspp->ops.setup_scaler)
continue;
if (reqs->yuv && !hw_sspp->cap->sblk->csc_blk.len)
continue;
if (reqs->rot90 && !(hw_sspp->cap->features & DPU_SSPP_INLINE_ROTATION))
continue;
global_state->sspp_to_crtc_id[i] = crtc_id;
return rm->hw_sspp[i];
}
return NULL;
}
/**
* dpu_rm_reserve_sspp - Reserve the required SSPP for the provided CRTC
* @rm: DPU Resource Manager handle
* @global_state: private global state
* @crtc: DRM CRTC handle
* @reqs: SSPP required features
*/
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_crtc *crtc,
struct dpu_rm_sspp_requirements *reqs)
{
struct dpu_hw_sspp *hw_sspp = NULL;
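/* Prefer DMA, then RGB, then VIG pipes, so the scarcer and more capable
 * pipes stay available for planes that actually need scaling or a CSC.
 */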
if (!reqs->scale && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
if (!hw_sspp && reqs->scale)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
if (!hw_sspp)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
return hw_sspp;
}
/**
* dpu_rm_release_all_sspp - Given the CRTC, release all SSPP
* blocks previously reserved for that use case.
* @global_state: resources shared across multiple kms objects
* @crtc: DRM CRTC handle
*/
void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
struct drm_crtc *crtc)
{
uint32_t crtc_id = crtc->base.id;
_dpu_rm_clear_mapping(global_state->sspp_to_crtc_id,
ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
}
/**
* dpu_rm_get_assigned_resources - Get hw resources of the given type that are
* assigned to this encoder
@ -859,4 +929,11 @@ void dpu_rm_print_state(struct drm_printer *p,
dpu_rm_print_state_helper(p, rm->cdm_blk,
global_state->cdm_to_enc_id);
drm_puts(p, "\n");
drm_puts(p, "\tsspp=");
/* skip SSPP_NONE and start from the next index */
for (i = SSPP_NONE + 1; i < ARRAY_SIZE(global_state->sspp_to_crtc_id); i++)
dpu_rm_print_state_helper(p, rm->hw_sspp[i] ? &rm->hw_sspp[i]->base : NULL,
global_state->sspp_to_crtc_id[i]);
drm_puts(p, "\n");
}

View File

@ -20,6 +20,7 @@ struct dpu_global_state;
* @ctl_blks: array of ctl hardware resources
* @hw_intf: array of intf hardware resources
* @hw_wb: array of wb hardware resources
* @hw_cwb: array of cwb hardware resources
* @dspp_blks: array of dspp hardware resources
* @hw_sspp: array of sspp hardware resources
* @cdm_blk: cdm hardware resource
@ -30,6 +31,7 @@ struct dpu_rm {
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_intf *hw_intf[INTF_MAX - INTF_0];
struct dpu_hw_wb *hw_wb[WB_MAX - WB_0];
struct dpu_hw_blk *cwb_blks[CWB_MAX - CWB_0];
struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
struct dpu_hw_blk *merge_3d_blks[MERGE_3D_MAX - MERGE_3D_0];
struct dpu_hw_blk *dsc_blks[DSC_MAX - DSC_0];
@ -37,6 +39,12 @@ struct dpu_rm {
struct dpu_hw_blk *cdm_blk;
};
struct dpu_rm_sspp_requirements {
bool yuv;
bool scale;
bool rot90;
};
/**
* struct msm_display_topology - defines a display topology pipeline
* @num_lm: number of layer mixers used
@ -63,11 +71,19 @@ int dpu_rm_reserve(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_encoder *drm_enc,
struct drm_crtc_state *crtc_state,
struct msm_display_topology topology);
struct msm_display_topology *topology);
void dpu_rm_release(struct dpu_global_state *global_state,
struct drm_encoder *enc);
struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
struct dpu_global_state *global_state,
struct drm_crtc *crtc,
struct dpu_rm_sspp_requirements *reqs);
void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
struct drm_crtc *crtc);
int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
struct dpu_global_state *global_state, uint32_t enc_id,
enum dpu_hw_blk_type type, struct dpu_hw_blk **blks, int blks_size);

View File

@ -42,9 +42,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
if (!conn_state || !conn_state->connector) {
DPU_ERROR("invalid connector state\n");
return -EINVAL;
} else if (conn_state->connector->status != connector_status_connected) {
DPU_ERROR("connector not connected %d\n", conn_state->connector->status);
return -EINVAL;
}
crtc = conn_state->crtc;

View File

@ -389,7 +389,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
/* TODO: different regulators in other cases? */
mdp4_lcdc_encoder->regs[0].supply = "lvds-vccs-3p3v";
mdp4_lcdc_encoder->regs[1].supply = "lvds-vccs-3p3v";
mdp4_lcdc_encoder->regs[1].supply = "lvds-pll-vdda";
mdp4_lcdc_encoder->regs[2].supply = "lvds-vdda";
ret = devm_regulator_bulk_get(dev->dev,

View File

@ -14,6 +14,7 @@
#include "dp_catalog.h"
#include "dp_audio.h"
#include "dp_panel.h"
#include "dp_reg.h"
#include "dp_display.h"
#include "dp_utils.h"
@ -28,251 +29,64 @@ struct msm_dp_audio_private {
struct msm_dp_audio msm_dp_audio;
};
static u32 msm_dp_audio_get_header(struct msm_dp_catalog *catalog,
enum msm_dp_catalog_audio_sdp_type sdp,
enum msm_dp_catalog_audio_header_type header)
{
return msm_dp_catalog_audio_get_header(catalog, sdp, header);
}
static void msm_dp_audio_set_header(struct msm_dp_catalog *catalog,
u32 data,
enum msm_dp_catalog_audio_sdp_type sdp,
enum msm_dp_catalog_audio_header_type header)
{
msm_dp_catalog_audio_set_header(catalog, sdp, header, data);
}
static void msm_dp_audio_stream_sdp(struct msm_dp_audio_private *audio)
{
struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
struct dp_sdp_header sdp_hdr = {
.HB0 = 0x00,
.HB1 = 0x02,
.HB2 = 0x00,
.HB3 = audio->channels - 1,
};
/* Config header and parity byte 1 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
new_value = 0x02;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
new_value = value;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
new_value = audio->channels - 1;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
msm_dp_catalog_write_audio_stream(audio->catalog, &sdp_hdr);
}
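The same rework repeats in the helpers below: instead of programming header and parity bytes register by register, each SDP helper now only fills a struct dp_sdp_header and hands it to the catalog, which packs the four header bytes together with their parity bytes (via msm_dp_utils_pack_sdp_header()) into the two header words before writing them to the link registers.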
static void msm_dp_audio_timestamp_sdp(struct msm_dp_audio_private *audio)
{
struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
struct dp_sdp_header sdp_hdr = {
.HB0 = 0x00,
.HB1 = 0x01,
.HB2 = 0x17,
.HB3 = 0x0 | (0x11 << 2),
};
/* Config header and parity byte 1 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
new_value = 0x1;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
new_value = 0x17;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
msm_dp_catalog_write_audio_timestamp(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_infoframe_sdp(struct msm_dp_audio_private *audio)
{
struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
struct dp_sdp_header sdp_hdr = {
.HB0 = 0x00,
.HB1 = 0x84,
.HB2 = 0x1b,
.HB3 = 0x0 | (0x11 << 2),
};
/* Config header and parity byte 1 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
new_value = 0x84;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
new_value = 0x1b;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
new_value = (0x0 | (0x11 << 2));
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
new_value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
msm_dp_catalog_write_audio_infoframe(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_copy_management_sdp(struct msm_dp_audio_private *audio)
{
struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
struct dp_sdp_header sdp_hdr = {
.HB0 = 0x00,
.HB1 = 0x05,
.HB2 = 0x0f,
.HB3 = 0x00,
};
/* Config header and parity byte 1 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
new_value = 0x05;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
/* Config header and parity byte 3 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
new_value = 0x0;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_3_BIT)
| (parity_byte << PARITY_BYTE_3_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
msm_dp_catalog_write_audio_copy_mgmt(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_isrc_sdp(struct msm_dp_audio_private *audio)
{
struct msm_dp_catalog *catalog = audio->catalog;
u32 value, new_value;
u8 parity_byte;
struct dp_sdp_header sdp_hdr = {
.HB0 = 0x00,
.HB1 = 0x06,
.HB2 = 0x0f,
.HB3 = 0x00,
};
/* Config header and parity byte 1 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
new_value = 0x06;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_1_BIT)
| (parity_byte << PARITY_BYTE_1_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
/* Config header and parity byte 2 */
value = msm_dp_audio_get_header(catalog,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
new_value = 0x0F;
parity_byte = msm_dp_utils_calculate_parity(new_value);
value |= ((new_value << HEADER_BYTE_2_BIT)
| (parity_byte << PARITY_BYTE_2_BIT));
drm_dbg_dp(audio->drm_dev,
"Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
value, parity_byte);
msm_dp_audio_set_header(catalog, value,
DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
msm_dp_catalog_write_audio_isrc(audio->catalog, &sdp_hdr);
}
static void msm_dp_audio_setup_sdp(struct msm_dp_audio_private *audio)
@ -329,10 +143,10 @@ static void msm_dp_audio_safe_to_exit_level(struct msm_dp_audio_private *audio)
safe_to_exit_level = 5;
break;
default:
safe_to_exit_level = 14;
drm_dbg_dp(audio->drm_dev,
"setting the default safe_to_exit_level = %u\n",
safe_to_exit_level);
safe_to_exit_level = 14;
break;
}
@ -539,14 +353,13 @@ int msm_dp_register_audio_driver(struct device *dev,
}
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
struct msm_dp_panel *panel,
struct msm_dp_catalog *catalog)
{
int rc = 0;
struct msm_dp_audio_private *audio;
struct msm_dp_audio *msm_dp_audio;
if (!pdev || !panel || !catalog) {
if (!pdev || !catalog) {
DRM_ERROR("invalid input\n");
rc = -EINVAL;
goto error;
@ -563,8 +376,6 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
msm_dp_audio = &audio->msm_dp_audio;
msm_dp_catalog_audio_init(catalog);
return msm_dp_audio;
error:
return ERR_PTR(rc);

View File

@ -8,7 +8,6 @@
#include <linux/platform_device.h>
#include "dp_panel.h"
#include "dp_catalog.h"
#include <sound/hdmi-codec.h>
@ -28,14 +27,12 @@ struct msm_dp_audio {
* Creates an instance of dp audio.
*
* @pdev: caller's platform device instance.
* @panel: an instance of msm_dp_panel module.
* @catalog: an instance of msm_dp_catalog module.
*
* Returns the error code in case of failure, otherwise an
* instance of the newly created msm_dp_audio module.
*/
struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev,
struct msm_dp_panel *panel,
struct msm_dp_catalog *catalog);
/**

View File

@ -79,7 +79,6 @@ struct msm_dp_catalog_private {
struct device *dev;
struct drm_device *drm_dev;
struct dss_io_data io;
u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
struct msm_dp_catalog msm_dp_catalog;
};
@ -276,43 +275,6 @@ int msm_dp_catalog_aux_wait_for_hpd_connect_state(struct msm_dp_catalog *msm_dp_
min(wait_us, 2000), wait_us);
}
static void dump_regs(void __iomem *base, int len)
{
int i;
u32 x0, x4, x8, xc;
u32 addr_off = 0;
len = DIV_ROUND_UP(len, 16);
for (i = 0; i < len; i++) {
x0 = readl_relaxed(base + addr_off);
x4 = readl_relaxed(base + addr_off + 0x04);
x8 = readl_relaxed(base + addr_off + 0x08);
xc = readl_relaxed(base + addr_off + 0x0c);
pr_info("%08x: %08x %08x %08x %08x", addr_off, x0, x4, x8, xc);
addr_off += 16;
}
}
void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog)
{
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
struct dss_io_data *io = &catalog->io;
pr_info("AHB regs\n");
dump_regs(io->ahb.base, io->ahb.len);
pr_info("AUXCLK regs\n");
dump_regs(io->aux.base, io->aux.len);
pr_info("LCLK regs\n");
dump_regs(io->link.base, io->link.len);
pr_info("P0CLK regs\n");
dump_regs(io->p0.base, io->p0.len);
}
u32 msm_dp_catalog_aux_get_irq(struct msm_dp_catalog *msm_dp_catalog)
{
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
@ -1036,7 +998,6 @@ void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
display_hctl = (hsync_end_x << 16) | hsync_start_x;
msm_dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
msm_dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
msm_dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
hsync_period);
@ -1160,38 +1121,75 @@ struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev)
return &catalog->msm_dp_catalog;
}
u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog,
enum msm_dp_catalog_audio_sdp_type sdp,
enum msm_dp_catalog_audio_header_type header)
void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr)
{
struct msm_dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
u32 header[2];
catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
msm_dp_utils_pack_sdp_header(sdp_hdr, header);
sdp_map = catalog->audio_map;
return msm_dp_read_link(catalog, sdp_map[sdp][header]);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_0, header[0]);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_STREAM_1, header[1]);
}
void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog,
enum msm_dp_catalog_audio_sdp_type sdp,
enum msm_dp_catalog_audio_header_type header,
u32 data)
void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr)
{
struct msm_dp_catalog_private *catalog;
u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
u32 header[2];
if (!msm_dp_catalog)
return;
msm_dp_utils_pack_sdp_header(sdp_hdr, header);
catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_0, header[0]);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_TIMESTAMP_1, header[1]);
}
sdp_map = catalog->audio_map;
void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr)
{
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
u32 header[2];
msm_dp_write_link(catalog, sdp_map[sdp][header], data);
msm_dp_utils_pack_sdp_header(sdp_hdr, header);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_0, header[0]);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_INFOFRAME_1, header[1]);
}
void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr)
{
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
u32 header[2];
msm_dp_utils_pack_sdp_header(sdp_hdr, header);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_0, header[0]);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_COPYMANAGEMENT_1, header[1]);
}
void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr)
{
struct msm_dp_catalog_private *catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
struct dp_sdp_header tmp = *sdp_hdr;
u32 header[2];
u32 reg;
/* XXX: is it necessary to preserve this field? */
reg = msm_dp_read_link(catalog, MMSS_DP_AUDIO_ISRC_1);
tmp.HB3 = FIELD_GET(HEADER_3_MASK, reg);
msm_dp_utils_pack_sdp_header(&tmp, header);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_0, header[0]);
msm_dp_write_link(catalog, MMSS_DP_AUDIO_ISRC_1, header[1]);
}
void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *msm_dp_catalog, u32 select)
@ -1277,47 +1275,6 @@ void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *msm_dp_catalog)
msm_dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
}
void msm_dp_catalog_audio_init(struct msm_dp_catalog *msm_dp_catalog)
{
struct msm_dp_catalog_private *catalog;
static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
{
MMSS_DP_AUDIO_STREAM_0,
MMSS_DP_AUDIO_STREAM_1,
MMSS_DP_AUDIO_STREAM_1,
},
{
MMSS_DP_AUDIO_TIMESTAMP_0,
MMSS_DP_AUDIO_TIMESTAMP_1,
MMSS_DP_AUDIO_TIMESTAMP_1,
},
{
MMSS_DP_AUDIO_INFOFRAME_0,
MMSS_DP_AUDIO_INFOFRAME_1,
MMSS_DP_AUDIO_INFOFRAME_1,
},
{
MMSS_DP_AUDIO_COPYMANAGEMENT_0,
MMSS_DP_AUDIO_COPYMANAGEMENT_1,
MMSS_DP_AUDIO_COPYMANAGEMENT_1,
},
{
MMSS_DP_AUDIO_ISRC_0,
MMSS_DP_AUDIO_ISRC_1,
MMSS_DP_AUDIO_ISRC_1,
},
};
if (!msm_dp_catalog)
return;
catalog = container_of(msm_dp_catalog,
struct msm_dp_catalog_private, msm_dp_catalog);
catalog->audio_map = sdp_map;
}
void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *msm_dp_catalog, u32 safe_to_exit_level)
{
struct msm_dp_catalog_private *catalog;

View File

@ -31,22 +31,6 @@
#define DP_HW_VERSION_1_0 0x10000000
#define DP_HW_VERSION_1_2 0x10020000
enum msm_dp_catalog_audio_sdp_type {
DP_AUDIO_SDP_STREAM,
DP_AUDIO_SDP_TIMESTAMP,
DP_AUDIO_SDP_INFOFRAME,
DP_AUDIO_SDP_COPYMANAGEMENT,
DP_AUDIO_SDP_ISRC,
DP_AUDIO_SDP_MAX,
};
enum msm_dp_catalog_audio_header_type {
DP_AUDIO_SDP_HEADER_1,
DP_AUDIO_SDP_HEADER_2,
DP_AUDIO_SDP_HEADER_3,
DP_AUDIO_SDP_HEADER_MAX,
};
struct msm_dp_catalog {
bool wide_bus_en;
};
@ -104,7 +88,6 @@ int msm_dp_catalog_panel_timing_cfg(struct msm_dp_catalog *msm_dp_catalog, u32 t
u32 sync_start, u32 width_blanking, u32 msm_dp_active);
void msm_dp_catalog_panel_enable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog, struct dp_sdp *vsc_sdp);
void msm_dp_catalog_panel_disable_vsc_sdp(struct msm_dp_catalog *msm_dp_catalog);
void msm_dp_catalog_dump_regs(struct msm_dp_catalog *msm_dp_catalog);
void msm_dp_catalog_panel_tpg_enable(struct msm_dp_catalog *msm_dp_catalog,
struct drm_display_mode *drm_mode);
void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
@ -112,17 +95,19 @@ void msm_dp_catalog_panel_tpg_disable(struct msm_dp_catalog *msm_dp_catalog);
struct msm_dp_catalog *msm_dp_catalog_get(struct device *dev);
/* DP Audio APIs */
u32 msm_dp_catalog_audio_get_header(struct msm_dp_catalog *msm_dp_catalog,
enum msm_dp_catalog_audio_sdp_type sdp,
enum msm_dp_catalog_audio_header_type header);
void msm_dp_catalog_audio_set_header(struct msm_dp_catalog *msm_dp_catalog,
enum msm_dp_catalog_audio_sdp_type sdp,
enum msm_dp_catalog_audio_header_type header,
u32 data);
void msm_dp_catalog_write_audio_stream(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr);
void msm_dp_catalog_write_audio_timestamp(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr);
void msm_dp_catalog_write_audio_infoframe(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr);
void msm_dp_catalog_write_audio_copy_mgmt(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr);
void msm_dp_catalog_write_audio_isrc(struct msm_dp_catalog *msm_dp_catalog,
struct dp_sdp_header *sdp_hdr);
void msm_dp_catalog_audio_config_acr(struct msm_dp_catalog *catalog, u32 select);
void msm_dp_catalog_audio_enable(struct msm_dp_catalog *catalog, bool enable);
void msm_dp_catalog_audio_config_sdp(struct msm_dp_catalog *catalog);
void msm_dp_catalog_audio_init(struct msm_dp_catalog *catalog);
void msm_dp_catalog_audio_sfe_level(struct msm_dp_catalog *catalog, u32 safe_to_exit_level);
#endif /* _DP_CATALOG_H_ */

View File

@ -178,7 +178,6 @@ static void msm_dp_ctrl_configure_source_params(struct msm_dp_ctrl_private *ctrl
u32 cc, tb;
msm_dp_catalog_ctrl_lane_mapping(ctrl->catalog);
msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
msm_dp_catalog_setup_peripheral_flush(ctrl->catalog);
msm_dp_ctrl_config_ctrl(ctrl);
@ -2071,6 +2070,7 @@ void msm_dp_ctrl_off_link(struct msm_dp_ctrl *msm_dp_ctrl)
msm_dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
dev_pm_opp_set_rate(ctrl->dev, 0);
msm_dp_ctrl_link_clk_disable(&ctrl->msm_dp_ctrl);
DRM_DEBUG_DP("Before, phy=%p init_count=%d power_on=%d\n",

View File

@ -722,9 +722,6 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
{
int rc = 0;
struct device *dev = &dp->msm_dp_display.pdev->dev;
struct msm_dp_panel_in panel_in = {
.dev = dev,
};
struct phy *phy;
phy = devm_phy_get(dev, "dp");
@ -765,11 +762,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_link;
}
panel_in.aux = dp->aux;
panel_in.catalog = dp->catalog;
panel_in.link = dp->link;
dp->panel = msm_dp_panel_get(&panel_in);
dp->panel = msm_dp_panel_get(dev, dp->aux, dp->link, dp->catalog);
if (IS_ERR(dp->panel)) {
rc = PTR_ERR(dp->panel);
DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
@ -787,7 +780,7 @@ static int msm_dp_init_sub_modules(struct msm_dp_display_private *dp)
goto error_ctrl;
}
dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->panel, dp->catalog);
dp->audio = msm_dp_audio_get(dp->msm_dp_display.pdev, dp->catalog);
if (IS_ERR(dp->audio)) {
rc = PTR_ERR(dp->audio);
pr_err("failed to initialize audio, rc = %d\n", rc);

View File

@ -317,17 +317,6 @@ static int msm_dp_panel_setup_vsc_sdp_yuv_420(struct msm_dp_panel *msm_dp_panel)
return 0;
}
void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel)
{
struct msm_dp_catalog *catalog;
struct msm_dp_panel_private *panel;
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
catalog = panel->catalog;
msm_dp_catalog_dump_regs(catalog);
}
int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel)
{
u32 data, total_ver, total_hor;
@ -486,25 +475,26 @@ static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
return 0;
}
struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in)
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
struct msm_dp_link *link, struct msm_dp_catalog *catalog)
{
struct msm_dp_panel_private *panel;
struct msm_dp_panel *msm_dp_panel;
int ret;
if (!in->dev || !in->catalog || !in->aux || !in->link) {
if (!dev || !catalog || !aux || !link) {
DRM_ERROR("invalid input\n");
return ERR_PTR(-EINVAL);
}
panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
return ERR_PTR(-ENOMEM);
panel->dev = in->dev;
panel->aux = in->aux;
panel->catalog = in->catalog;
panel->link = in->link;
panel->dev = dev;
panel->aux = aux;
panel->catalog = catalog;
panel->link = link;
msm_dp_panel = &panel->msm_dp_panel;
msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;

View File

@ -21,13 +21,6 @@ struct msm_dp_display_mode {
bool out_fmt_is_yuv_420;
};
struct msm_dp_panel_in {
struct device *dev;
struct drm_dp_aux *aux;
struct msm_dp_link *link;
struct msm_dp_catalog *catalog;
};
struct msm_dp_panel_psr {
u8 version;
u8 capabilities;
@ -55,7 +48,6 @@ struct msm_dp_panel {
int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_deinit(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_timing_cfg(struct msm_dp_panel *msm_dp_panel);
void msm_dp_panel_dump_regs(struct msm_dp_panel *msm_dp_panel);
int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
struct drm_connector *connector);
u32 msm_dp_panel_get_mode_bpp(struct msm_dp_panel *msm_dp_panel, u32 mode_max_bpp,
@ -92,6 +84,7 @@ static inline bool is_lane_count_valid(u32 lane_count)
lane_count == 4);
}
struct msm_dp_panel *msm_dp_panel_get(struct msm_dp_panel_in *in);
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
struct msm_dp_link *link, struct msm_dp_catalog *catalog);
void msm_dp_panel_put(struct msm_dp_panel *msm_dp_panel);
#endif /* _DP_PANEL_H_ */

View File

@ -74,14 +74,8 @@ u8 msm_dp_utils_calculate_parity(u32 data)
return parity_byte;
}
ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff)
void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2])
{
size_t length;
length = sizeof(header_buff);
if (length < DP_SDP_HEADER_SIZE)
return -ENOSPC;
header_buff[0] = FIELD_PREP(HEADER_0_MASK, sdp_header->HB0) |
FIELD_PREP(PARITY_0_MASK, msm_dp_utils_calculate_parity(sdp_header->HB0)) |
FIELD_PREP(HEADER_1_MASK, sdp_header->HB1) |
@ -91,6 +85,4 @@ ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *head
FIELD_PREP(PARITY_2_MASK, msm_dp_utils_calculate_parity(sdp_header->HB2)) |
FIELD_PREP(HEADER_3_MASK, sdp_header->HB3) |
FIELD_PREP(PARITY_3_MASK, msm_dp_utils_calculate_parity(sdp_header->HB3));
return length;
}
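Returning void also drops a length check that could never do its job: sizeof() applied to a pointer parameter measures the pointer, not the caller's two-word buffer. A small standalone illustration of that pitfall (plain userspace C, the names are made up):

#include <stdint.h>
#include <stdio.h>

/* sizeof() on a pointer parameter yields the pointer size, so a length
 * check derived from it says nothing about the buffer the caller passed.
 */
static size_t reported_size(const uint32_t *header_buff)
{
	return sizeof(header_buff);	/* sizeof(const uint32_t *) */
}

int main(void)
{
	uint32_t big[16];

	printf("caller: %zu bytes, callee sees: %zu bytes\n",
	       sizeof(big), reported_size(big));	/* e.g. 64 vs 8 */
	return 0;
}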

View File

@ -31,6 +31,6 @@
u8 msm_dp_utils_get_g0_value(u8 data);
u8 msm_dp_utils_get_g1_value(u8 data);
u8 msm_dp_utils_calculate_parity(u32 data);
ssize_t msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 *header_buff);
void msm_dp_utils_pack_sdp_header(struct dp_sdp_header *sdp_header, u32 header_buff[2]);
#endif /* _DP_UTILS_H_ */

View File

@ -286,6 +286,8 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_1,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
&sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,

View File

@ -23,6 +23,7 @@
#define MSM_DSI_6G_VER_MINOR_V2_2_0 0x20000000
#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_6G_VER_MINOR_V2_3_0 0x20030000
#define MSM_DSI_6G_VER_MINOR_V2_3_1 0x20030001
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000

View File

@ -567,6 +567,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_14nm_8953_cfgs },
{ .compatible = "qcom,sm6125-dsi-phy-14nm",
.data = &dsi_phy_14nm_2290_cfgs },
{ .compatible = "qcom,sm6150-dsi-phy-14nm",
.data = &dsi_phy_14nm_6150_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",

View File

@ -46,6 +46,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;

View File

@ -1032,6 +1032,10 @@ static const struct regulator_bulk_data dsi_phy_14nm_73p4mA_regulators[] = {
{ .supply = "vcca", .init_load_uA = 73400 },
};
static const struct regulator_bulk_data dsi_phy_14nm_36mA_regulators[] = {
{ .supply = "vdda", .init_load_uA = 36000 },
};
const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_17mA_regulators,
@ -1097,3 +1101,20 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_2290_cfgs = {
.io_start = { 0x5e94400 },
.num_dsi_phy = 1,
};
const struct msm_dsi_phy_cfg dsi_phy_14nm_6150_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_14nm_36mA_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_14nm_36mA_regulators),
.ops = {
.enable = dsi_14nm_phy_enable,
.disable = dsi_14nm_phy_disable,
.pll_init = dsi_pll_14nm_init,
.save_pll_state = dsi_14nm_pll_save_state,
.restore_pll_state = dsi_14nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0xae94400 },
.num_dsi_phy = 1,
};

View File

@ -137,7 +137,7 @@ static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
base <<= (digclk_divsel == 2 ? 1 : 0);
return (base <= 2046 ? base : 2046);
return base;
}
static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)

View File

@ -538,7 +538,7 @@ static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
/* Only supported if per-process address space is supported: */
if (priv->gpu->aspace == ctx->aspace)
return -EOPNOTSUPP;
return UERR(EOPNOTSUPP, dev, "requires per-process pgtables");
if (should_fail(&fail_gem_iova, obj->size))
return -ENOMEM;

View File

@ -28,6 +28,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_dsc.h>
#include <drm/msm_drm.h>
@ -506,6 +507,12 @@ void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
clockid_t clock_id,
enum hrtimer_mode mode);
/* Helper for returning a UABI error with optional logging which can make
* it easier for userspace to understand what it is doing wrong.
*/
#define UERR(err, drm, fmt, ...) \
({ DRM_DEV_DEBUG_DRIVER((drm)->dev, fmt, ##__VA_ARGS__); -(err); })
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
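
Usage sketch of the new UERR() helper (hypothetical check, not from the commit; VALID_FLAGS is a placeholder): the debug print goes out at DRM_UT_DRIVER level, so userspace developers can opt in via drm.debug, and the macro evaluates to the negative errno in a single expression.

static int example_check_flags(struct drm_device *dev, u32 flags)
{
	if (flags & ~VALID_FLAGS)
		return UERR(EINVAL, dev, "invalid flags: %x", flags);

	return 0;
}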

View File

@ -20,8 +20,8 @@
/* For userspace errors, use DRM_UT_DRIVER.. so that userspace can enable
* error msgs for debugging, but we don't spam dmesg by default
*/
#define SUBMIT_ERROR(submit, fmt, ...) \
DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)
#define SUBMIT_ERROR(err, submit, fmt, ...) \
UERR(err, (submit)->dev, fmt, ##__VA_ARGS__)
/*
* Cmdstream submission:
@ -142,8 +142,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
!(submit_bo.flags & MANDATORY_FLAGS)) {
SUBMIT_ERROR(submit, "invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
ret = SUBMIT_ERROR(EINVAL, submit, "invalid flags: %x\n", submit_bo.flags);
i = 0;
goto out;
}
@ -162,8 +161,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
SUBMIT_ERROR(submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
ret = SUBMIT_ERROR(EINVAL, submit, "invalid handle %u at index %u\n", submit->bos[i].handle, i);
goto out_unlock;
}
@ -206,14 +204,12 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
break;
default:
SUBMIT_ERROR(submit, "invalid type: %08x\n", submit_cmd.type);
return -EINVAL;
return SUBMIT_ERROR(EINVAL, submit, "invalid type: %08x\n", submit_cmd.type);
}
if (submit_cmd.size % 4) {
SUBMIT_ERROR(submit, "non-aligned cmdstream buffer size: %u\n",
submit_cmd.size);
ret = -EINVAL;
ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer size: %u\n",
submit_cmd.size);
goto out;
}
@ -371,9 +367,8 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
struct drm_gem_object **obj, uint64_t *iova)
{
if (idx >= submit->nr_bos) {
SUBMIT_ERROR(submit, "invalid buffer index: %u (out of %u)\n",
idx, submit->nr_bos);
return -EINVAL;
return SUBMIT_ERROR(EINVAL, submit, "invalid buffer index: %u (out of %u)\n",
idx, submit->nr_bos);
}
if (obj)
@ -392,10 +387,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint32_t *ptr;
int ret = 0;
if (offset % 4) {
SUBMIT_ERROR(submit, "non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
}
if (offset % 4)
return SUBMIT_ERROR(EINVAL, submit, "non-aligned cmdstream buffer: %u\n", offset);
/* For now, just map the entire thing. Eventually we probably
* to do it page-by-page, w/ kmap() if not vmap()d..
@ -414,9 +407,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
uint64_t iova;
if (submit_reloc.submit_offset % 4) {
SUBMIT_ERROR(submit, "non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
ret = -EINVAL;
ret = SUBMIT_ERROR(EINVAL, submit, "non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
goto out;
}
@ -425,8 +417,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *ob
if ((off >= (obj->size / 4)) ||
(off < last_offset)) {
SUBMIT_ERROR(submit, "invalid offset %u at reloc %u\n", off, i);
ret = -EINVAL;
ret = SUBMIT_ERROR(EINVAL, submit, "invalid offset %u at reloc %u\n", off, i);
goto out;
}
@ -513,12 +504,12 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
if (syncobj_desc.point &&
!drm_core_check_feature(submit->dev, DRIVER_SYNCOBJ_TIMELINE)) {
ret = -EOPNOTSUPP;
ret = SUBMIT_ERROR(EOPNOTSUPP, submit, "syncobj timeline unsupported");
break;
}
if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
ret = -EINVAL;
ret = -SUBMIT_ERROR(EINVAL, submit, "invalid syncobj flags: %x", syncobj_desc.flags);
break;
}
@ -531,7 +522,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
syncobjs[i] =
drm_syncobj_find(file, syncobj_desc.handle);
if (!syncobjs[i]) {
ret = -EINVAL;
ret = SUBMIT_ERROR(EINVAL, submit, "invalid syncobj handle: %u", i);
break;
}
}
@ -588,14 +579,14 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
post_deps[i].point = syncobj_desc.point;
if (syncobj_desc.flags) {
ret = -EINVAL;
ret = UERR(EINVAL, dev, "invalid syncobj flags");
break;
}
if (syncobj_desc.point) {
if (!drm_core_check_feature(dev,
DRIVER_SYNCOBJ_TIMELINE)) {
ret = -EOPNOTSUPP;
ret = UERR(EOPNOTSUPP, dev, "syncobj timeline unsupported");
break;
}
@ -609,7 +600,7 @@ static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
post_deps[i].syncobj =
drm_syncobj_find(file, syncobj_desc.handle);
if (!post_deps[i].syncobj) {
ret = -EINVAL;
ret = UERR(EINVAL, dev, "invalid syncobj handle");
break;
}
}
@ -677,10 +668,10 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* be more clever to dispatch to appropriate gpu module:
*/
if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
return -EINVAL;
return UERR(EINVAL, dev, "invalid pipe");
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
return UERR(EINVAL, dev, "invalid flags");
if (args->flags & MSM_SUBMIT_SUDO) {
if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
@ -724,7 +715,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence) {
ret = -EINVAL;
ret = UERR(EINVAL, dev, "invalid in-fence");
goto out_unlock;
}
@ -787,10 +778,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
if (!submit->cmd[i].size ||
((submit->cmd[i].size + submit->cmd[i].offset) >
obj->size / 4)) {
SUBMIT_ERROR(submit, "invalid cmdstream size: %u\n", submit->cmd[i].size * 4);
ret = -EINVAL;
(size_add(submit->cmd[i].size, submit->cmd[i].offset) > obj->size / 4)) {
ret = UERR(EINVAL, dev, "invalid cmdstream size: %u\n",
submit->cmd[i].size * 4);
goto out;
}
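
The bounds check above now goes through size_add() from <linux/overflow.h>, which saturates at SIZE_MAX instead of wrapping, so a crafted size/offset pair cannot overflow past the obj->size / 4 limit. A minimal sketch of the idea (helper name invented here):

#include <linux/overflow.h>

/* True only when size is non-zero and size + offset (saturating, so it
 * cannot wrap around) still fits inside the object, measured in dwords. */
static bool cmd_fits(size_t size, size_t offset, size_t obj_dwords)
{
	return size && size_add(size, offset) <= obj_dwords;
}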
@ -800,8 +790,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
continue;
if (!gpu->allow_relocs) {
SUBMIT_ERROR(submit, "relocs not allowed\n");
ret = -EINVAL;
ret = UERR(EINVAL, dev, "relocs not allowed\n");
goto out;
}
@ -827,7 +816,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
(!args->fence || idr_find(&queue->fence_idr, args->fence))) {
spin_unlock(&queue->idr_lock);
idr_preload_end();
ret = -EINVAL;
ret = UERR(EINVAL, dev, "invalid in-fence-sn");
goto out;
}

View File

@ -244,7 +244,6 @@ int msm_drm_kms_init(struct device *dev, const struct drm_driver *drv)
ret = priv->kms_init(ddev);
if (ret) {
DRM_DEV_ERROR(dev, "failed to load kms\n");
priv->kms = NULL;
return ret;
}

View File

@ -166,22 +166,32 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
writel_relaxed(data->ubwc_static, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
if (data->ubwc_bank_spread)
value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
if (data->ubwc_enc_version == UBWC_1_0)
value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);
writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
u32 value = (data->ubwc_swizzle & 0x1) |
(data->highest_bank_bit & 0x3) << 4 |
(data->macrotile_mode & 0x1) << 12;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle & 0x1) |
MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
if (data->macrotile_mode)
value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
if (data->ubwc_enc_version == UBWC_3_0)
value |= BIT(10);
value |= MDSS_UBWC_STATIC_UBWC_AMSBC;
if (data->ubwc_enc_version == UBWC_1_0)
value |= BIT(8);
value |= MDSS_UBWC_STATIC_UBWC_MIN_ACC_LEN(1);
writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
@ -189,10 +199,14 @@ static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
const struct msm_mdss_data *data = msm_mdss->mdss_data;
u32 value = (data->ubwc_swizzle & 0x7) |
(data->ubwc_static & 0x1) << 3 |
(data->highest_bank_bit & 0x7) << 4 |
(data->macrotile_mode & 0x1) << 12;
u32 value = MDSS_UBWC_STATIC_UBWC_SWIZZLE(data->ubwc_swizzle) |
MDSS_UBWC_STATIC_HIGHEST_BANK_BIT(data->highest_bank_bit);
if (data->ubwc_bank_spread)
value |= MDSS_UBWC_STATIC_UBWC_BANK_SPREAD;
if (data->macrotile_mode)
value |= MDSS_UBWC_STATIC_MACROTILE_MODE;
writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
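
For reference, the MDSS_UBWC_STATIC_* helpers used above are assumed to be FIELD_PREP-style wrappers over the bitfield layout documented in the updated mdss.xml further below; an equivalent open-coded sketch with invented mask names:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Invented mask names; layout follows the UBWC_STATIC bitfields:
 * swizzle in [2:0], bank spread at bit 3, highest bank bit in [6:4],
 * min access length in [9:8], AMSBC at bit 10, macrotile mode at bit 12. */
#define UBWC_SWIZZLE		GENMASK(2, 0)
#define UBWC_BANK_SPREAD	BIT(3)
#define UBWC_HIGHEST_BANK_BIT	GENMASK(6, 4)
#define UBWC_MIN_ACC_LEN	GENMASK(9, 8)
#define UBWC_AMSBC		BIT(10)
#define UBWC_MACROTILE_MODE	BIT(12)

static u32 pack_ubwc_static(u32 swizzle, u32 hbb, bool bank_spread,
			    bool macrotile)
{
	u32 value = FIELD_PREP(UBWC_SWIZZLE, swizzle) |
		    FIELD_PREP(UBWC_HIGHEST_BANK_BIT, hbb);

	if (bank_spread)
		value |= UBWC_BANK_SPREAD;
	if (macrotile)
		value |= UBWC_MACROTILE_MODE;

	return value;
}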
@ -572,16 +586,17 @@ static const struct msm_mdss_data sa8775p_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 4,
.ubwc_static = 1,
.ubwc_bank_spread = true,
.highest_bank_bit = 0,
.macrotile_mode = 1,
.macrotile_mode = true,
.reg_bus_bw = 74000,
};
static const struct msm_mdss_data sc7180_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_static = 0x1e,
.ubwc_swizzle = 6,
.ubwc_bank_spread = true,
.highest_bank_bit = 0x1,
.reg_bus_bw = 76800,
};
@ -590,9 +605,9 @@ static const struct msm_mdss_data sc7280_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
.ubwc_bank_spread = true,
.highest_bank_bit = 1,
.macrotile_mode = 1,
.macrotile_mode = true,
.reg_bus_bw = 74000,
};
@ -600,7 +615,7 @@ static const struct msm_mdss_data sc8180x_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
.highest_bank_bit = 3,
.macrotile_mode = 1,
.macrotile_mode = true,
.reg_bus_bw = 76800,
};
@ -608,9 +623,9 @@ static const struct msm_mdss_data sc8280xp_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
.ubwc_bank_spread = true,
.highest_bank_bit = 3,
.macrotile_mode = 1,
.macrotile_mode = true,
.reg_bus_bw = 76800,
};
@ -632,7 +647,7 @@ static const struct msm_mdss_data sm6350_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 6,
.ubwc_static = 0x1e,
.ubwc_bank_spread = true,
.highest_bank_bit = 1,
.reg_bus_bw = 76800,
};
@ -655,7 +670,7 @@ static const struct msm_mdss_data sm6115_data = {
.ubwc_enc_version = UBWC_1_0,
.ubwc_dec_version = UBWC_2_0,
.ubwc_swizzle = 7,
.ubwc_static = 0x11f,
.ubwc_bank_spread = true,
.highest_bank_bit = 0x1,
.reg_bus_bw = 76800,
};
@ -667,14 +682,21 @@ static const struct msm_mdss_data sm6125_data = {
.highest_bank_bit = 1,
};
static const struct msm_mdss_data sm6150_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 1,
.reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm8250_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
.ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
.macrotile_mode = true,
.reg_bus_bw = 76800,
};
@ -682,10 +704,10 @@ static const struct msm_mdss_data sm8350_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_0,
.ubwc_swizzle = 6,
.ubwc_static = 1,
.ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
.macrotile_mode = true,
.reg_bus_bw = 74000,
};
@ -693,10 +715,10 @@ static const struct msm_mdss_data sm8550_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
.ubwc_static = 1,
.ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
.macrotile_mode = true,
.reg_bus_bw = 57000,
};
@ -704,10 +726,10 @@ static const struct msm_mdss_data x1e80100_data = {
.ubwc_enc_version = UBWC_4_0,
.ubwc_dec_version = UBWC_4_3,
.ubwc_swizzle = 6,
.ubwc_static = 1,
.ubwc_bank_spread = true,
/* TODO: highest_bank_bit = 2 for LP_DDR4 */
.highest_bank_bit = 3,
.macrotile_mode = 1,
.macrotile_mode = true,
/* TODO: Add reg_bus_bw with real value */
};
@ -724,6 +746,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
{ .compatible = "qcom,sm6150-mdss", .data = &sm6150_data },
{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },

View File

@ -11,9 +11,9 @@ struct msm_mdss_data {
/* can be read from register 0x58 */
u32 ubwc_dec_version;
u32 ubwc_swizzle;
u32 ubwc_static;
u32 highest_bank_bit;
u32 macrotile_mode;
bool ubwc_bank_spread;
bool macrotile_mode;
u32 reg_bus_bw;
};

View File

@ -18,7 +18,7 @@ int msm_file_private_set_sysprof(struct msm_file_private *ctx,
switch (sysprof) {
default:
return -EINVAL;
return UERR(EINVAL, gpu->dev, "Invalid sysprof: %d", sysprof);
case 2:
pm_runtime_get_sync(&gpu->pdev->dev);
fallthrough;

View File

@ -52,6 +52,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x23fd" name="GMU_DCVS_PERF_SETTING"/>
<reg32 offset="0x23fe" name="GMU_DCVS_BW_SETTING"/>
<reg32 offset="0x23ff" name="GMU_DCVS_RETURN"/>
<reg32 offset="0x2bf8" name="GMU_CORE_FW_VERSION">
<bitfield name="MAJOR" low="28" high="31"/>
<bitfield name="MINOR" low="16" high="27"/>
<bitfield name="STEP" low="0" high="15"/>
</reg32>
<reg32 offset="0x4c00" name="GMU_ICACHE_CONFIG"/>
<reg32 offset="0x4c01" name="GMU_DCACHE_CONFIG"/>
<reg32 offset="0x4c0f" name="GMU_SYS_BUS_CONFIG"/>

View File

@ -21,7 +21,16 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00058" name="UBWC_DEC_HW_VERSION"/>
<reg32 offset="0x00144" name="UBWC_STATIC"/>
<reg32 offset="0x00144" name="UBWC_STATIC">
<bitfield name="UBWC_SWIZZLE" low="0" high="2"/>
<bitfield name="UBWC_BANK_SPREAD" pos="3"/>
<!-- high=5 for UBWC < 4.0 -->
<bitfield name="HIGHEST_BANK_BIT" low="4" high="6"/>
<bitfield name="UBWC_MIN_ACC_LEN" low="8" high="9"/>
<bitfield name="UBWC_AMSBC" pos="10"/>
<bitfield name="MACROTILE_MODE" pos="12"/>
</reg32>
<reg32 offset="0x00150" name="UBWC_CTRL_2"/>
<reg32 offset="0x00154" name="UBWC_PREDICTION_MODE"/>
</domain>

View File

@ -101,11 +101,55 @@ struct opp_table *_find_opp_table(struct device *dev)
* representation in the OPP table and manage the clock configuration themselves
 * in a platform-specific way.
*/
static bool assert_single_clk(struct opp_table *opp_table)
static bool assert_single_clk(struct opp_table *opp_table,
unsigned int __always_unused index)
{
return !WARN_ON(opp_table->clk_count > 1);
}
/*
* Returns true if clock table is large enough to contain the clock index.
*/
static bool assert_clk_index(struct opp_table *opp_table,
unsigned int index)
{
return opp_table->clk_count > index;
}
/*
* Returns true if bandwidth table is large enough to contain the bandwidth index.
*/
static bool assert_bandwidth_index(struct opp_table *opp_table,
unsigned int index)
{
return opp_table->path_count > index;
}
/**
* dev_pm_opp_get_bw() - Gets the bandwidth corresponding to an opp
* @opp: opp for which bandwidth has to be returned for
* @peak: select peak or average bandwidth
* @index: bandwidth index
*
* Return: bandwidth in kBps, else return 0
*/
unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
{
if (IS_ERR_OR_NULL(opp)) {
pr_err("%s: Invalid parameters\n", __func__);
return 0;
}
if (index >= opp->opp_table->path_count)
return 0;
if (!opp->bandwidth)
return 0;
return peak ? opp->bandwidth[index].peak : opp->bandwidth[index].avg;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_bw);
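
A hedged usage sketch of the new helper: pick an OPP for a target frequency, then read the peak bandwidth (kBps) of interconnect path 0. Error handling is minimal and the surrounding driver context is assumed.

#include <linux/err.h>
#include <linux/pm_opp.h>

static unsigned long example_peak_bw(struct device *dev, unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned long peak_kBps;

	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp))
		return 0;

	peak_kBps = dev_pm_opp_get_bw(opp, true, 0);	/* peak, path 0 */
	dev_pm_opp_put(opp);

	return peak_kBps;
}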
/**
* dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
* @opp: opp for which voltage has to be returned for
@ -499,12 +543,12 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key),
bool (*assert)(struct opp_table *opp_table))
bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
/* Assert that the requirement is met */
if (assert && !assert(opp_table))
if (assert && !assert(opp_table, index))
return ERR_PTR(-EINVAL);
mutex_lock(&opp_table->lock);
@ -532,7 +576,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp,
unsigned long opp_key, unsigned long key),
bool (*assert)(struct opp_table *opp_table))
bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
struct opp_table *opp_table;
struct dev_pm_opp *opp;
@ -555,7 +599,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
static struct dev_pm_opp *_find_key_exact(struct device *dev,
unsigned long key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*assert)(struct opp_table *opp_table))
bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
/*
* The value of key will be updated here, but will be ignored as the
@ -568,7 +612,7 @@ static struct dev_pm_opp *_find_key_exact(struct device *dev,
static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*assert)(struct opp_table *opp_table))
bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _opp_table_find_key(opp_table, key, index, available, read,
_compare_ceil, assert);
@ -577,7 +621,7 @@ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table,
static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*assert)(struct opp_table *opp_table))
bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _find_key(dev, key, index, available, read, _compare_ceil,
assert);
@ -586,7 +630,7 @@ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key,
static struct dev_pm_opp *_find_key_floor(struct device *dev,
unsigned long *key, int index, bool available,
unsigned long (*read)(struct dev_pm_opp *opp, int index),
bool (*assert)(struct opp_table *opp_table))
bool (*assert)(struct opp_table *opp_table, unsigned int index))
{
return _find_key(dev, key, index, available, read, _compare_floor,
assert);
@ -647,7 +691,8 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq,
u32 index, bool available)
{
return _find_key_exact(dev, freq, index, available, _read_freq, NULL);
return _find_key_exact(dev, freq, index, available, _read_freq,
assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed);
@ -707,7 +752,8 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq,
u32 index)
{
return _find_key_ceil(dev, freq, index, true, _read_freq, NULL);
return _find_key_ceil(dev, freq, index, true, _read_freq,
assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed);
@ -760,7 +806,7 @@ struct dev_pm_opp *
dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq,
u32 index)
{
return _find_key_floor(dev, freq, index, true, _read_freq, NULL);
return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed);
@ -878,7 +924,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw,
unsigned long temp = *bw;
struct dev_pm_opp *opp;
opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL);
opp = _find_key_ceil(dev, &temp, index, true, _read_bw,
assert_bandwidth_index);
*bw = temp;
return opp;
}
@ -909,7 +956,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
unsigned long temp = *bw;
struct dev_pm_opp *opp;
opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL);
opp = _find_key_floor(dev, &temp, index, true, _read_bw,
assert_bandwidth_index);
*bw = temp;
return opp;
}
@ -1702,7 +1750,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
if (IS_ERR(opp_table))
return;
if (!assert_single_clk(opp_table))
if (!assert_single_clk(opp_table, 0))
goto put_table;
mutex_lock(&opp_table->lock);
@ -2054,7 +2102,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev,
unsigned long tol, u_volt = data->u_volt;
int ret;
if (!assert_single_clk(opp_table))
if (!assert_single_clk(opp_table, 0))
return -EINVAL;
new_opp = _opp_allocate(opp_table);
@ -2810,7 +2858,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
return r;
}
if (!assert_single_clk(opp_table)) {
if (!assert_single_clk(opp_table, 0)) {
r = -EINVAL;
goto put_table;
}
@ -2886,7 +2934,7 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
return r;
}
if (!assert_single_clk(opp_table)) {
if (!assert_single_clk(opp_table, 0)) {
r = -EINVAL;
goto put_table;
}

View File

@ -102,6 +102,8 @@ struct dev_pm_opp_data {
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
void dev_pm_opp_put_opp_table(struct opp_table *opp_table);
unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index);
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *supplies);
@ -205,6 +207,11 @@ static inline struct opp_table *dev_pm_opp_get_opp_table_indexed(struct device *
static inline void dev_pm_opp_put_opp_table(struct opp_table *opp_table) {}
static inline unsigned long dev_pm_opp_get_bw(struct dev_pm_opp *opp, bool peak, int index)
{
return 0;
}
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
return 0;

View File

@ -90,6 +90,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_RAYTRACING 0x11 /* RO */
#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
#define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */
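
Userspace-side sketch of reading the new parameter through DRM_MSM_GET_PARAM using libdrm's drmCommandWriteRead(); the wrapper function name is made up and the msm_drm.h include path depends on your headers setup.

#include <stdint.h>
#include <string.h>

#include <xf86drm.h>
#include <drm/msm_drm.h>

/* Query MSM_PARAM_UCHE_TRAP_BASE for the 3D pipe; returns 0 on success. */
static int query_uche_trap_base(int fd, uint64_t *value)
{
	struct drm_msm_param req;
	int ret;

	memset(&req, 0, sizeof(req));
	req.pipe = MSM_PIPE_3D0;
	req.param = MSM_PARAM_UCHE_TRAP_BASE;

	ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
	if (ret)
		return ret;

	*value = req.value;
	return 0;
}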
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #