Merge tag 'drm-msm-next-2024-07-04' of https://gitlab.freedesktop.org/drm/msm into drm-next

Updates for v6.11

Core:
- SM7150 support

DPU:
- SM7150 support
- Fix DSC support for DSI panels in video mode
- Fix TE vsync source support for DSI command-mode panels
- Fix for devices without UBWC in the display controller (i.e.
  QCM2290)

DSI:
- Remove unused register-writing wrappers
- Fix DSC support for panels in video mode
- Add support for parsing TE vsync source
- Add support for MSM8937 (28nm DSI PHY)

MDP5:
- Add support for MSM8937
- Fix configuration for MSM8953

GPU:
- Split giant device table into per-gen "hw catalog" similar to
  what is done on the display side of the driver
- Fix a702 UBWC mode
- Fix unused variable warnings
- GPU memory traces
- Add param for userspace to know if raytracing is supported
- Memory barrier cleanup and GBIF unhalt fix
- X185 support (aka gpu in X1 laptop chips)
- a505 support
- Misc fixes

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvZQpYEHpSCgXGJ2kaHJDK6QFAFfTsfiWm4b2zZOnjXGw@mail.gmail.com
Committed by Daniel Vetter on 2024-07-05 12:45:40 +02:00
commit 62a05f4ae9
72 changed files with 4060 additions and 2493 deletions


@@ -32,6 +32,7 @@ properties:
- qcom,sm6125-dsi-ctrl
- qcom,sm6350-dsi-ctrl
- qcom,sm6375-dsi-ctrl
- qcom,sm7150-dsi-ctrl
- qcom,sm8150-dsi-ctrl
- qcom,sm8250-dsi-ctrl
- qcom,sm8350-dsi-ctrl
@@ -162,6 +163,22 @@ properties:
items:
enum: [ 0, 1, 2, 3 ]
qcom,te-source:
$ref: /schemas/types.yaml#/definitions/string
description:
Specifies the source of vsync signal from the panel used for
tearing elimination.
default: mdp_vsync_p
enum:
- mdp_vsync_p
- mdp_vsync_s
- mdp_vsync_e
- timer0
- timer1
- timer2
- timer3
- timer4
required:
- port@0
- port@1
@@ -332,6 +349,7 @@ allOf:
enum:
- qcom,sc7180-dsi-ctrl
- qcom,sc7280-dsi-ctrl
- qcom,sm7150-dsi-ctrl
- qcom,sm8150-dsi-ctrl
- qcom,sm8250-dsi-ctrl
- qcom,sm8350-dsi-ctrl
@@ -452,6 +470,7 @@ examples:
dsi0_out: endpoint {
remote-endpoint = <&sn65dsi86_in>;
data-lanes = <0 1 2 3>;
qcom,te-source = "mdp_vsync_e";
};
};
};
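The new qcom,te-source property above tells the DSI host which vsync source the command-mode panel's TE signal is wired to, pairing with the "Add support for parsing TE vsync source" item in the tag. A hedged sketch of how a driver might read it, defaulting to mdp_vsync_p as the binding does; the helper name is made up and the real dsi_host.c parsing may differ:

#include <linux/of.h>

/* Hypothetical parse helper for the qcom,te-source string property. */
static const char *example_get_te_source(const struct device_node *np)
{
	const char *te_source = "mdp_vsync_p";	/* binding default */

	of_property_read_string(np, "qcom,te-source", &te_source);

	return te_source;
}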


@@ -16,6 +16,7 @@ properties:
compatible:
enum:
- qcom,dsi-phy-28nm-8226
- qcom,dsi-phy-28nm-8937
- qcom,dsi-phy-28nm-8960
- qcom,dsi-phy-28nm-hpm
- qcom,dsi-phy-28nm-hpm-fam-b


@@ -23,6 +23,9 @@ properties:
- items:
- pattern: '^qcom,adreno-gmu-[67][0-9][0-9]\.[0-9]$'
- const: qcom,adreno-gmu
- items:
- pattern: '^qcom,adreno-gmu-x[1-9][0-9][0-9]\.[0-9]$'
- const: qcom,adreno-gmu
- const: qcom,adreno-gmu-wrapper
reg:
@@ -225,6 +228,7 @@ allOf:
- qcom,adreno-gmu-730.1
- qcom,adreno-gmu-740.1
- qcom,adreno-gmu-750.1
- qcom,adreno-gmu-x185.1
then:
properties:
reg:


@@ -10,6 +10,18 @@ title: Adreno or Snapdragon GPUs
maintainers:
- Rob Clark <robdclark@gmail.com>
# dtschema does not select nodes based on pattern+const, so add custom select
# as a work-around:
select:
properties:
compatible:
contains:
enum:
- qcom,adreno
- amd,imageon
required:
- compatible
properties:
compatible:
oneOf:
@@ -17,7 +29,7 @@ properties:
The driver is parsing the compat string for Adreno to
figure out the chip-id.
items:
- pattern: '^qcom,adreno-[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]$'
- pattern: '^qcom,adreno-[0-9a-f]{8}$'
- const: qcom,adreno
- description: |
The driver is parsing the compat string for Adreno to
@@ -32,9 +44,13 @@ properties:
- pattern: '^amd,imageon-200\.[0-1]$'
- const: amd,imageon
clocks: true
clocks:
minItems: 2
maxItems: 7
clock-names: true
clock-names:
minItems: 2
maxItems: 7
reg:
minItems: 1
@@ -42,7 +58,10 @@ properties:
reg-names:
minItems: 1
maxItems: 3
items:
- const: kgsl_3d0_reg_memory
- const: cx_mem
- const: cx_dbgc
interrupts:
maxItems: 1


@@ -25,6 +25,7 @@ properties:
- qcom,msm8226-mdp5
- qcom,msm8916-mdp5
- qcom,msm8917-mdp5
- qcom,msm8937-mdp5
- qcom,msm8953-mdp5
- qcom,msm8974-mdp5
- qcom,msm8976-mdp5


@@ -126,6 +126,7 @@ patternProperties:
- qcom,dsi-phy-14nm-8953
- qcom,dsi-phy-20nm
- qcom,dsi-phy-28nm-8226
- qcom,dsi-phy-28nm-8937
- qcom,dsi-phy-28nm-hpm
- qcom,dsi-phy-28nm-hpm-fam-b
- qcom,dsi-phy-28nm-lp


@@ -0,0 +1,143 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/qcom,sm7150-dpu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm SM7150 Display Processing Unit (DPU)
maintainers:
- Danila Tikhonov <danila@jiaxyga.com>
$ref: /schemas/display/msm/dpu-common.yaml#
properties:
compatible:
const: qcom,sm7150-dpu
reg:
items:
- description: Address offset and size for mdp register set
- description: Address offset and size for vbif register set
reg-names:
items:
- const: mdp
- const: vbif
clocks:
items:
- description: Display hf axi clock
- description: Display ahb clock
- description: Display rotator clock
- description: Display lut clock
- description: Display core clock
- description: Display vsync clock
clock-names:
items:
- const: bus
- const: iface
- const: rot
- const: lut
- const: core
- const: vsync
required:
- compatible
- reg
- reg-names
- clocks
- clock-names
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
display-controller@ae01000 {
compatible = "qcom,sm7150-dpu";
reg = <0x0ae01000 0x8f000>,
<0x0aeb0000 0x2008>;
reg-names = "mdp", "vbif";
clocks = <&gcc_disp_hf_axi_clk>,
<&dispcc_mdss_ahb_clk>,
<&dispcc_mdss_rot_clk>,
<&dispcc_mdss_mdp_lut_clk>,
<&dispcc_mdss_mdp_clk>,
<&dispcc_mdss_vsync_clk>;
clock-names = "bus",
"iface",
"rot",
"lut",
"core",
"vsync";
assigned-clocks = <&dispcc_mdss_vsync_clk>;
assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmhpd RPMHPD_CX>;
interrupt-parent = <&mdss>;
interrupts = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf1_out: endpoint {
remote-endpoint = <&mdss_dsi0_in>;
};
};
port@1 {
reg = <1>;
dpu_intf2_out: endpoint {
remote-endpoint = <&mdss_dsi1_in>;
};
};
port@2 {
reg = <2>;
dpu_intf0_out: endpoint {
remote-endpoint = <&dp_in>;
};
};
};
mdp_opp_table: opp-table {
compatible = "operating-points-v2";
opp-19200000 {
opp-hz = /bits/ 64 <19200000>;
required-opps = <&rpmhpd_opp_min_svs>;
};
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-344000000 {
opp-hz = /bits/ 64 <344000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
opp-430000000 {
opp-hz = /bits/ 64 <430000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
...


@@ -0,0 +1,458 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/qcom,sm7150-mdss.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm SM7150 Display MDSS
maintainers:
- Danila Tikhonov <danila@jiaxyga.com>
description:
SM7150 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like
DPU display controller, DSI and DP interfaces etc.
$ref: /schemas/display/msm/mdss-common.yaml#
properties:
compatible:
const: qcom,sm7150-mdss
clocks:
items:
- description: Display ahb clock from gcc
- description: Display hf axi clock
- description: Display sf axi clock
- description: Display core clock
clock-names:
items:
- const: iface
- const: bus
- const: nrt_bus
- const: core
iommus:
maxItems: 1
interconnects:
items:
- description: Interconnect path from mdp0 port to the data bus
- description: Interconnect path from mdp1 port to the data bus
- description: Interconnect path from CPU to the reg bus
interconnect-names:
items:
- const: mdp0-mem
- const: mdp1-mem
- const: cpu-cfg
patternProperties:
"^display-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm7150-dpu
"^displayport-controller@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,sm7150-dp
"^dsi@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
items:
- const: qcom,sm7150-dsi-ctrl
- const: qcom,mdss-dsi-ctrl
"^phy@[0-9a-f]+$":
type: object
additionalProperties: true
properties:
compatible:
const: qcom,dsi-phy-10nm
required:
- compatible
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/interconnect/qcom,icc.h>
#include <dt-bindings/interconnect/qcom,sm7150-rpmh.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom,rpmhpd.h>
display-subsystem@ae00000 {
compatible = "qcom,sm7150-mdss";
reg = <0x0ae00000 0x1000>;
reg-names = "mdss";
power-domains = <&dispcc_mdss_gdsc>;
clocks = <&dispcc_mdss_ahb_clk>,
<&gcc_disp_hf_axi_clk>,
<&gcc_disp_sf_axi_clk>,
<&dispcc_mdss_mdp_clk>;
clock-names = "iface",
"bus",
"nrt_bus",
"core";
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <1>;
interconnects = <&mmss_noc MASTER_MDP_PORT0 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI_CH0 QCOM_ICC_TAG_ALWAYS>,
<&mmss_noc MASTER_MDP_PORT1 QCOM_ICC_TAG_ALWAYS
&mc_virt SLAVE_EBI_CH0 QCOM_ICC_TAG_ALWAYS>,
<&gem_noc MASTER_AMPSS_M0 QCOM_ICC_TAG_ACTIVE_ONLY
&config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
interconnect-names = "mdp0-mem",
"mdp1-mem",
"cpu-cfg";
iommus = <&apps_smmu 0x800 0x440>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
display-controller@ae01000 {
compatible = "qcom,sm7150-dpu";
reg = <0x0ae01000 0x8f000>,
<0x0aeb0000 0x2008>;
reg-names = "mdp", "vbif";
clocks = <&gcc_disp_hf_axi_clk>,
<&dispcc_mdss_ahb_clk>,
<&dispcc_mdss_rot_clk>,
<&dispcc_mdss_mdp_lut_clk>,
<&dispcc_mdss_mdp_clk>,
<&dispcc_mdss_vsync_clk>;
clock-names = "bus",
"iface",
"rot",
"lut",
"core",
"vsync";
assigned-clocks = <&dispcc_mdss_vsync_clk>;
assigned-clock-rates = <19200000>;
operating-points-v2 = <&mdp_opp_table>;
power-domains = <&rpmhpd RPMHPD_CX>;
interrupt-parent = <&mdss>;
interrupts = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dpu_intf1_out: endpoint {
remote-endpoint = <&mdss_dsi0_in>;
};
};
port@1 {
reg = <1>;
dpu_intf2_out: endpoint {
remote-endpoint = <&mdss_dsi1_in>;
};
};
port@2 {
reg = <2>;
dpu_intf0_out: endpoint {
remote-endpoint = <&dp_in>;
};
};
};
mdp_opp_table: opp-table {
compatible = "operating-points-v2";
opp-19200000 {
opp-hz = /bits/ 64 <19200000>;
required-opps = <&rpmhpd_opp_min_svs>;
};
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-344000000 {
opp-hz = /bits/ 64 <344000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
opp-430000000 {
opp-hz = /bits/ 64 <430000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
dsi@ae94000 {
compatible = "qcom,sm7150-dsi-ctrl",
"qcom,mdss-dsi-ctrl";
reg = <0x0ae94000 0x400>;
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
interrupts = <4>;
clocks = <&dispcc_mdss_byte0_clk>,
<&dispcc_mdss_byte0_intf_clk>,
<&dispcc_mdss_pclk0_clk>,
<&dispcc_mdss_esc0_clk>,
<&dispcc_mdss_ahb_clk>,
<&gcc_disp_hf_axi_clk>;
clock-names = "byte",
"byte_intf",
"pixel",
"core",
"iface",
"bus";
assigned-clocks = <&dispcc_mdss_byte0_clk_src>,
<&dispcc_mdss_pclk0_clk_src>;
assigned-clock-parents = <&mdss_dsi0_phy 0>,
<&mdss_dsi0_phy 1>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd RPMHPD_CX>;
phys = <&mdss_dsi0_phy>;
phy-names = "dsi";
#address-cells = <1>;
#size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mdss_dsi0_in: endpoint {
remote-endpoint = <&dpu_intf1_out>;
};
};
port@1 {
reg = <1>;
mdss_dsi0_out: endpoint {
};
};
};
dsi_opp_table: opp-table {
compatible = "operating-points-v2";
opp-180000000 {
opp-hz = /bits/ 64 <180000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-275000000 {
opp-hz = /bits/ 64 <275000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-358000000 {
opp-hz = /bits/ 64 <358000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
};
};
mdss_dsi0_phy: phy@ae94400 {
compatible = "qcom,dsi-phy-10nm";
reg = <0x0ae94400 0x200>,
<0x0ae94600 0x280>,
<0x0ae94a00 0x1e0>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
#clock-cells = <1>;
#phy-cells = <0>;
clocks = <&dispcc_mdss_ahb_clk>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";
vdds-supply = <&vdda_mipi_dsi0_pll>;
};
dsi@ae96000 {
compatible = "qcom,sm7150-dsi-ctrl",
"qcom,mdss-dsi-ctrl";
reg = <0x0ae96000 0x400>;
reg-names = "dsi_ctrl";
interrupt-parent = <&mdss>;
interrupts = <5>;
clocks = <&dispcc_mdss_byte1_clk>,
<&dispcc_mdss_byte1_intf_clk>,
<&dispcc_mdss_pclk1_clk>,
<&dispcc_mdss_esc1_clk>,
<&dispcc_mdss_ahb_clk>,
<&gcc_disp_hf_axi_clk>;
clock-names = "byte",
"byte_intf",
"pixel",
"core",
"iface",
"bus";
assigned-clocks = <&dispcc_mdss_byte1_clk_src>,
<&dispcc_mdss_pclk1_clk_src>;
assigned-clock-parents = <&mdss_dsi1_phy 0>,
<&mdss_dsi1_phy 1>;
operating-points-v2 = <&dsi_opp_table>;
power-domains = <&rpmhpd RPMHPD_CX>;
phys = <&mdss_dsi1_phy>;
phy-names = "dsi";
#address-cells = <1>;
#size-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mdss_dsi1_in: endpoint {
remote-endpoint = <&dpu_intf2_out>;
};
};
port@1 {
reg = <1>;
mdss_dsi1_out: endpoint {
};
};
};
};
mdss_dsi1_phy: phy@ae96400 {
compatible = "qcom,dsi-phy-10nm";
reg = <0x0ae96400 0x200>,
<0x0ae96600 0x280>,
<0x0ae96a00 0x1e0>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
#clock-cells = <1>;
#phy-cells = <0>;
clocks = <&dispcc_mdss_ahb_clk>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";
vdds-supply = <&vdda_mipi_dsi1_pll>;
};
displayport-controller@ae90000 {
compatible = "qcom,sm7150-dp";
reg = <0xae90000 0x200>,
<0xae90200 0x200>,
<0xae90400 0xc00>,
<0xae91000 0x400>,
<0xae91400 0x400>;
interrupt-parent = <&mdss>;
interrupts = <12>;
clocks = <&dispcc_mdss_ahb_clk>,
<&dispcc_mdss_dp_aux_clk>,
<&dispcc_mdss_dp_link_clk>,
<&dispcc_mdss_dp_link_intf_clk>,
<&dispcc_mdss_dp_pixel_clk>;
clock-names = "core_iface",
"core_aux",
"ctrl_link",
"ctrl_link_iface",
"stream_pixel";
assigned-clocks = <&dispcc_mdss_dp_link_clk_src>,
<&dispcc_mdss_dp_pixel_clk_src>;
assigned-clock-parents = <&dp_phy 0>,
<&dp_phy 1>;
operating-points-v2 = <&dp_opp_table>;
power-domains = <&rpmhpd RPMHPD_CX>;
phys = <&dp_phy>;
phy-names = "dp";
#sound-dai-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
dp_in: endpoint {
remote-endpoint = <&dpu_intf0_out>;
};
};
port@1 {
reg = <1>;
dp_out: endpoint {
};
};
};
dp_opp_table: opp-table {
compatible = "operating-points-v2";
opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
required-opps = <&rpmhpd_opp_low_svs>;
};
opp-270000000 {
opp-hz = /bits/ 64 <270000000>;
required-opps = <&rpmhpd_opp_svs>;
};
opp-540000000 {
opp-hz = /bits/ 64 <540000000>;
required-opps = <&rpmhpd_opp_svs_l1>;
};
opp-810000000 {
opp-hz = /bits/ 64 <810000000>;
required-opps = <&rpmhpd_opp_nom>;
};
};
};
};
...


@@ -1394,6 +1394,20 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
}
EXPORT_SYMBOL_GPL(qcom_scm_lmh_dcvsh);
int qcom_scm_gpu_init_regs(u32 gpu_req)
{
struct qcom_scm_desc desc = {
.svc = QCOM_SCM_SVC_GPU,
.cmd = QCOM_SCM_SVC_GPU_INIT_REGS,
.arginfo = QCOM_SCM_ARGS(1),
.args[0] = gpu_req,
.owner = ARM_SMCCC_OWNER_SIP,
};
return qcom_scm_call(__scm->dev, &desc, NULL);
}
EXPORT_SYMBOL_GPL(qcom_scm_gpu_init_regs);
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;


@@ -138,6 +138,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
#define QCOM_SCM_WAITQ_RESUME 0x02
#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
#define QCOM_SCM_SVC_GPU 0x28
#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
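The two hunks above wire up a new SCM service (QCOM_SCM_SVC_GPU) with a single command (QCOM_SCM_SVC_GPU_INIT_REGS) and export qcom_scm_gpu_init_regs(), which asks the secure world to program a set of protected GPU registers selected by a request bitmask. A minimal caller sketch, assuming the prototype is exposed through the qcom_scm header; BIT(0) is a placeholder, as the real request bits live in the GPU driver and are not part of this diff:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/firmware/qcom/qcom_scm.h>

/* Hypothetical secure-init helper around the new SCM call. */
static int example_gpu_secure_init(struct device *dev)
{
	int ret = qcom_scm_gpu_init_regs(BIT(0));	/* placeholder request bits */

	if (ret)
		dev_warn(dev, "SCM GPU register init failed: %d\n", ret);

	return ret;
}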


@@ -33,6 +33,7 @@ config DRM_MSM
select PM_OPP
select NVMEM
select PM_GENERIC_DOMAINS
select TRACE_GPU_MEM
help
DRM/KMS driver for MSM/snapdragon.
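Selecting TRACE_GPU_MEM here backs the "GPU memory traces" item in the tag: it makes the generic gpu_mem_total tracepoint (include/trace/events/gpu_mem.h) available so the driver can report per-process and global GPU memory totals to tracing tools. A hedged sketch of the kind of call involved; the function and variable names are placeholders rather than the driver's actual bookkeeping:

#include <trace/events/gpu_mem.h>

/* Hypothetical accounting hook: gpu_id 0, the owning pid (0 for the
 * device-wide total) and the new total in bytes. */
static void example_report_gpu_mem(u32 pid, u64 total_bytes)
{
	trace_gpu_mem_total(0, pid, total_bytes);
}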


@@ -8,13 +8,18 @@ ccflags-$(CONFIG_DRM_MSM_DP) += -I $(src)/dp
adreno-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a2xx_catalog.o \
adreno/a2xx_gpu.o \
adreno/a2xx_gpummu.o \
adreno/a3xx_catalog.o \
adreno/a3xx_gpu.o \
adreno/a4xx_catalog.o \
adreno/a4xx_gpu.o \
adreno/a5xx_catalog.o \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
adreno/a5xx_preempt.o \
adreno/a6xx_catalog.o \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \


@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*/
#include "adreno_gpu.h"
static const struct adreno_info a2xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x02000000),
.family = ADRENO_2XX_GEN1,
.revn = 200,
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, { /* a200 on i.mx51 has only 128kib gmem */
.chip_ids = ADRENO_CHIP_IDS(0x02000001),
.family = ADRENO_2XX_GEN1,
.revn = 201,
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x02020000),
.family = ADRENO_2XX_GEN2,
.revn = 220,
.fw = {
[ADRENO_FW_PM4] = "leia_pm4_470.fw",
[ADRENO_FW_PFP] = "leia_pfp_470.fw",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}
};
DECLARE_ADRENO_GPULIST(a2xx);
MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
MODULE_FIRMWARE("qcom/yamato_pfp.fw");
MODULE_FIRMWARE("qcom/yamato_pm4.fw");


@@ -0,0 +1,81 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*/
#include "adreno_gpu.h"
static const struct adreno_info a3xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x03000512),
.family = ADRENO_3XX,
.fw = {
[ADRENO_FW_PM4] = "a330_pm4.fw",
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000520),
.family = ADRENO_3XX,
.revn = 305,
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000600),
.family = ADRENO_3XX,
.revn = 307, /* because a305c is revn==306 */
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
0x03020001,
0x03020002
),
.family = ADRENO_3XX,
.revn = 320,
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03030000,
0x03030001,
0x03030002
),
.family = ADRENO_3XX,
.revn = 330,
.fw = {
[ADRENO_FW_PM4] = "a330_pm4.fw",
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}
};
DECLARE_ADRENO_GPULIST(a3xx);
MODULE_FIRMWARE("qcom/a300_pm4.fw");
MODULE_FIRMWARE("qcom/a300_pfp.fw");
MODULE_FIRMWARE("qcom/a330_pm4.fw");
MODULE_FIRMWARE("qcom/a330_pfp.fw");


@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*/
#include "adreno_gpu.h"
static const struct adreno_info a4xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x04000500),
.family = ADRENO_4XX,
.revn = 405,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04020000),
.family = ADRENO_4XX,
.revn = 420,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04030002),
.family = ADRENO_4XX,
.revn = 430,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}
};
DECLARE_ADRENO_GPULIST(a4xx);
MODULE_FIRMWARE("qcom/a420_pm4.fw");
MODULE_FIRMWARE("qcom/a420_pfp.fw");


@@ -0,0 +1,161 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*/
#include "adreno_gpu.h"
static const struct adreno_info a5xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x05000500),
.family = ADRENO_5XX,
.revn = 505,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_128K + SZ_8K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000600),
.family = ADRENO_5XX,
.revn = 506,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_128K + SZ_8K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a506_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000800),
.family = ADRENO_5XX,
.revn = 508,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_128K + SZ_8K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a508_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000900),
.family = ADRENO_5XX,
.revn = 509,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_256K + SZ_16K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
/* Adreno 509 uses the same ZAP as 512 */
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010000),
.family = ADRENO_5XX,
.revn = 510,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = SZ_256K,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.init = a5xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010200),
.family = ADRENO_5XX,
.revn = 512,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_256K + SZ_16K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x05030002,
0x05030004
),
.family = ADRENO_5XX,
.revn = 530,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
},
.gmem = SZ_1M,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05040001),
.family = ADRENO_5XX,
.revn = 540,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
[ADRENO_FW_GPMU] = "a540_gpmu.fw2",
},
.gmem = SZ_1M,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}
};
DECLARE_ADRENO_GPULIST(a5xx);
MODULE_FIRMWARE("qcom/a530_pm4.fw");
MODULE_FIRMWARE("qcom/a530_pfp.fw");
MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a540_gpmu.fw2");


@@ -439,7 +439,8 @@ void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
const struct adreno_five_hwcg_regs *regs;
unsigned int i, sz;
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu)) {
if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
adreno_is_a508(adreno_gpu)) {
regs = a50x_hwcg;
sz = ARRAY_SIZE(a50x_hwcg);
} else if (adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu)) {
@@ -483,7 +484,8 @@ static int a5xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
/* Specify workarounds for various microcode issues */
if (adreno_is_a506(adreno_gpu) || adreno_is_a530(adreno_gpu)) {
if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
adreno_is_a530(adreno_gpu)) {
/* Workaround for token end syncs
* Force a WFI after every direct-render 3D mode draw and every
* 2D mode 3 draw
@@ -752,10 +754,11 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
0x00100000 + adreno_gpu->info->gmem - 1);
gpu_write(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_HI, 0x00000000);
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
adreno_is_a510(adreno_gpu)) {
if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu)) {
gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x20);
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
adreno_is_a508(adreno_gpu))
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x400);
else
gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x20);
@@ -771,7 +774,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
}
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu))
if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
adreno_is_a508(adreno_gpu))
gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL,
(0x100 << 11 | 0x100 << 22));
else if (adreno_is_a509(adreno_gpu) || adreno_is_a510(adreno_gpu) ||
@@ -789,8 +793,9 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
* Disable the RB sampler datapath DP2 clock gating optimization
* for 1-SP GPUs, as it is enabled by default.
*/
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
adreno_is_a509(adreno_gpu) || adreno_is_a512(adreno_gpu))
if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
adreno_is_a508(adreno_gpu) || adreno_is_a509(adreno_gpu) ||
adreno_is_a512(adreno_gpu))
gpu_rmw(gpu, REG_A5XX_RB_DBG_ECO_CNTL, 0, (1 << 9));
/* Disable UCHE global filter as SP can invalidate/flush independently */
@@ -1345,7 +1350,7 @@ static int a5xx_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
/* Adreno 506, 508, 509, 510, 512 needs manual RBBM sus/res control */
/* Adreno 505, 506, 508, 509, 510, 512 needs manual RBBM sus/res control */
if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu))) {
/* Halt the sp_input_clk at HM level */
gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0x00000055);
@@ -1388,9 +1393,9 @@ static int a5xx_pm_suspend(struct msm_gpu *gpu)
u32 mask = 0xf;
int i, ret;
/* A506, A508, A510 have 3 XIN ports in VBIF */
if (adreno_is_a506(adreno_gpu) || adreno_is_a508(adreno_gpu) ||
adreno_is_a510(adreno_gpu))
/* A505, A506, A508, A510 have 3 XIN ports in VBIF */
if (adreno_is_a505(adreno_gpu) || adreno_is_a506(adreno_gpu) ||
adreno_is_a508(adreno_gpu) || adreno_is_a510(adreno_gpu))
mask = 0x7;
/* Clear the VBIF pipe before shutting down */

File diff suppressed because it is too large.


@@ -466,9 +466,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
int ret;
u32 val;
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
/* Wait for the register to finish posting */
wmb();
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
val & (1 << 1), 100, 10000);
@@ -769,8 +767,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx;
u32 fence_range_lower, fence_range_upper;
u32 chipid, chipid_min = 0;
u32 chipid = 0;
int ret;
/* Vote veto for FAL10 */
@@ -830,27 +829,8 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
*/
gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);
/* NOTE: A730 may also fall in this if-condition with a future GMU fw update. */
if (adreno_is_a7xx(adreno_gpu) && !adreno_is_a730(adreno_gpu)) {
/* A7xx GPUs have obfuscated chip IDs. Use constant maj = 7 */
chipid = FIELD_PREP(GENMASK(31, 24), 0x7);
/*
* The min part has a 1-1 mapping for each GPU SKU.
* This chipid that the GMU expects corresponds to the "GENX_Y_Z" naming,
* where X = major, Y = minor, Z = patchlevel, e.g. GEN7_2_1 for prod A740.
*/
if (adreno_is_a740(adreno_gpu))
chipid_min = 2;
else if (adreno_is_a750(adreno_gpu))
chipid_min = 9;
else
return -EINVAL;
chipid |= FIELD_PREP(GENMASK(23, 16), chipid_min);
/* Get the patchid (which may vary) from the device tree */
chipid |= FIELD_PREP(GENMASK(15, 8), adreno_patchid(adreno_gpu));
if (a6xx_info->gmu_chipid) {
chipid = a6xx_info->gmu_chipid;
} else {
/*
* Note that the GMU has a slightly different layout for
@@ -1329,7 +1309,13 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
if (!pri_count)
return -EINVAL;
sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
/*
* Some targets have a separate gfx mxc rail. So try to read that first and then fall back
* to regular mx rail if it is missing
*/
sec = cmd_db_read_aux_data("gmxc.lvl", &sec_count);
if (IS_ERR(sec) && sec != ERR_PTR(-EPROBE_DEFER))
sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
if (IS_ERR(sec))
return PTR_ERR(sec);

File diff suppressed because it is too large.


@@ -12,6 +12,18 @@
extern bool hang_debug;
/**
* struct a6xx_info - a6xx specific information from device table
*
* @hwcg: hw clock gating register sequence
* @protect: CP_PROTECT settings
*/
struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
u32 gmu_chipid;
};
struct a6xx_gpu {
struct adreno_gpu base;
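struct a6xx_info is the new per-SKU payload hung off adreno_info (see the adreno_gpu.h hunks later in this commit); the a6xx catalog entries that actually fill it in live in a6xx_catalog.c, presumably one of the diffs suppressed above as too large. A hypothetical entry, sketched only from the fields shown here; the hwcg/protect table names are illustrative, and gmu_chipid is left unset since it is only needed by GMUs that expect an obfuscated chip id:

static const struct adreno_info example_a6xx_gpus[] = {
	{
		.chip_ids = ADRENO_CHIP_IDS(0x06030001),
		.family = ADRENO_6XX_GEN1,
		.fw = {
			[ADRENO_FW_SQE] = "a630_sqe.fw",
			[ADRENO_FW_GMU] = "a630_gmu.bin",
		},
		.gmem = SZ_1M,
		.inactive_period = DRM_MSM_INACTIVE_PERIOD,
		.init = a6xx_gpu_init,
		.a6xx = &(const struct a6xx_info) {
			.hwcg = a630_hwcg,		/* clock-gating sequence, now per-SKU */
			.protect = &a630_protect,	/* CP_PROTECT ranges (DECLARE_ADRENO_PROTECT) */
		},
	},
};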


@@ -8,19 +8,16 @@
#include "a6xx_gpu_state.h"
#include "a6xx_gmu.xml.h"
/* Ignore diagnostics about register tables that we aren't using yet. We don't
* want to modify these headers too much from their original source.
*/
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#pragma GCC diagnostic ignored "-Wunused-const-variable"
static const unsigned int *gen7_0_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_2_0_external_core_regs[] __always_unused;
static const unsigned int *gen7_9_0_external_core_regs[] __always_unused;
static struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] __always_unused;
static const u32 gen7_9_0_cx_debugbus_blocks[] __always_unused;
#include "adreno_gen7_0_0_snapshot.h"
#include "adreno_gen7_2_0_snapshot.h"
#include "adreno_gen7_9_0_snapshot.h"
#pragma GCC diagnostic pop
struct a6xx_gpu_state_obj {
const void *handle;
u32 *data;


@@ -20,610 +20,36 @@ bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
static const struct adreno_info gpulist[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x02000000),
.family = ADRENO_2XX_GEN1,
.revn = 200,
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, { /* a200 on i.mx51 has only 128kib gmem */
.chip_ids = ADRENO_CHIP_IDS(0x02000001),
.family = ADRENO_2XX_GEN1,
.revn = 201,
.fw = {
[ADRENO_FW_PM4] = "yamato_pm4.fw",
[ADRENO_FW_PFP] = "yamato_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x02020000),
.family = ADRENO_2XX_GEN2,
.revn = 220,
.fw = {
[ADRENO_FW_PM4] = "leia_pm4_470.fw",
[ADRENO_FW_PFP] = "leia_pfp_470.fw",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a2xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000512),
.family = ADRENO_3XX,
.fw = {
[ADRENO_FW_PM4] = "a330_pm4.fw",
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000520),
.family = ADRENO_3XX,
.revn = 305,
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000600),
.family = ADRENO_3XX,
.revn = 307, /* because a305c is revn==306 */
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
0x03020001,
0x03020002
),
.family = ADRENO_3XX,
.revn = 320,
.fw = {
[ADRENO_FW_PM4] = "a300_pm4.fw",
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03030000,
0x03030001,
0x03030002
),
.family = ADRENO_3XX,
.revn = 330,
.fw = {
[ADRENO_FW_PM4] = "a330_pm4.fw",
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04000500),
.family = ADRENO_4XX,
.revn = 405,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04020000),
.family = ADRENO_4XX,
.revn = 420,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04030002),
.family = ADRENO_4XX,
.revn = 430,
.fw = {
[ADRENO_FW_PM4] = "a420_pm4.fw",
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000600),
.family = ADRENO_5XX,
.revn = 506,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_128K + SZ_8K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a506_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000800),
.family = ADRENO_5XX,
.revn = 508,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_128K + SZ_8K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a508_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000900),
.family = ADRENO_5XX,
.revn = 509,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_256K + SZ_16K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
/* Adreno 509 uses the same ZAP as 512 */
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010000),
.family = ADRENO_5XX,
.revn = 510,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = SZ_256K,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.init = a5xx_gpu_init,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010200),
.family = ADRENO_5XX,
.revn = 512,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
},
.gmem = (SZ_256K + SZ_16K),
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x05030002,
0x05030004
),
.family = ADRENO_5XX,
.revn = 530,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
},
.gmem = SZ_1M,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05040001),
.family = ADRENO_5XX,
.revn = 540,
.fw = {
[ADRENO_FW_PM4] = "a530_pm4.fw",
[ADRENO_FW_PFP] = "a530_pfp.fw",
[ADRENO_FW_GPMU] = "a540_gpmu.fw2",
},
.gmem = SZ_1M,
/*
* Increase inactive period to 250 to avoid bouncing
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
.init = a5xx_gpu_init,
.zapfw = "a540_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06010000),
.family = ADRENO_6XX_GEN1,
.revn = 610,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
},
.gmem = (SZ_128K + SZ_4K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a610_zap.mdt",
.hwcg = a612_hwcg,
/*
* There are (at least) three SoCs implementing A610: SM6125
* (trinket), SM6115 (bengal) and SM6225 (khaje). Trinket does
* not have speedbinning, as only a single SKU exists and we
* don't support khaje upstream yet. Hence, this matching
* table is only valid for bengal.
*/
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 206, 1 },
{ 200, 2 },
{ 157, 3 },
{ 127, 4 },
),
}, {
.machine = "qcom,sm7150",
.chip_ids = ADRENO_CHIP_IDS(0x06010800),
.family = ADRENO_6XX_GEN1,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mbn",
.hwcg = a615_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 128, 1 },
{ 146, 2 },
{ 167, 3 },
{ 172, 4 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06010800),
.family = ADRENO_6XX_GEN1,
.revn = 618,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 169, 1 },
{ 174, 2 },
),
}, {
.machine = "qcom,sm4350",
.chip_ids = ADRENO_CHIP_IDS(0x06010900),
.family = ADRENO_6XX_GEN1,
.revn = 619,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.hwcg = a615_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 138, 1 },
{ 92, 2 },
),
}, {
.machine = "qcom,sm6375",
.chip_ids = ADRENO_CHIP_IDS(0x06010901),
.family = ADRENO_6XX_GEN1,
.revn = 619,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.hwcg = a615_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 190, 1 },
{ 177, 2 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06010900),
.family = ADRENO_6XX_GEN1,
.revn = 619,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a619_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a615_zap.mdt",
.hwcg = a615_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 120, 4 },
{ 138, 3 },
{ 169, 2 },
{ 180, 1 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
),
.family = ADRENO_6XX_GEN1,
.revn = 630,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a630_gmu.bin",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
.hwcg = a630_hwcg,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06040001),
.family = ADRENO_6XX_GEN2,
.revn = 640,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.hwcg = a640_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 1, 1 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06050002),
.family = ADRENO_6XX_GEN3,
.revn = 650,
.fw = {
[ADRENO_FW_SQE] = "a650_sqe.fw",
[ADRENO_FW_GMU] = "a650_gmu.bin",
},
.gmem = SZ_1M + SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a650_zap.mdt",
.hwcg = a650_hwcg,
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 1, 1 },
{ 2, 3 }, /* Yep, 2 and 3 are swapped! :/ */
{ 3, 2 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06060001),
.family = ADRENO_6XX_GEN4,
.revn = 660,
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_1M + SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a660_zap.mdt",
.hwcg = a660_hwcg,
.address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a660_zap.mbn",
.hwcg = a660_hwcg,
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
{ 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */
{ 190, 1 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06080001),
.family = ADRENO_6XX_GEN2,
.revn = 680,
.fw = {
[ADRENO_FW_SQE] = "a630_sqe.fw",
[ADRENO_FW_GMU] = "a640_gmu.bin",
},
.gmem = SZ_2M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT,
.init = a6xx_gpu_init,
.zapfw = "a640_zap.mdt",
.hwcg = a640_hwcg,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06090000),
.family = ADRENO_6XX_GEN4,
.fw = {
[ADRENO_FW_SQE] = "a660_sqe.fw",
[ADRENO_FW_GMU] = "a660_gmu.bin",
},
.gmem = SZ_4M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a690_zap.mdt",
.hwcg = a690_hwcg,
.address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x07000200),
.family = ADRENO_6XX_GEN1, /* NOT a mistake! */
.fw = {
[ADRENO_FW_SQE] = "a702_sqe.fw",
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a702_zap.mbn",
.hwcg = a702_hwcg,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 236, 1 },
{ 178, 2 },
{ 142, 3 },
),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x07030001),
.family = ADRENO_7XX_GEN1,
.fw = {
[ADRENO_FW_SQE] = "a730_sqe.fw",
[ADRENO_FW_GMU] = "gmu_gen70000.bin",
},
.gmem = SZ_2M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a730_zap.mdt",
.hwcg = a730_hwcg,
.address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43050a01), /* "C510v2" */
.family = ADRENO_7XX_GEN2,
.fw = {
[ADRENO_FW_SQE] = "a740_sqe.fw",
[ADRENO_FW_GMU] = "gmu_gen70200.bin",
},
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "a740_zap.mdt",
.hwcg = a740_hwcg,
.address_space_size = SZ_16G,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x43051401), /* "C520v2" */
.family = ADRENO_7XX_GEN3,
.fw = {
[ADRENO_FW_SQE] = "gen70900_sqe.fw",
[ADRENO_FW_GMU] = "gmu_gen70900.bin",
},
.gmem = 3 * SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
.init = a6xx_gpu_init,
.zapfw = "gen70900_zap.mbn",
.address_space_size = SZ_16G,
},
};
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
extern const struct adreno_gpulist a5xx_gpulist;
extern const struct adreno_gpulist a6xx_gpulist;
extern const struct adreno_gpulist a7xx_gpulist;
MODULE_FIRMWARE("qcom/a300_pm4.fw");
MODULE_FIRMWARE("qcom/a300_pfp.fw");
MODULE_FIRMWARE("qcom/a330_pm4.fw");
MODULE_FIRMWARE("qcom/a330_pfp.fw");
MODULE_FIRMWARE("qcom/a420_pm4.fw");
MODULE_FIRMWARE("qcom/a420_pfp.fw");
MODULE_FIRMWARE("qcom/a530_pm4.fw");
MODULE_FIRMWARE("qcom/a530_pfp.fw");
MODULE_FIRMWARE("qcom/a530v3_gpmu.fw2");
MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
MODULE_FIRMWARE("qcom/a540_gpmu.fw2");
MODULE_FIRMWARE("qcom/a615_zap.mbn");
MODULE_FIRMWARE("qcom/a619_gmu.bin");
MODULE_FIRMWARE("qcom/a630_sqe.fw");
MODULE_FIRMWARE("qcom/a630_gmu.bin");
MODULE_FIRMWARE("qcom/a630_zap.mbn");
MODULE_FIRMWARE("qcom/a640_gmu.bin");
MODULE_FIRMWARE("qcom/a650_gmu.bin");
MODULE_FIRMWARE("qcom/a650_sqe.fw");
MODULE_FIRMWARE("qcom/a660_gmu.bin");
MODULE_FIRMWARE("qcom/a660_sqe.fw");
MODULE_FIRMWARE("qcom/leia_pfp_470.fw");
MODULE_FIRMWARE("qcom/leia_pm4_470.fw");
MODULE_FIRMWARE("qcom/yamato_pfp.fw");
MODULE_FIRMWARE("qcom/yamato_pm4.fw");
static const struct adreno_gpulist *gpulists[] = {
&a2xx_gpulist,
&a3xx_gpulist,
&a4xx_gpulist,
&a5xx_gpulist,
&a6xx_gpulist,
&a7xx_gpulist,
};
static const struct adreno_info *adreno_info(uint32_t chip_id)
{
/* identify gpu: */
for (int i = 0; i < ARRAY_SIZE(gpulist); i++) {
const struct adreno_info *info = &gpulist[i];
if (info->machine && !of_machine_is_compatible(info->machine))
continue;
for (int j = 0; info->chip_ids[j]; j++)
if (info->chip_ids[j] == chip_id)
return info;
for (int i = 0; i < ARRAY_SIZE(gpulists); i++) {
for (int j = 0; j < gpulists[i]->gpus_count; j++) {
const struct adreno_info *info = &gpulists[i]->gpus[j];
if (info->machine && !of_machine_is_compatible(info->machine))
continue;
for (int k = 0; info->chip_ids[k]; k++)
if (info->chip_ids[k] == chip_id)
return info;
}
}
return NULL;


@@ -46,7 +46,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
}
np = of_get_child_by_name(dev->of_node, "zap-shader");
if (!np) {
if (!of_device_is_available(np)) {
zap_available = false;
return -ENODEV;
}
@@ -376,6 +376,9 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_HIGHEST_BANK_BIT:
*value = adreno_gpu->ubwc_config.highest_bank_bit;
return 0;
case MSM_PARAM_RAYTRACING:
*value = adreno_gpu->has_ray_tracing;
return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@@ -887,6 +890,7 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " - iova: 0x%016llx\n",
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
drm_printf(p, " flags: 0x%x\n", state->bos[i].flags);
drm_printf(p, " name: %-32s\n", state->bos[i].name);
adreno_show_object(p, &state->bos[i].data,
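The MSM_PARAM_RAYTRACING case added above is the "param for userspace to know if raytracing is supported" from the tag; userspace reads it through the existing GET_PARAM ioctl. A rough userspace sketch, assuming libdrm's drmIoctl() and the msm_drm.h UAPI (the header path may differ between the kernel uapi and libdrm copies, and error handling is trimmed):

#include <xf86drm.h>
#include <drm/msm_drm.h>

/* Returns non-zero if the kernel/GPU pair advertises raytracing support. */
static int msm_has_raytracing(int drm_fd)
{
	struct drm_msm_param req = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_RAYTRACING,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_MSM_GET_PARAM, &req))
		return 0;	/* older kernels: treat as unsupported */

	return req.value != 0;
}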


@@ -77,14 +77,13 @@ struct adreno_reglist {
u32 value;
};
extern const struct adreno_reglist a612_hwcg[], a615_hwcg[], a630_hwcg[], a640_hwcg[], a650_hwcg[];
extern const struct adreno_reglist a660_hwcg[], a690_hwcg[], a702_hwcg[], a730_hwcg[], a740_hwcg[];
struct adreno_speedbin {
uint16_t fuse;
uint16_t speedbin;
};
struct a6xx_info;
struct adreno_info {
const char *machine;
/**
@@ -101,7 +100,9 @@ struct adreno_info {
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
u32 inactive_period;
const struct adreno_reglist *hwcg;
union {
const struct a6xx_info *a6xx;
};
u64 address_space_size;
/**
* @speedbins: Optional table of fuse to speedbin mappings
@@ -114,6 +115,16 @@ struct adreno_info {
#define ADRENO_CHIP_IDS(tbl...) (uint32_t[]) { tbl, 0 }
struct adreno_gpulist {
const struct adreno_info *gpus;
unsigned gpus_count;
};
#define DECLARE_ADRENO_GPULIST(name) \
const struct adreno_gpulist name ## _gpulist = { \
name ## _gpus, ARRAY_SIZE(name ## _gpus) \
}
/*
* Helper to build a speedbin table, ie. the table:
* fuse | speedbin
@@ -132,6 +143,19 @@ struct adreno_info {
*/
#define ADRENO_SPEEDBINS(tbl...) (struct adreno_speedbin[]) { tbl {SHRT_MAX, 0} }
struct adreno_protect {
const uint32_t *regs;
uint32_t count;
uint32_t count_max;
};
#define DECLARE_ADRENO_PROTECT(name, __count_max) \
static const struct adreno_protect name = { \
.regs = name ## _regs, \
.count = ARRAY_SIZE(name ## _regs), \
.count_max = __count_max, \
};
struct adreno_gpu {
struct msm_gpu base;
const struct adreno_info *info;
@@ -182,6 +206,8 @@ struct adreno_gpu {
*/
const unsigned int *reg_offsets;
bool gmu_is_wrapper;
bool has_ray_tracing;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
@@ -298,6 +324,11 @@ static inline int adreno_is_a430(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 430);
}
static inline int adreno_is_a505(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 505);
}
static inline int adreno_is_a506(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 506);
@@ -448,6 +479,11 @@ static inline int adreno_is_a750(struct adreno_gpu *gpu)
return gpu->info->chip_ids[0] == 0x43051401;
}
static inline int adreno_is_x185(struct adreno_gpu *gpu)
{
return gpu->info->chip_ids[0] == 0x43050c01;
}
static inline int adreno_is_a740_family(struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
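DECLARE_ADRENO_GPULIST() and DECLARE_ADRENO_PROTECT() above are the glue the new per-generation catalog files use: the former is already visible in a2xx_catalog.c earlier in this commit, while the latter wraps a CP_PROTECT register table together with the number of hardware slots it may occupy. A sketch of the intended protect-table shape, with made-up values; real tables are built with the A6XX_PROTECT_RDONLY()/NORDWR()-style helpers from the GPU headers:

/* Hypothetical per-gen catalog fragment. */
static const uint32_t example_protect_regs[] = {
	0x00000000,	/* placeholder encodings of protected register ranges */
	0x00000501,
};
DECLARE_ADRENO_PROTECT(example_protect, 32);

The second argument (32 here) records how many CP_PROTECT slots the hardware exposes, which appears intended to let the generation code check that a table fits before programming it.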


@@ -0,0 +1,335 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
*/
#ifndef _DPU_5_2_SM7150_H
#define _DPU_5_2_SM7150_H
static const struct dpu_caps sm7150_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
.has_src_split = true,
.has_dim_layer = true,
.has_idle_pc = true,
.has_3d_merge = true,
.max_linewidth = 2880,
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
.max_hdeci_exp = MAX_HORZ_DECIMATION,
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
static const struct dpu_mdp_cfg sm7150_mdp = {
.name = "top_0",
.base = 0x0, .len = 0x45c,
.features = BIT(DPU_MDP_AUDIO_SELECT),
.clk_ctrls = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
static const struct dpu_ctl_cfg sm7150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
.base = 0x1000, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
}, {
.name = "ctl_1", .id = CTL_1,
.base = 0x1200, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
}, {
.name = "ctl_2", .id = CTL_2,
.base = 0x1400, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
}, {
.name = "ctl_3", .id = CTL_3,
.base = 0x1600, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
}, {
.name = "ctl_4", .id = CTL_4,
.base = 0x1800, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
}, {
.name = "ctl_5", .id = CTL_5,
.base = 0x1a00, .len = 0x1e0,
.features = BIT(DPU_CTL_ACTIVE_CFG),
.intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
},
};
static const struct dpu_sspp_cfg sm7150_sspp[] = {
{
.name = "sspp_0", .id = SSPP_VIG0,
.base = 0x4000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 0,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG0,
}, {
.name = "sspp_1", .id = SSPP_VIG1,
.base = 0x6000, .len = 0x1f0,
.features = VIG_SDM845_MASK,
.sblk = &dpu_vig_sblk_qseed3_2_4,
.xin_id = 4,
.type = SSPP_TYPE_VIG,
.clk_ctrl = DPU_CLK_CTRL_VIG1,
}, {
.name = "sspp_2", .id = SSPP_DMA0,
.base = 0x24000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
.sblk = &dpu_dma_sblk,
.xin_id = 1,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA0,
}, {
.name = "sspp_9", .id = SSPP_DMA1,
.base = 0x26000, .len = 0x1f0,
.features = DMA_SDM845_MASK,
.sblk = &dpu_dma_sblk,
.xin_id = 5,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA1,
}, {
.name = "sspp_10", .id = SSPP_DMA2,
.base = 0x28000, .len = 0x1f0,
.features = DMA_CURSOR_SDM845_MASK,
.sblk = &dpu_dma_sblk,
.xin_id = 9,
.type = SSPP_TYPE_DMA,
.clk_ctrl = DPU_CLK_CTRL_DMA2,
},
};
static const struct dpu_lm_cfg sm7150_lm[] = {
{
.name = "lm_0", .id = LM_0,
.base = 0x44000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
.pingpong = PINGPONG_1,
.dspp = DSPP_1,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
.pingpong = PINGPONG_2,
}, {
.name = "lm_3", .id = LM_3,
.base = 0x47000, .len = 0x320,
.features = MIXER_SDM845_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
.pingpong = PINGPONG_3,
},
};
static const struct dpu_dspp_cfg sm7150_dspp[] = {
{
.name = "dspp_0", .id = DSPP_0,
.base = 0x54000, .len = 0x1800,
.features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
}, {
.name = "dspp_1", .id = DSPP_1,
.base = 0x56000, .len = 0x1800,
.features = DSPP_SC7180_MASK,
.sblk = &sdm845_dspp_sblk,
},
};
static const struct dpu_pingpong_cfg sm7150_pp[] = {
{
.name = "pingpong_0", .id = PINGPONG_0,
.base = 0x70000, .len = 0xd4,
.features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
}, {
.name = "pingpong_1", .id = PINGPONG_1,
.base = 0x70800, .len = 0xd4,
.features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_0,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
}, {
.name = "pingpong_2", .id = PINGPONG_2,
.base = 0x71000, .len = 0xd4,
.features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
}, {
.name = "pingpong_3", .id = PINGPONG_3,
.base = 0x71800, .len = 0xd4,
.features = PINGPONG_SM8150_MASK,
.sblk = &sdm845_pp_sblk,
.merge_3d = MERGE_3D_1,
.intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
},
};
static const struct dpu_merge_3d_cfg sm7150_merge_3d[] = {
{
.name = "merge_3d_0", .id = MERGE_3D_0,
.base = 0x83000, .len = 0x8,
}, {
.name = "merge_3d_1", .id = MERGE_3D_1,
.base = 0x83100, .len = 0x8,
},
};
static const struct dpu_dsc_cfg sm7150_dsc[] = {
{
.name = "dsc_0", .id = DSC_0,
.base = 0x80000, .len = 0x140,
.features = BIT(DPU_DSC_OUTPUT_CTRL),
}, {
.name = "dsc_1", .id = DSC_1,
.base = 0x80400, .len = 0x140,
.features = BIT(DPU_DSC_OUTPUT_CTRL),
},
};
static const struct dpu_intf_cfg sm7150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
.base = 0x6a000, .len = 0x280,
.features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
}, {
.name = "intf_1", .id = INTF_1,
.base = 0x6a800, .len = 0x2bc,
.features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_0,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
.intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
}, {
.name = "intf_2", .id = INTF_2,
.base = 0x6b000, .len = 0x2bc,
.features = INTF_SC7180_MASK,
.type = INTF_DSI,
.controller_id = MSM_DSI_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
.intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
}, {
.name = "intf_3", .id = INTF_3,
.base = 0x6b800, .len = 0x280,
.features = INTF_SC7180_MASK,
.type = INTF_DP,
.controller_id = MSM_DP_CONTROLLER_1,
.prog_fetch_lines_worst_case = 24,
.intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
.intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
},
};
static const struct dpu_wb_cfg sm7150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
.features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
.xin_id = 6,
.vbif_idx = VBIF_RT,
.maxlinewidth = 4096,
.intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
},
};
static const struct dpu_perf_cfg sm7150_perf_data = {
.max_bw_low = 7100000,
.max_bw_high = 7100000,
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
.min_prefill_lines = 24,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
.safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
.qos_lut_tbl = {
{
.nentry = ARRAY_SIZE(sm8150_qos_linear),
.entries = sm8150_qos_linear
}, {
.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
.entries = sc7180_qos_macrotile
}, {
.nentry = ARRAY_SIZE(sc7180_qos_nrt),
.entries = sc7180_qos_nrt
},
},
.cdp_cfg = {
{.rd_enable = 1, .wr_enable = 1},
{.rd_enable = 1, .wr_enable = 0}
},
.clk_inefficiency_factor = 105,
.bw_inefficiency_factor = 120,
};
static const struct dpu_mdss_version sm7150_mdss_ver = {
.core_major_ver = 5,
.core_minor_ver = 2,
};
const struct dpu_mdss_cfg dpu_sm7150_cfg = {
.mdss_ver = &sm7150_mdss_ver,
.caps = &sm7150_dpu_caps,
.mdp = &sm7150_mdp,
.ctl_count = ARRAY_SIZE(sm7150_ctl),
.ctl = sm7150_ctl,
.sspp_count = ARRAY_SIZE(sm7150_sspp),
.sspp = sm7150_sspp,
.mixer_count = ARRAY_SIZE(sm7150_lm),
.mixer = sm7150_lm,
.dspp_count = ARRAY_SIZE(sm7150_dspp),
.dspp = sm7150_dspp,
.pingpong_count = ARRAY_SIZE(sm7150_pp),
.pingpong = sm7150_pp,
.merge_3d_count = ARRAY_SIZE(sm7150_merge_3d),
.merge_3d = sm7150_merge_3d,
.dsc_count = ARRAY_SIZE(sm7150_dsc),
.dsc = sm7150_dsc,
.intf_count = ARRAY_SIZE(sm7150_intf),
.intf = sm7150_intf,
.wb_count = ARRAY_SIZE(sm7150_wb),
.wb = sm7150_wb,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
.vbif = sdm845_vbif,
.perf = &sm7150_perf_data,
};
#endif

View File

@ -658,18 +658,18 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
DPU_ATRACE_END("crtc_frame_event");
}
/*
* dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
* registers this API to encoder for all frame event callbacks like
* frame_error, frame_done, idle_timeout, etc. Encoder may call different events
* from different context - IRQ, user thread, commit_thread, etc. Each event
* should be carefully reviewed and should be processed in proper task context
* to avoid scheduling delay or properly manage the irq context's bottom half
* processing.
/**
* dpu_crtc_frame_event_cb - crtc frame event callback API
* @crtc: Pointer to crtc
* @event: Event to process
*
* Encoder may call this for different events from different context - IRQ,
* user thread, commit_thread, etc. Each event should be carefully reviewed and
* should be processed in proper task context to avoid scheduling delay or
* properly manage the irq context's bottom half processing.
*/
static void dpu_crtc_frame_event_cb(void *data, u32 event)
void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
{
struct drm_crtc *crtc = (struct drm_crtc *)data;
struct dpu_crtc *dpu_crtc;
struct msm_drm_private *priv;
struct dpu_crtc_frame_event *fevent;
@ -1091,9 +1091,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
dpu_core_perf_crtc_update(crtc, 0);
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
memset(cstate->mixers, 0, sizeof(cstate->mixers));
cstate->num_mixers = 0;
@ -1132,8 +1129,6 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
*/
if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
request_bandwidth = true;
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
}
if (request_bandwidth)

View File

@ -300,4 +300,6 @@ static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
}
void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event);
#endif /* _DPU_CRTC_H_ */

View File

@ -151,8 +151,6 @@ enum dpu_enc_rc_states {
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
* busy processing current command.
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
* @frame_done_timeout_ms: frame done timeout in ms
* @frame_done_timeout_cnt: atomic counter tracking the number of frame
* done timeouts
@ -192,8 +190,6 @@ struct dpu_encoder_virt {
struct mutex enc_lock;
DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
atomic_t frame_done_timeout_ms;
atomic_t frame_done_timeout_cnt;
@ -428,7 +424,7 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
return -EWOULDBLOCK;
}
if (irq_idx < 0) {
if (irq_idx == 0) {
DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
DRMID(phys_enc->parent), func);
return 0;
@ -564,7 +560,7 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
return (num_dsc > 0) && (num_dsc > intf_count);
}
static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{
struct msm_drm_private *priv = drm_enc->dev->dev_private;
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
@ -736,18 +732,14 @@ static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
return;
}
if (hw_mdptop->ops.setup_vsync_source &&
disp_info->is_cmd_mode) {
if (hw_mdptop->ops.setup_vsync_source) {
for (i = 0; i < dpu_enc->num_phys_encs; i++)
vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
vsync_cfg.pp_count = dpu_enc->num_phys_encs;
vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);
if (disp_info->is_te_using_watchdog_timer)
vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
else
vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
vsync_cfg.vsync_source = disp_info->vsync_source;
hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
@ -1200,6 +1192,8 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
phys->cached_mode = crtc_state->adjusted_mode;
if (phys->ops.atomic_mode_set)
phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
}
}
@ -1226,7 +1220,8 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
dpu_enc->cur_master->hw_mdptop);
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
if (dpu_enc->disp_info.is_cmd_mode)
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
if (dpu_enc->disp_info.intf_type == INTF_DSI &&
!WARN_ON(dpu_enc->num_phys_encs == 0)) {
@ -1454,28 +1449,6 @@ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
}
}
void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
void (*frame_event_cb)(void *, u32 event),
void *frame_event_cb_data)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
bool enable;
enable = frame_event_cb ? true : false;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
dpu_enc->crtc_frame_event_cb = frame_event_cb;
dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
void dpu_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct dpu_encoder_phys *ready_phys, u32 event)
@ -1515,15 +1488,12 @@ void dpu_encoder_frame_done_callback(
dpu_encoder_resource_control(drm_enc,
DPU_ENC_RC_EVENT_FRAME_DONE);
if (dpu_enc->crtc_frame_event_cb)
dpu_enc->crtc_frame_event_cb(
dpu_enc->crtc_frame_event_cb_data,
event);
if (dpu_enc->crtc)
dpu_crtc_frame_event_cb(dpu_enc->crtc, event);
}
} else {
if (dpu_enc->crtc_frame_event_cb)
dpu_enc->crtc_frame_event_cb(
dpu_enc->crtc_frame_event_cb_data, event);
if (dpu_enc->crtc)
dpu_crtc_frame_event_cb(dpu_enc->crtc, event);
}
}
@ -1741,8 +1711,7 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
phys = dpu_enc->phys_encs[i];
ctl = phys->hw_ctl;
if (ctl->ops.clear_pending_flush)
ctl->ops.clear_pending_flush(ctl);
ctl->ops.clear_pending_flush(ctl);
/* update only for command mode primary ctl */
if ((phys == dpu_enc->cur_master) &&
@ -2457,7 +2426,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
return;
}
if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc) {
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
return;
@ -2473,7 +2442,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
event = DPU_ENCODER_FRAME_EVENT_ERROR;
trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
dpu_crtc_frame_event_cb(dpu_enc->crtc, event);
}
static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {

View File

@ -26,15 +26,14 @@
* @h_tile_instance: Controller instance used per tile. Number of elements is
* based on num_of_h_tiles
* @is_cmd_mode Boolean to indicate if the CMD mode is requested
* @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
* used instead of panel TE in cmd mode panels
* @vsync_source: Source of the TE signal for DSI CMD devices
*/
struct msm_display_info {
enum dpu_intf_type intf_type;
uint32_t num_of_h_tiles;
uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
bool is_cmd_mode;
bool is_te_using_watchdog_timer;
enum dpu_vsync_source vsync_source;
};
/**
@ -55,16 +54,6 @@ void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
struct drm_crtc *crtc, bool enable);
/**
* dpu_encoder_register_frame_event_callback - provide callback to encoder that
* will be called after the request is complete, or other events.
* @encoder: encoder pointer
* @cb: callback pointer, provide NULL to deregister
* @data: user data provided to callback
*/
void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
void (*cb)(void *, u32), void *data);
/**
* dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
* path (i.e. ctl flush and start) at next appropriate time.

View File

@ -69,6 +69,8 @@ struct dpu_encoder_phys;
* @is_master: Whether this phys_enc is the current master
* encoder. Can be switched at enable time. Based
* on split_role and current mode (CMD/VID).
* @atomic_mode_set: DRM Call. Set a DRM mode.
* This likely caches the mode, for use at enable.
* @enable: DRM Call. Enable a DRM mode.
* @disable: DRM Call. Disable mode.
* @control_vblank_irq Register/Deregister for VBLANK IRQ
@ -93,6 +95,9 @@ struct dpu_encoder_phys;
struct dpu_encoder_phys_ops {
void (*prepare_commit)(struct dpu_encoder_phys *encoder);
bool (*is_master)(struct dpu_encoder_phys *encoder);
void (*atomic_mode_set)(struct dpu_encoder_phys *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
void (*enable)(struct dpu_encoder_phys *encoder);
void (*disable)(struct dpu_encoder_phys *encoder);
int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
@ -334,6 +339,14 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
*/
unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc);
/**
* dpu_encoder_get_dsc_config - get DSC config for the DPU encoder
* This helper function is used by physical encoder to get DSC config
* used for this encoder.
* @drm_enc: Pointer to encoder structure
*/
struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc);
/**
* dpu_encoder_get_drm_fmt - return DRM fourcc format
* @phys_enc: Pointer to physical encoder structure

View File

@ -142,6 +142,23 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg)
dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}
static void dpu_encoder_phys_cmd_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
if (phys_enc->has_intf_te)
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
else
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
struct dpu_encoder_phys *phys_enc)
{
@ -280,14 +297,6 @@ static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_pp->idx - PINGPONG_0,
phys_enc->vblank_refcount);
phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;
phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;
if (phys_enc->has_intf_te)
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
else
phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_PINGPONG],
dpu_encoder_phys_cmd_pp_tx_done_irq,
@ -298,7 +307,7 @@ static void dpu_encoder_phys_cmd_irq_enable(struct dpu_encoder_phys *phys_enc)
phys_enc);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
if (dpu_encoder_phys_cmd_is_master(phys_enc))
if (dpu_encoder_phys_cmd_is_master(phys_enc) && phys_enc->irq[INTR_IDX_CTL_START])
dpu_core_irq_register_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_CTL_START],
dpu_encoder_phys_cmd_ctl_start_irq,
@ -311,17 +320,13 @@ static void dpu_encoder_phys_cmd_irq_disable(struct dpu_encoder_phys *phys_enc)
phys_enc->hw_pp->idx - PINGPONG_0,
phys_enc->vblank_refcount);
if (dpu_encoder_phys_cmd_is_master(phys_enc))
if (dpu_encoder_phys_cmd_is_master(phys_enc) && phys_enc->irq[INTR_IDX_CTL_START])
dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
phys_enc->irq[INTR_IDX_CTL_START]);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_UNDERRUN]);
dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
dpu_core_irq_unregister_callback(phys_enc->dpu_kms, phys_enc->irq[INTR_IDX_PINGPONG]);
phys_enc->irq[INTR_IDX_CTL_START] = 0;
phys_enc->irq[INTR_IDX_PINGPONG] = 0;
phys_enc->irq[INTR_IDX_RDPTR] = 0;
}
static void dpu_encoder_phys_cmd_tearcheck_config(
@ -698,6 +703,7 @@ static void dpu_encoder_phys_cmd_init_ops(
struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_cmd_is_master;
ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
ops->enable = dpu_encoder_phys_cmd_enable;
ops->disable = dpu_encoder_phys_cmd_disable;
ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
@ -736,8 +742,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(struct drm_device *dev,
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_CMD;
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
cmd_enc->stream_sel = 0;
if (!phys_enc->hw_intf) {

View File

@ -11,6 +11,7 @@
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_managed.h>
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
@ -115,6 +116,23 @@ static void drm_mode_to_intf_timing_params(
timing->h_front_porch = timing->h_front_porch >> 1;
timing->hsync_pulse_width = timing->hsync_pulse_width >> 1;
}
/*
* for DSI, if compression is enabled, then divide the horizontal active
* timing parameters by the compression ratio. Bits of 3 components (R/G/B)
* are compressed into the bits of 1 pixel.
*/
if (phys_enc->hw_intf->cap->type != INTF_DP && timing->compression_en) {
struct drm_dsc_config *dsc =
dpu_encoder_get_dsc_config(phys_enc->parent);
/*
* TODO: replace drm_dsc_get_bpp_int with logic to handle
* fractional part if there is fraction
*/
timing->width = timing->width * drm_dsc_get_bpp_int(dsc) /
(dsc->bits_per_component * 3);
timing->xres = timing->width;
}
}
static u32 get_horizontal_total(const struct dpu_hw_intf_timing_params *timing)
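The width scaling added in the hunk above is easy to sanity-check in isolation. The sketch below is a hypothetical standalone calculation (the 1080-pixel mode, 8 bpc input and integer 8 bpp target are made-up values, not taken from any panel); it only illustrates how three components' worth of bits collapse into one compressed pixel value, the same ratio the driver derives from drm_dsc_get_bpp_int().
/* Standalone sketch of the compressed active-width math; all numbers are
 * illustrative, the driver reads them from the panel's struct drm_dsc_config.
 */
#include <stdio.h>

int main(void)
{
	unsigned int width = 1080;		/* uncompressed active width  */
	unsigned int bpp_int = 8;		/* integer part of target bpp */
	unsigned int bits_per_component = 8;	/* input bpc                  */

	unsigned int compressed = width * bpp_int /
				  (bits_per_component * 3);

	printf("compressed width = %u px\n", compressed);	/* 360 */
	return 0;
}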
@ -289,7 +307,8 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
&timing_params, fmt,
phys_enc->dpu_kms->catalog->mdss_ver);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
@ -356,6 +375,16 @@ static bool dpu_encoder_phys_vid_needs_single_flush(
return phys_enc->split_role != ENC_ROLE_SOLO;
}
static void dpu_encoder_phys_vid_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}
static int dpu_encoder_phys_vid_control_vblank_irq(
struct dpu_encoder_phys *phys_enc,
bool enable)
@ -699,6 +728,7 @@ static int dpu_encoder_phys_vid_get_frame_count(
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_vid_is_master;
ops->atomic_mode_set = dpu_encoder_phys_vid_atomic_mode_set;
ops->enable = dpu_encoder_phys_vid_enable;
ops->disable = dpu_encoder_phys_vid_disable;
ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
@ -737,8 +767,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(struct drm_device *dev,
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_VIDEO;
phys_enc->irq[INTR_IDX_VSYNC] = phys_enc->hw_intf->cap->intr_vsync;
phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->hw_intf->idx);

View File

@ -404,6 +404,15 @@ static void dpu_encoder_phys_wb_irq_disable(struct dpu_encoder_phys *phys)
dpu_core_irq_unregister_callback(phys->dpu_kms, phys->irq[INTR_IDX_WB_DONE]);
}
static void dpu_encoder_phys_wb_atomic_mode_set(
struct dpu_encoder_phys *phys_enc,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
}
static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
struct dpu_encoder_phys *phys_enc)
{
@ -529,8 +538,7 @@ static void dpu_encoder_phys_wb_disable(struct dpu_encoder_phys *phys_enc)
}
/* reset h/w before final flush */
if (phys_enc->hw_ctl->ops.clear_pending_flush)
phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
phys_enc->hw_ctl->ops.clear_pending_flush(phys_enc->hw_ctl);
/*
* New CTL reset sequence from 5.0 MDP onwards.
@ -640,6 +648,7 @@ static bool dpu_encoder_phys_wb_is_valid_for_commit(struct dpu_encoder_phys *phy
static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
{
ops->is_master = dpu_encoder_phys_wb_is_master;
ops->atomic_mode_set = dpu_encoder_phys_wb_atomic_mode_set;
ops->enable = dpu_encoder_phys_wb_enable;
ops->disable = dpu_encoder_phys_wb_disable;
ops->wait_for_commit_done = dpu_encoder_phys_wb_wait_for_commit_done;
@ -685,7 +694,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(struct drm_device *dev,
dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
phys_enc->intf_mode = INTF_MODE_WB_LINE;
phys_enc->irq[INTR_IDX_WB_DONE] = phys_enc->hw_wb->caps->intr_wb_done;
atomic_set(&wb_enc->wbirq_refcount, 0);

View File

@ -220,12 +220,9 @@ static const u32 wb2_formats_rgb[] = {
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XBGR1555,
@ -254,12 +251,9 @@ static const u32 wb2_formats_rgb_yuv[] = {
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBX4444,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR1555,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_XBGR1555,
@ -688,6 +682,7 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_5_0_sm8150.h"
#include "catalog/dpu_5_1_sc8180x.h"
#include "catalog/dpu_5_2_sm7150.h"
#include "catalog/dpu_5_4_sm6125.h"
#include "catalog/dpu_6_0_sm8250.h"

View File

@ -838,6 +838,7 @@ extern const struct dpu_mdss_cfg dpu_sdm845_cfg;
extern const struct dpu_mdss_cfg dpu_sdm670_cfg;
extern const struct dpu_mdss_cfg dpu_sm8150_cfg;
extern const struct dpu_mdss_cfg dpu_sc8180x_cfg;
extern const struct dpu_mdss_cfg dpu_sm7150_cfg;
extern const struct dpu_mdss_cfg dpu_sm8250_cfg;
extern const struct dpu_mdss_cfg dpu_sc7180_cfg;
extern const struct dpu_mdss_cfg dpu_sm6115_cfg;

View File

@ -83,7 +83,8 @@ struct dpu_hw_ctl_ops {
/**
* Clear the value of the cached pending_flush_mask
* No effect on hardware
* No effect on hardware.
* Required to be implemented.
* @ctx : ctl path ctx pointer
*/
void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);

View File

@ -98,7 +98,8 @@
static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
const struct msm_format *fmt)
const struct msm_format *fmt,
const struct dpu_mdss_version *mdss_ver)
{
struct dpu_hw_blk_reg_map *c = &intf->hw;
u32 hsync_period, vsync_period;
@ -168,6 +169,20 @@ static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *intf,
data_width = p->width;
/*
* If widebus is enabled, data is valid for only half the active window
* since the data rate is doubled in this mode. But for the compression
* mode in DP case, the p->width is already adjusted in
* drm_mode_to_intf_timing_params()
*/
if (p->wide_bus_en && !dp_intf)
data_width = p->width >> 1;
/* TODO: handle DSC+DP case, we only handle DSC+DSI case so far */
if (p->compression_en && !dp_intf &&
mdss_ver->core_major_ver >= 7)
intf_cfg2 |= INTF_CFG2_DCE_DATA_COMPRESS;
hsync_data_start_x = hsync_start_x;
hsync_data_end_x = hsync_start_x + data_width - 1;
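A minimal sketch of the widebus adjustment above, using a made-up 1080-pixel line: when the bus carries two pixels per clock, the data window is programmed at half the active width (DP with compression is excluded because its width was already adjusted earlier).
/* Hypothetical illustration of the widebus data-width halving. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int width = 1080;
	bool wide_bus_en = true, dp_intf = false;
	unsigned int data_width = width;

	if (wide_bus_en && !dp_intf)
		data_width = width >> 1;	/* 540: two pixels per pclk */

	printf("data_width = %u\n", data_width);
	return 0;
}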
@ -462,7 +477,7 @@ static int dpu_hw_intf_get_vsync_info(struct dpu_hw_intf *intf,
}
static void dpu_hw_intf_vsync_sel(struct dpu_hw_intf *intf,
u32 vsync_source)
enum dpu_vsync_source vsync_source)
{
struct dpu_hw_blk_reg_map *c;

View File

@ -81,7 +81,8 @@ struct dpu_hw_intf_cmd_mode_cfg {
struct dpu_hw_intf_ops {
void (*setup_timing_gen)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_timing_params *p,
const struct msm_format *fmt);
const struct msm_format *fmt,
const struct dpu_mdss_version *mdss_ver);
void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
const struct dpu_hw_intf_prog_fetch *fetch);
@ -107,7 +108,7 @@ struct dpu_hw_intf_ops {
int (*connect_external_te)(struct dpu_hw_intf *intf, bool enable_external_te);
void (*vsync_sel)(struct dpu_hw_intf *intf, u32 vsync_source);
void (*vsync_sel)(struct dpu_hw_intf *intf, enum dpu_vsync_source vsync_source);
/**
* Disable autorefresh if enabled

View File

@ -54,18 +54,20 @@
#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
#define DPU_VSYNC0_SOURCE_GPIO 0
#define DPU_VSYNC1_SOURCE_GPIO 1
#define DPU_VSYNC2_SOURCE_GPIO 2
#define DPU_VSYNC_SOURCE_INTF_0 3
#define DPU_VSYNC_SOURCE_INTF_1 4
#define DPU_VSYNC_SOURCE_INTF_2 5
#define DPU_VSYNC_SOURCE_INTF_3 6
#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
enum dpu_vsync_source {
DPU_VSYNC_SOURCE_GPIO_0,
DPU_VSYNC_SOURCE_GPIO_1,
DPU_VSYNC_SOURCE_GPIO_2,
DPU_VSYNC_SOURCE_INTF_0 = 3,
DPU_VSYNC_SOURCE_INTF_1,
DPU_VSYNC_SOURCE_INTF_2,
DPU_VSYNC_SOURCE_INTF_3,
DPU_VSYNC_SOURCE_WD_TIMER_4 = 11,
DPU_VSYNC_SOURCE_WD_TIMER_3,
DPU_VSYNC_SOURCE_WD_TIMER_2,
DPU_VSYNC_SOURCE_WD_TIMER_1,
DPU_VSYNC_SOURCE_WD_TIMER_0,
};
enum dpu_hw_blk_type {
DPU_HW_BLK_TOP = 0,

View File

@ -107,8 +107,8 @@ static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
}
static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
struct dpu_vsync_source_cfg *cfg)
static void dpu_hw_setup_wd_timer(struct dpu_hw_mdp *mdp,
struct dpu_vsync_source_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 reg, wd_load_value, wd_ctl, wd_ctl2;
@ -163,8 +163,8 @@ static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
}
}
static void dpu_hw_setup_vsync_source_and_vsync_sel(struct dpu_hw_mdp *mdp,
struct dpu_vsync_source_cfg *cfg)
static void dpu_hw_setup_vsync_sel(struct dpu_hw_mdp *mdp,
struct dpu_vsync_source_cfg *cfg)
{
struct dpu_hw_blk_reg_map *c;
u32 reg, i;
@ -187,7 +187,7 @@ static void dpu_hw_setup_vsync_source_and_vsync_sel(struct dpu_hw_mdp *mdp,
}
DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
dpu_hw_setup_vsync_source(mdp, cfg);
dpu_hw_setup_wd_timer(mdp, cfg);
}
static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
@ -239,9 +239,9 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->get_danger_status = dpu_hw_get_danger_status;
if (cap & BIT(DPU_MDP_VSYNC_SEL))
ops->setup_vsync_source = dpu_hw_setup_vsync_source_and_vsync_sel;
ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
else
ops->setup_vsync_source = dpu_hw_setup_vsync_source;
ops->setup_vsync_source = dpu_hw_setup_wd_timer;
ops->get_safe_status = dpu_hw_get_safe_status;

View File

@ -64,7 +64,7 @@ struct dpu_vsync_source_cfg {
u32 pp_count;
u32 frame_rate;
u32 ppnumber[PINGPONG_MAX];
u32 vsync_source;
enum dpu_vsync_source vsync_source;
};
/**

View File

@ -505,6 +505,44 @@ static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
dpu_kms_wait_for_commit_done(kms, crtc);
}
static const char *dpu_vsync_sources[] = {
[DPU_VSYNC_SOURCE_GPIO_0] = "mdp_vsync_p",
[DPU_VSYNC_SOURCE_GPIO_1] = "mdp_vsync_s",
[DPU_VSYNC_SOURCE_GPIO_2] = "mdp_vsync_e",
[DPU_VSYNC_SOURCE_INTF_0] = "mdp_intf0",
[DPU_VSYNC_SOURCE_INTF_1] = "mdp_intf1",
[DPU_VSYNC_SOURCE_INTF_2] = "mdp_intf2",
[DPU_VSYNC_SOURCE_INTF_3] = "mdp_intf3",
[DPU_VSYNC_SOURCE_WD_TIMER_0] = "timer0",
[DPU_VSYNC_SOURCE_WD_TIMER_1] = "timer1",
[DPU_VSYNC_SOURCE_WD_TIMER_2] = "timer2",
[DPU_VSYNC_SOURCE_WD_TIMER_3] = "timer3",
[DPU_VSYNC_SOURCE_WD_TIMER_4] = "timer4",
};
static int dpu_kms_dsi_set_te_source(struct msm_display_info *info,
struct msm_dsi *dsi)
{
const char *te_source = msm_dsi_get_te_source(dsi);
int i;
if (!te_source) {
info->vsync_source = DPU_VSYNC_SOURCE_GPIO_0;
return 0;
}
/* we cannot use match_string() since dpu_vsync_sources is a sparse array */
for (i = 0; i < ARRAY_SIZE(dpu_vsync_sources); i++) {
if (dpu_vsync_sources[i] &&
!strcmp(dpu_vsync_sources[i], te_source)) {
info->vsync_source = i;
return 0;
}
}
return -EINVAL;
}
static int _dpu_kms_initialize_dsi(struct drm_device *dev,
struct msm_drm_private *priv,
struct dpu_kms *dpu_kms)
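For reference, a standalone sketch of the lookup above (hypothetical, userspace-only): because the table has holes between the interface and watchdog-timer entries, match_string() would stop at the first NULL slot, so the helper walks the whole array and skips unset entries. An absent "qcom,te-source" property falls back to mdp_vsync_p (GPIO 0).
/* Hypothetical standalone version of the sparse-array lookup. */
#include <stdio.h>
#include <string.h>

static const char *vsync_sources[16] = {
	[0]  = "mdp_vsync_p",	/* DPU_VSYNC_SOURCE_GPIO_0     */
	[2]  = "mdp_vsync_e",	/* DPU_VSYNC_SOURCE_GPIO_2     */
	[15] = "timer0",	/* DPU_VSYNC_SOURCE_WD_TIMER_0 */
};

static int lookup(const char *name)
{
	for (int i = 0; i < 16; i++)
		if (vsync_sources[i] && !strcmp(vsync_sources[i], name))
			return i;
	return -1;
}

int main(void)
{
	printf("timer0 -> %d\n", lookup("timer0"));	/* 15 */
	printf("bogus  -> %d\n", lookup("bogus"));	/* -1 */
	return 0;
}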
@ -543,6 +581,12 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
info.is_cmd_mode = msm_dsi_is_cmd_mode(priv->dsi[i]);
rc = dpu_kms_dsi_set_te_source(&info, priv->dsi[i]);
if (rc) {
DPU_ERROR("failed to identify TE source for dsi display\n");
return rc;
}
encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI, &info);
if (IS_ERR(encoder)) {
DPU_ERROR("encoder init failed for dsi display\n");
@ -1405,6 +1449,7 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sm6125-dpu", .data = &dpu_sm6125_cfg, },
{ .compatible = "qcom,sm6350-dpu", .data = &dpu_sm6350_cfg, },
{ .compatible = "qcom,sm6375-dpu", .data = &dpu_sm6375_cfg, },
{ .compatible = "qcom,sm7150-dpu", .data = &dpu_sm7150_cfg, },
{ .compatible = "qcom,sm8150-dpu", .data = &dpu_sm8150_cfg, },
{ .compatible = "qcom,sm8250-dpu", .data = &dpu_sm8250_cfg, },
{ .compatible = "qcom,sm8350-dpu", .data = &dpu_sm8350_cfg, },

View File

@ -18,6 +18,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include "msm_drv.h"
#include "msm_mdss.h"
#include "dpu_kms.h"
#include "dpu_formats.h"
#include "dpu_hw_sspp.h"
@ -1342,10 +1343,14 @@ void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
static bool dpu_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format, uint64_t modifier)
{
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
bool has_no_ubwc = (dpu_kms->mdss->ubwc_enc_version == 0) &&
(dpu_kms->mdss->ubwc_dec_version == 0);
if (modifier == DRM_FORMAT_MOD_LINEAR)
return true;
if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED)
if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED && !has_no_ubwc)
return dpu_find_format(format, qcom_compressed_supported_formats,
ARRAY_SIZE(qcom_compressed_supported_formats));

View File

@ -354,10 +354,6 @@ DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
TP_PROTO(uint32_t drm_id, bool enable),
TP_ARGS(drm_id, enable)
);
DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
TP_PROTO(uint32_t drm_id, bool enable),
TP_ARGS(drm_id, enable)
);
DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
TP_PROTO(uint32_t drm_id, bool enable),
TP_ARGS(drm_id, enable)

View File

@ -837,8 +837,7 @@ static const struct mdp5_cfg_hw msm8x53_config = {
.name = "msm8x53",
.mdp = {
.count = 1,
.caps = MDP_CAP_CDM |
MDP_CAP_SRC_SPLIT,
.caps = MDP_CAP_CDM,
},
.ctl = {
.count = 3,
@ -1011,6 +1010,93 @@ static const struct mdp5_cfg_hw msm8917_config = {
.max_clk = 320000000,
};
static const struct mdp5_cfg_hw msm8937_config = {
.name = "msm8937",
.mdp = {
.count = 1,
.caps = MDP_CAP_CDM,
},
.ctl = {
.count = 3,
.base = { 0x01000, 0x01200, 0x01400 },
.flush_hw_mask = 0xffffffff,
},
.pipe_vig = {
.count = 1,
.base = { 0x04000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SCALE |
MDP_PIPE_CAP_CSC |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_rgb = {
.count = 2,
.base = { 0x14000, 0x16000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_DECIMATION |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_dma = {
.count = 1,
.base = { 0x24000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
0,
},
.pipe_cursor = {
.count = 1,
.base = { 0x34000 },
.caps = MDP_PIPE_CAP_HFLIP |
MDP_PIPE_CAP_VFLIP |
MDP_PIPE_CAP_SW_PIX_EXT |
MDP_PIPE_CAP_CURSOR |
0,
},
.lm = {
.count = 2,
.base = { 0x44000, 0x45000 },
.instances = {
{ .id = 0, .pp = 0, .dspp = 0,
.caps = MDP_LM_CAP_DISPLAY |
MDP_LM_CAP_PAIR },
{ .id = 1, .pp = 1, .dspp = -1,
.caps = MDP_LM_CAP_DISPLAY },
},
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
},
.dspp = {
.count = 1,
.base = { 0x54000 },
},
.pp = {
.count = 2,
.base = { 0x70000, 0x70800 },
},
.cdm = {
.count = 1,
.base = { 0x79200 },
},
.intf = {
.base = { 0x00000, 0x6a800, 0x6b000 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
[2] = INTF_DSI,
},
},
.max_clk = 320000000,
};
static const struct mdp5_cfg_hw msm8998_config = {
.name = "msm8998",
.mdp = {
@ -1325,6 +1411,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 11, .config = { .hw = &msm8x76_config } },
{ .revision = 14, .config = { .hw = &msm8937_config } },
{ .revision = 15, .config = { .hw = &msm8917_config } },
{ .revision = 16, .config = { .hw = &msm8x53_config } },
};

View File

@ -513,7 +513,10 @@ static int dp_wait_hpd_asserted(struct drm_dp_aux *dp_aux,
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
pm_runtime_get_sync(aux->dev);
ret = pm_runtime_resume_and_get(aux->dev);
if (ret)
return ret;
ret = dp_catalog_aux_wait_for_hpd_connect_state(aux->catalog, wait_us);
pm_runtime_put_sync(aux->dev);
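The switch away from pm_runtime_get_sync() matters because get_sync() leaves the usage counter raised even when the resume fails, so an early return without a matching put would leak a reference; pm_runtime_resume_and_get() drops the counter itself on failure. A rough sketch of the resulting pattern (illustrative, not the driver's exact code):
#include <linux/pm_runtime.h>

/* Sketch of the error-safe runtime-PM pattern used above. */
static int example_hw_access(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;	/* usage count already dropped on failure */

	/* ... touch registers while the device is powered ... */

	pm_runtime_put_sync(dev);
	return 0;
}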

View File

@ -360,26 +360,25 @@ static int dp_display_send_hpd_notification(struct dp_display_private *dp,
static int dp_display_process_hpd_high(struct dp_display_private *dp)
{
struct drm_connector *connector = dp->dp_display.connector;
const struct drm_display_info *info = &connector->display_info;
int rc = 0;
struct edid *edid;
rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector);
rc = dp_panel_read_sink_caps(dp->panel, connector);
if (rc)
goto end;
dp_link_process_request(dp->link);
if (!dp->dp_display.is_edp)
drm_dp_set_subconnector_property(dp->dp_display.connector,
drm_dp_set_subconnector_property(connector,
connector_status_connected,
dp->panel->dpcd,
dp->panel->downstream_ports);
edid = dp->panel->edid;
dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
dp->audio_supported = drm_detect_monitor_audio(edid);
dp->audio_supported = info->has_audio;
dp_panel_handle_sink_request(dp->panel);
/*

View File

@ -108,28 +108,6 @@ static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
return bpp;
}
static int dp_panel_update_modes(struct drm_connector *connector,
struct edid *edid)
{
int rc = 0;
if (edid) {
rc = drm_connector_update_edid_property(connector, edid);
if (rc) {
DRM_ERROR("failed to update edid property %d\n", rc);
return rc;
}
rc = drm_add_edid_modes(connector, edid);
return rc;
}
rc = drm_connector_update_edid_property(connector, NULL);
if (rc)
DRM_ERROR("failed to update edid property %d\n", rc);
return rc;
}
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
@ -175,12 +153,13 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
if (rc)
return rc;
kfree(dp_panel->edid);
dp_panel->edid = NULL;
drm_edid_free(dp_panel->drm_edid);
dp_panel->edid = drm_get_edid(connector,
&panel->aux->ddc);
if (!dp_panel->edid) {
dp_panel->drm_edid = drm_edid_read_ddc(connector, &panel->aux->ddc);
drm_edid_connector_update(connector, dp_panel->drm_edid);
if (!dp_panel->drm_edid) {
DRM_ERROR("panel edid read failed\n");
/* check edid read fail is due to unplug */
if (!dp_catalog_link_is_connected(panel->catalog)) {
@ -224,13 +203,13 @@ int dp_panel_get_modes(struct dp_panel *dp_panel,
return -EINVAL;
}
if (dp_panel->edid)
return dp_panel_update_modes(connector, dp_panel->edid);
if (dp_panel->drm_edid)
return drm_edid_connector_add_modes(connector);
return 0;
}
static u8 dp_panel_get_edid_checksum(struct edid *edid)
static u8 dp_panel_get_edid_checksum(const struct edid *edid)
{
edid += edid->extensions;
@ -249,10 +228,12 @@ void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
/* FIXME: get rid of drm_edid_raw() */
const struct edid *edid = drm_edid_raw(dp_panel->drm_edid);
u8 checksum;
if (dp_panel->edid)
checksum = dp_panel_get_edid_checksum(dp_panel->edid);
if (edid)
checksum = dp_panel_get_edid_checksum(edid);
else
checksum = dp_panel->connector->real_edid_checksum;
@ -539,5 +520,5 @@ void dp_panel_put(struct dp_panel *dp_panel)
if (!dp_panel)
return;
kfree(dp_panel->edid);
drm_edid_free(dp_panel->drm_edid);
}
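The conversion above moves dp_panel from the raw struct edid helpers to the opaque struct drm_edid API. A rough sketch of that lifecycle (illustrative only; the real driver keeps the EDID on dp_panel and frees it later in dp_panel_put()):
#include <linux/i2c.h>
#include <drm/drm_edid.h>

/* Illustrative struct drm_edid lifecycle, assuming a DDC-capable adapter. */
static int example_read_sink(struct drm_connector *connector,
			     struct i2c_adapter *ddc)
{
	const struct drm_edid *drm_edid;
	int num_modes;

	drm_edid = drm_edid_read_ddc(connector, ddc);
	drm_edid_connector_update(connector, drm_edid);	/* NULL clears state */
	num_modes = drm_edid_connector_add_modes(connector);

	drm_edid_free(drm_edid);	/* add_modes used the cached copy */
	return num_modes;
}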

View File

@ -39,7 +39,7 @@ struct dp_panel {
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct dp_link_info link_info;
struct edid *edid;
const struct drm_edid *drm_edid;
struct drm_connector *connector;
struct dp_display_mode dp_mode;
struct dp_panel_psr psr_cap;

View File

@ -37,6 +37,7 @@ struct msm_dsi {
struct mipi_dsi_host *host;
struct msm_dsi_phy *phy;
const char *te_source;
struct drm_bridge *next_bridge;

View File

@ -7,7 +7,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
@ -130,9 +129,6 @@ struct msm_dsi_host {
unsigned long src_clk_rate;
struct gpio_desc *disp_en_gpio;
struct gpio_desc *te_gpio;
const struct msm_dsi_cfg_handler *cfg_hnd;
struct completion dma_comp;
@ -754,6 +750,8 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
data |= DSI_VID_CFG0_TRAFFIC_MODE(dsi_get_traffic_mode(flags));
data |= DSI_VID_CFG0_DST_FORMAT(dsi_get_vid_fmt(mipi_fmt));
data |= DSI_VID_CFG0_VIRT_CHANNEL(msm_host->channel);
if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base))
data |= DSI_VID_CFG0_DATABUS_WIDEN;
dsi_write(msm_host, REG_DSI_VID_CFG0, data);
/* Do not swap RGB colors */
@ -778,7 +776,6 @@ static void dsi_ctrl_enable(struct msm_dsi_host *msm_host,
if (cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V1_3)
data |= DSI_CMD_MODE_MDP_CTRL2_BURST_MODE;
/* TODO: Allow for video-mode support once tested/fixed */
if (msm_dsi_host_is_wide_bus_enabled(&msm_host->base))
data |= DSI_CMD_MODE_MDP_CTRL2_DATABUS_WIDEN;
@ -856,6 +853,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
u32 slice_per_intf, total_bytes_per_intf;
u32 pkt_per_line;
u32 eol_byte_num;
u32 bytes_per_pkt;
/* first calculate dsc parameters and then program
* compress mode registers
@ -863,6 +861,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
slice_per_intf = msm_dsc_get_slices_per_intf(dsc, hdisplay);
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
bytes_per_pkt = dsc->slice_chunk_size; /* * slice_per_pkt; */
eol_byte_num = total_bytes_per_intf % 3;
@ -882,7 +881,11 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
/* DSI_VIDEO_COMPRESSION_MODE & DSI_COMMAND_COMPRESSION_MODE
* registers have similar offsets, so for below common code use
* DSI_VIDEO_COMPRESSION_MODE_XXXX for setting bits
*
* pkt_per_line is log2 encoded, >>1 works for supported values (1,2,4)
*/
if (pkt_per_line > 4)
drm_warn_once(msm_host->dev, "pkt_per_line too big");
reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_PKT_PER_LINE(pkt_per_line >> 1);
reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EOL_BYTE_NUM(eol_byte_num);
reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_EN;
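Since pkt_per_line can only be 1, 2 or 4 here, the shift stands in for log2(); a quick hypothetical check of the encoding, and of why anything larger trips the warning:
/* Standalone check of the ">> 1 as log2" encoding for pkt_per_line. */
#include <stdio.h>

int main(void)
{
	unsigned int vals[] = { 1, 2, 4, 8 };

	for (int i = 0; i < 4; i++)
		printf("pkt_per_line=%u -> field=%u\n", vals[i], vals[i] >> 1);
	/* 1->0, 2->1, 4->2 match log2(); 8->4 would be wrong (log2(8)=3),
	 * which is why values above 4 are flagged by the driver.
	 */
	return 0;
}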
@ -900,6 +903,7 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL, reg_ctrl);
dsi_write(msm_host, REG_DSI_COMMAND_COMPRESSION_MODE_CTRL2, reg_ctrl2);
} else {
reg |= DSI_VIDEO_COMPRESSION_MODE_CTRL_WC(bytes_per_pkt);
dsi_write(msm_host, REG_DSI_VIDEO_COMPRESSION_MODE_CTRL, reg);
}
}
@ -1613,28 +1617,6 @@ static irqreturn_t dsi_host_irq(int irq, void *ptr)
return IRQ_HANDLED;
}
static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
struct device *panel_device)
{
msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
"disp-enable",
GPIOD_OUT_LOW);
if (IS_ERR(msm_host->disp_en_gpio)) {
DBG("cannot get disp-enable-gpios %ld",
PTR_ERR(msm_host->disp_en_gpio));
return PTR_ERR(msm_host->disp_en_gpio);
}
msm_host->te_gpio = devm_gpiod_get_optional(panel_device, "disp-te",
GPIOD_IN);
if (IS_ERR(msm_host->te_gpio)) {
DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
return PTR_ERR(msm_host->te_gpio);
}
return 0;
}
static int dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *dsi)
{
@ -1651,11 +1633,6 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
if (dsi->dsc)
msm_host->dsc = dsi->dsc;
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret)
return ret;
ret = dsi_dev_attach(msm_host->pdev);
if (ret)
return ret;
@ -1817,9 +1794,11 @@ static int dsi_populate_dsc_params(struct msm_dsi_host *msm_host, struct drm_dsc
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct msm_dsi *msm_dsi = platform_get_drvdata(msm_host->pdev);
struct device *dev = &msm_host->pdev->dev;
struct device_node *np = dev->of_node;
struct device_node *endpoint;
const char *te_source;
int ret = 0;
/*
@ -1842,6 +1821,16 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
goto err;
}
ret = of_property_read_string(endpoint, "qcom,te-source", &te_source);
if (ret && ret != -EINVAL) {
DRM_DEV_ERROR(dev, "%s: invalid TE source configuration %d\n",
__func__, ret);
goto err;
}
if (!ret)
msm_dsi->te_source = devm_kstrdup(dev, te_source, GFP_KERNEL);
ret = 0;
if (of_property_read_bool(np, "syscon-sfpb")) {
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
@ -2422,9 +2411,6 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
dsi_sw_reset(msm_host);
dsi_ctrl_enable(msm_host, phy_shared_timings, phy);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 1);
msm_host->power_on = true;
mutex_unlock(&msm_host->dev_mutex);
@ -2454,9 +2440,6 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
dsi_ctrl_disable(msm_host);
if (msm_host->disp_en_gpio)
gpiod_set_value(msm_host->disp_en_gpio, 0);
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
cfg_hnd->ops->link_clk_disable(msm_host);

View File

@ -603,3 +603,8 @@ bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi)
{
return IS_MASTER_DSI_LINK(msm_dsi->id);
}
const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
{
return msm_dsi->te_source;
}

View File

@ -545,6 +545,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_28nm_lp_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-8226",
.data = &dsi_phy_28nm_8226_cfgs },
{ .compatible = "qcom,dsi-phy-28nm-8937",
.data = &dsi_phy_28nm_8937_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
{ .compatible = "qcom,dsi-phy-20nm",

View File

@ -12,11 +12,6 @@
#include "dsi.h"
#define dsi_phy_read(offset) readl((offset))
#define dsi_phy_write(offset, data) writel((data), (offset))
#define dsi_phy_write_udelay(offset, data, delay_us) { writel((data), (offset)); udelay(delay_us); }
#define dsi_phy_write_ndelay(offset, data, delay_ns) { writel((data), (offset)); ndelay(delay_ns); }
struct msm_dsi_phy_ops {
int (*pll_init)(struct msm_dsi_phy *phy);
int (*enable)(struct msm_dsi_phy *phy,
@ -47,6 +42,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_famb_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;

View File

@ -187,20 +187,20 @@ static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *
if (config->enable_ssc) {
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
writel(config->ssc_stepsize & 0xff,
base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1);
writel(config->ssc_stepsize >> 8,
base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1);
writel(config->ssc_div_per & 0xff,
base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1);
writel(config->ssc_div_per >> 8,
base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1);
writel(config->ssc_adj_per & 0xff,
base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1);
writel(config->ssc_adj_per >> 8,
base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1);
writel(SSC_EN | (config->ssc_center ? SSC_CENTER : 0),
base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL);
}
}
@ -208,49 +208,43 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
{
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
0xba);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE,
0x0c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO,
0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1,
0xfa);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
0x4c);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
writel(0x80, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE);
writel(0x03, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO);
writel(0x00, base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE);
writel(0x00, base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER);
writel(0x4e, base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER);
writel(0x40, base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS);
writel(0xba, base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE);
writel(0x0c, base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE);
writel(0x00, base + REG_DSI_10nm_PHY_PLL_OUTDIV);
writel(0x00, base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE);
writel(0x08, base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO);
writel(0x08, base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1);
writel(0xc0, base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1);
writel(0xfa, base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1);
writel(0x4c, base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1);
writel(0x80, base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE);
writel(0x29, base + REG_DSI_10nm_PHY_PLL_PFILT);
writel(0x3f, base + REG_DSI_10nm_PHY_PLL_IFILT);
}
static void dsi_pll_commit(struct dsi_pll_10nm *pll, struct dsi_pll_config *config)
{
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
config->decimal_div_start);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
(config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
(config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 64);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
dsi_phy_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
config->pll_clock_inverters);
writel(0x12, base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE);
writel(config->decimal_div_start,
base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
writel(config->frac_div_start & 0xff,
base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
writel((config->frac_div_start & 0xff00) >> 8,
base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1);
writel((config->frac_div_start & 0x30000) >> 16,
base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1);
writel(64, base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1);
writel(0x06, base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY);
writel(0x10, base + REG_DSI_10nm_PHY_PLL_CMODE);
writel(config->pll_clock_inverters, base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS);
}
static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@ -305,21 +299,19 @@ static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
{
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
u32 data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
data & ~BIT(5));
writel(0, pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES);
writel(data & ~BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
ndelay(250);
}
static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
{
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
u32 data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0,
data | BIT(5));
dsi_phy_write(pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
writel(data | BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CTRL_0);
writel(0xc0, pll->phy->pll_base + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES);
ndelay(250);
}
@ -327,18 +319,16 @@ static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
{
u32 data;
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
data & ~BIT(5));
data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
writel(data & ~BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
}
static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
{
u32 data;
data = dsi_phy_read(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
data | BIT(5));
data = readl(pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
writel(data | BIT(5), pll->phy->base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
}
static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
@ -358,8 +348,7 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
}
/* Start PLL */
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
0x01);
writel(0x01, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
/*
* ensure all PLL configurations are written prior to checking
@ -380,11 +369,9 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
if (pll_10nm->slave)
dsi_pll_enable_global_clk(pll_10nm->slave);
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
0x01);
writel(0x01, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL);
if (pll_10nm->slave)
dsi_phy_write(pll_10nm->slave->phy->base +
REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
writel(0x01, pll_10nm->slave->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL);
error:
return rc;
@ -392,7 +379,7 @@ error:
static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
{
dsi_phy_write(pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
writel(0, pll->phy->base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL);
dsi_pll_disable_pll_bias(pll);
}
@ -406,7 +393,7 @@ static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
* powering down the PLL
*/
dsi_pll_disable_global_clk(pll_10nm);
dsi_phy_write(pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
writel(0, pll_10nm->phy->base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
dsi_pll_disable_sub(pll_10nm);
if (pll_10nm->slave) {
dsi_pll_disable_global_clk(pll_10nm->slave);
@ -429,13 +416,13 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
u32 dec;
u64 pll_freq, tmp64;
dec = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
dec = readl(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
dec &= 0xff;
frac = dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
frac = readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
frac |= ((readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
0xff) << 8);
frac |= ((dsi_phy_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
frac |= ((readl(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
0x3) << 16);
/*
@ -488,15 +475,15 @@ static void dsi_10nm_pll_save_state(struct msm_dsi_phy *phy)
void __iomem *phy_base = pll_10nm->phy->base;
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_10nm->phy->pll_base +
cached->pll_out_div = readl(pll_10nm->phy->pll_base +
REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
cmn_clk_cfg0 = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
cmn_clk_cfg1 = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
cached->pll_mux = cmn_clk_cfg1 & 0x3;
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
@ -512,18 +499,18 @@ static int dsi_10nm_pll_restore_state(struct msm_dsi_phy *phy)
u32 val;
int ret;
val = dsi_phy_read(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
val = readl(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
val &= ~0x3;
val |= cached->pll_out_div;
dsi_phy_write(pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
writel(val, pll_10nm->phy->pll_base + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
cached->bit_clk_div | (cached->pix_clk_div << 4));
writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
val = dsi_phy_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
val = readl(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
val |= cached->pll_mux;
dsi_phy_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
writel(val, phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
ret = dsi_pll_10nm_vco_set_rate(phy->vco_hw,
pll_10nm->vco_current_rate,
@ -561,7 +548,7 @@ static int dsi_10nm_set_usecase(struct msm_dsi_phy *phy)
}
/* set PLL src */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
writel(data << 2, base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
return 0;
}
@ -724,7 +711,7 @@ static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
void __iomem *base = phy->base;
u32 data = 0;
data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
data = readl(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
mb(); /* make sure read happened */
return (data & BIT(0));
@ -740,11 +727,9 @@ static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
* corresponding to the logical data lane 0
*/
if (enable)
dsi_phy_write(lane_base +
REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
writel(0x3, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0));
else
dsi_phy_write(lane_base +
REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
writel(0, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0));
}
static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
@ -759,43 +744,40 @@ static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
/* Strength ctrl settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
0x55);
writel(0x55, lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i));
/*
* Disable LPRX and CDRX for all lanes. And later on, it will
* be only enabled for the physical data lane corresponding
* to the logical data lane 0
*/
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
0x88);
writel(0, lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i));
writel(0x0, lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i));
writel(0x88, lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i));
}
dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
/* other settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
i == 4 ? 0x80 : 0x0);
writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG0(i));
writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG1(i));
writel(0, lane_base + REG_DSI_10nm_PHY_LN_CFG2(i));
writel(i == 4 ? 0x80 : 0x0, lane_base + REG_DSI_10nm_PHY_LN_CFG3(i));
/* platform specific dsi phy drive strength adjustment */
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i),
tuning_cfg->rescode_offset_top[i]);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i),
tuning_cfg->rescode_offset_bot[i]);
writel(tuning_cfg->rescode_offset_top[i],
lane_base + REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i));
writel(tuning_cfg->rescode_offset_bot[i],
lane_base + REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i));
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
tx_dctrl[i]);
writel(tx_dctrl[i],
lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i));
}
if (!(phy->cfg->quirks & DSI_PHY_10NM_QUIRK_OLD_TIMINGS)) {
/* Toggle BIT 0 to release freeze I/0 */
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
writel(0x05, lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3));
writel(0x04, lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3));
}
}
@ -833,64 +815,51 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy,
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0);
/* Assert PLL core reset */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);
writel(0x00, base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
/* turn off resync FIFO */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);
writel(0x00, base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL);
/* Select MS1 byte-clk */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
writel(0x10, base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL);
/* Enable LDO with platform specific drive level/amplitude adjustment */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL,
tuning_cfg->vreg_ctrl);
writel(tuning_cfg->vreg_ctrl, base + REG_DSI_10nm_PHY_CMN_VREG_CTRL);
/* Configure PHY lane swap (TODO: we need to calculate this) */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);
writel(0x21, base + REG_DSI_10nm_PHY_CMN_LANE_CFG0);
writel(0x84, base + REG_DSI_10nm_PHY_CMN_LANE_CFG1);
/* DSI PHY timings */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
timing->hs_halfbyte_en);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
timing->clk_zero);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
timing->clk_prepare);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
timing->clk_trail);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
timing->hs_exit);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
timing->hs_zero);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
timing->hs_prepare);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
timing->hs_trail);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
timing->hs_rqst);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
timing->ta_go | (timing->ta_sure << 3));
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
timing->ta_get);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
0x00);
writel(timing->hs_halfbyte_en, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0);
writel(timing->clk_zero, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1);
writel(timing->clk_prepare, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2);
writel(timing->clk_trail, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3);
writel(timing->hs_exit, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4);
writel(timing->hs_zero, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5);
writel(timing->hs_prepare, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6);
writel(timing->hs_trail, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7);
writel(timing->hs_rqst, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8);
writel(timing->ta_go | (timing->ta_sure << 3), base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9);
writel(timing->ta_get, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10);
writel(0x00, base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11);
/* Remove power down from all blocks */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);
writel(0x7f, base + REG_DSI_10nm_PHY_CMN_CTRL_0);
/* power up lanes */
data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
data = readl(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
/* TODO: only power up lanes that are used */
data |= 0x1F;
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);
writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0);
writel(0x1F, base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0);
/* Select full-rate mode */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
writel(0x40, base + REG_DSI_10nm_PHY_CMN_CTRL_2);
ret = dsi_10nm_set_usecase(phy);
if (ret) {
@ -918,15 +887,15 @@ static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
pr_warn("Turning OFF PHY while PLL is on\n");
dsi_phy_hw_v3_0_config_lpcdrx(phy, false);
data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
data = readl(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
/* disable all lanes */
data &= ~0x1F;
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0);
writel(data, base + REG_DSI_10nm_PHY_CMN_CTRL_0);
writel(0, base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0);
/* Turn off all PHY blocks */
dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x00);
writel(0x00, base + REG_DSI_10nm_PHY_CMN_CTRL_0);
/* make sure phy is turned off */
wmb();

View File

@ -116,7 +116,7 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
tries = nb_tries;
while (tries--) {
val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
val = readl(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
pll_locked = !!(val & BIT(5));
if (pll_locked)
@ -130,7 +130,7 @@ static bool pll_14nm_poll_for_ready(struct dsi_pll_14nm *pll_14nm,
tries = nb_tries;
while (tries--) {
val = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
val = readl(base + REG_DSI_14nm_PHY_PLL_RESET_SM_READY_STATUS);
pll_ready = !!(val & BIT(0));
if (pll_ready)
@ -288,29 +288,29 @@ static void pll_db_commit_ssc(struct dsi_pll_14nm *pll, struct dsi_pll_config *p
data = pconf->ssc_adj_period;
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER1);
data = (pconf->ssc_adj_period >> 8);
data &= 0x03;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_ADJ_PER2);
data = pconf->ssc_period;
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_PER1);
data = (pconf->ssc_period >> 8);
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_PER2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_PER2);
data = pconf->ssc_step_size;
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE1);
data = (pconf->ssc_step_size >> 8);
data &= 0x0ff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_STEP_SIZE2);
data = (pconf->ssc_center & 0x01);
data <<= 1;
data |= 0x01; /* enable */
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_SSC_EN_CENTER);
wmb(); /* make sure register committed */
}
@ -323,43 +323,45 @@ static void pll_db_commit_common(struct dsi_pll_14nm *pll,
/* configure the non-frequency-dependent pll registers */
data = 0;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_SYSCLK_EN_RESET);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_TXCLK_EN, 1);
writel(1, base + REG_DSI_14nm_PHY_PLL_TXCLK_EN);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL, 48);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2, 4 << 3); /* bandgap_timer */
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5, 5); /* pll_wakeup_timer */
writel(48, base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL);
/* bandgap_timer */
writel(4 << 3, base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL2);
/* pll_wakeup_timer */
writel(5, base + REG_DSI_14nm_PHY_PLL_RESETSM_CNTRL5);
data = pconf->pll_vco_div_ref & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF1);
data = (pconf->pll_vco_div_ref >> 8) & 0x3;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_VCO_DIV_REF2);
data = pconf->pll_kvco_div_ref & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF1);
data = (pconf->pll_kvco_div_ref >> 8) & 0x3;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_KVCO_DIV_REF2);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_MISC1, 16);
writel(16, base + REG_DSI_14nm_PHY_PLL_PLL_MISC1);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IE_TRIM, 4);
writel(4, base + REG_DSI_14nm_PHY_PLL_IE_TRIM);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IP_TRIM, 4);
writel(4, base + REG_DSI_14nm_PHY_PLL_IP_TRIM);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR, 1 << 3 | 1);
writel(1 << 3 | 1, base + REG_DSI_14nm_PHY_PLL_CP_SET_CUR);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET, 0 << 3 | 0);
writel(0 << 3 | 0, base + REG_DSI_14nm_PHY_PLL_PLL_ICPCSET);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET, 0 << 3 | 0);
writel(0 << 3 | 0, base + REG_DSI_14nm_PHY_PLL_PLL_ICPMSET);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET, 4 << 3 | 4);
writel(4 << 3 | 4, base + REG_DSI_14nm_PHY_PLL_PLL_ICP_SET);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF1, 1 << 4 | 11);
writel(1 << 4 | 11, base + REG_DSI_14nm_PHY_PLL_PLL_LPF1);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM, 7);
writel(7, base + REG_DSI_14nm_PHY_PLL_IPTAT_TRIM);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL, 1 << 4 | 2);
writel(1 << 4 | 2, base + REG_DSI_14nm_PHY_PLL_PLL_CRCTRL);
}
static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
@ -369,13 +371,14 @@ static void pll_14nm_software_reset(struct dsi_pll_14nm *pll_14nm)
/* de assert pll start and apply pll sw reset */
/* stop pll */
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
writel(0, cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL);
/* pll sw reset */
dsi_phy_write_udelay(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x20, 10);
writel(0x20, cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1);
udelay(10);
wmb(); /* make sure register committed */
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0);
writel(0, cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_1);
wmb(); /* make sure register committed */
}
@ -388,50 +391,50 @@ static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
DBG("DSI%d PLL", pll->phy->id);
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, 0x3c);
writel(0x3c, cmn_base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL);
pll_db_commit_common(pll, pconf);
pll_14nm_software_reset(pll);
/* Use the /2 path in Mux */
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1, 1);
writel(1, cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG1);
data = 0xff; /* data, clk, pll normal operation */
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0, data);
writel(data, cmn_base + REG_DSI_14nm_PHY_CMN_CTRL_0);
/* configure the frequency dependent pll registers */
data = pconf->dec_start;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DEC_START, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_DEC_START);
data = pconf->div_frac_start & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1);
data = (pconf->div_frac_start >> 8) & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2);
data = (pconf->div_frac_start >> 16) & 0xf;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3);
data = pconf->plllock_cmp & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP1);
data = (pconf->plllock_cmp >> 8) & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP2);
data = (pconf->plllock_cmp >> 16) & 0x3;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP3);
data = pconf->plllock_cnt << 1 | 0 << 3; /* plllock_rng */
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_PLLLOCK_CMP_EN);
data = pconf->pll_vco_count & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_VCO_COUNT1);
data = (pconf->pll_vco_count >> 8) & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_VCO_COUNT2);
data = pconf->pll_kvco_count & 0xff;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT1);
data = (pconf->pll_kvco_count >> 8) & 0x3;
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2, data);
writel(data, base + REG_DSI_14nm_PHY_PLL_KVCO_COUNT2);
/*
* High nibble configures the post divider internal to the VCO. It's
@ -442,7 +445,7 @@ static void pll_db_commit_14nm(struct dsi_pll_14nm *pll,
* 2: divided by 4
* 3: divided by 8
*/
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV, 0 << 4 | 3);
writel(0 << 4 | 3, base + REG_DSI_14nm_PHY_PLL_PLL_LPF2_POSTDIV);
if (pconf->ssc_en)
pll_db_commit_ssc(pll, pconf);
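The frequency-dependent fields earlier in this function are split byte by byte across narrow registers (DIV_FRAC_START1/2/3, PLLLOCK_CMP1/2/3, VCO_COUNT1/2, and so on). Purely as an illustration of that pattern, not something this patch adds, the div_frac_start split could be expressed as a small helper:

/* Illustration only: mirrors the three writes above that spread the 20-bit
 * div_frac_start value across byte-wide registers. */
static void pll_14nm_write_div_frac_start(void __iomem *base, u32 v)
{
	writel(v & 0xff, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1);
	writel((v >> 8) & 0xff, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2);
	writel((v >> 16) & 0xf, base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3);
}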
@ -497,16 +500,16 @@ static unsigned long dsi_pll_14nm_vco_recalc_rate(struct clk_hw *hw,
u32 dec_start;
u64 ref_clk = parent_rate;
dec_start = dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DEC_START);
dec_start = readl(base + REG_DSI_14nm_PHY_PLL_DEC_START);
dec_start &= 0x0ff;
DBG("dec_start = %x", dec_start);
div_frac_start = (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
div_frac_start = (readl(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START3)
& 0xf) << 16;
div_frac_start |= (dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
div_frac_start |= (readl(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START2)
& 0xff) << 8;
div_frac_start |= dsi_phy_read(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
div_frac_start |= readl(base + REG_DSI_14nm_PHY_PLL_DIV_FRAC_START1)
& 0xff;
DBG("div_frac_start = %x", div_frac_start);
@ -542,8 +545,8 @@ static int dsi_pll_14nm_vco_prepare(struct clk_hw *hw)
if (dsi_pll_14nm_vco_recalc_rate(hw, VCO_REF_CLK_RATE) == 0)
dsi_pll_14nm_vco_set_rate(hw, pll_14nm->phy->cfg->min_pll_rate, VCO_REF_CLK_RATE);
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_VREF_CFG1, 0x10);
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 1);
writel(0x10, base + REG_DSI_14nm_PHY_PLL_VREF_CFG1);
writel(1, cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL);
locked = pll_14nm_poll_for_ready(pll_14nm, POLL_MAX_READS,
POLL_TIMEOUT_US);
@ -569,7 +572,7 @@ static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
if (unlikely(!pll_14nm->phy->pll_on))
return;
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0);
writel(0, cmn_base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL);
pll_14nm->phy->pll_on = false;
}
@ -611,7 +614,7 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, parent_rate);
val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
val = readl(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0) >> shift;
val &= div_mask(width);
return divider_recalc_rate(hw, parent_rate, val, NULL,
@ -653,11 +656,11 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
spin_lock_irqsave(lock, flags);
val = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
val = readl(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
val &= ~(div_mask(width) << shift);
val |= value << shift;
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
writel(val, base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
/* If we're master in bonded DSI mode, then the slave PLL's post-dividers
* follow the master's post dividers
@ -666,7 +669,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
void __iomem *slave_base = pll_14nm_slave->phy->base;
dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, val);
writel(val, slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
}
spin_unlock_irqrestore(lock, flags);
@ -691,7 +694,7 @@ static void dsi_14nm_pll_save_state(struct msm_dsi_phy *phy)
void __iomem *cmn_base = pll_14nm->phy->base;
u32 data;
data = dsi_phy_read(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
data = readl(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
cached_state->n1postdiv = data & 0xf;
cached_state->n2postdiv = (data >> 4) & 0xf;
@ -723,14 +726,14 @@ static int dsi_14nm_pll_restore_state(struct msm_dsi_phy *phy)
DBG("DSI%d PLL restore state %x %x", pll_14nm->phy->id,
cached_state->n1postdiv, cached_state->n2postdiv);
dsi_phy_write(cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
writel(data, cmn_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
/* also restore post-dividers for slave DSI PLL */
if (phy->usecase == MSM_DSI_PHY_MASTER) {
struct dsi_pll_14nm *pll_14nm_slave = pll_14nm->slave;
void __iomem *slave_base = pll_14nm_slave->phy->base;
dsi_phy_write(slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0, data);
writel(data, slave_base + REG_DSI_14nm_PHY_CMN_CLK_CFG0);
}
return 0;
@ -758,9 +761,9 @@ static int dsi_14nm_set_usecase(struct msm_dsi_phy *phy)
return -EINVAL;
}
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN, clkbuflr_en);
writel(clkbuflr_en, base + REG_DSI_14nm_PHY_PLL_CLKBUFLR_EN);
if (bandgap)
dsi_phy_write(base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP, bandgap);
writel(bandgap, base + REG_DSI_14nm_PHY_PLL_PLL_BANDGAP);
return 0;
}
@ -917,27 +920,27 @@ static void dsi_14nm_dphy_set_timing(struct msm_dsi_phy *phy,
u32 halfbyte_en = clk_ln ? timing->hs_halfbyte_en_ckln :
timing->hs_halfbyte_en;
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx),
DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx),
halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0);
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx),
DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0));
writel(DSI_14nm_PHY_LN_TIMING_CTRL_4_HS_EXIT(timing->hs_exit),
base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_4(lane_idx));
writel(DSI_14nm_PHY_LN_TIMING_CTRL_5_HS_ZERO(zero),
base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_5(lane_idx));
writel(DSI_14nm_PHY_LN_TIMING_CTRL_6_HS_PREPARE(prepare),
base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_6(lane_idx));
writel(DSI_14nm_PHY_LN_TIMING_CTRL_7_HS_TRAIL(trail),
base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_7(lane_idx));
writel(DSI_14nm_PHY_LN_TIMING_CTRL_8_HS_RQST(rqst),
base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_8(lane_idx));
writel(DSI_14nm_PHY_LN_CFG0_PREPARE_DLY(prep_dly),
base + REG_DSI_14nm_PHY_LN_CFG0(lane_idx));
writel(halfbyte_en ? DSI_14nm_PHY_LN_CFG1_HALFBYTECLK_EN : 0,
base + REG_DSI_14nm_PHY_LN_CFG1(lane_idx));
writel(DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_14nm_PHY_LN_TIMING_CTRL_9_TA_SURE(timing->ta_sure),
base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_9(lane_idx));
writel(DSI_14nm_PHY_LN_TIMING_CTRL_10_TA_GET(timing->ta_get),
base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_10(lane_idx));
writel(DSI_14nm_PHY_LN_TIMING_CTRL_11_TRIG3_CMD(0xa0),
base + REG_DSI_14nm_PHY_LN_TIMING_CTRL_11(lane_idx));
}
static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
@ -961,49 +964,44 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
data = 0x1c;
if (phy->usecase != MSM_DSI_PHY_STANDALONE)
data |= DSI_14nm_PHY_CMN_LDO_CNTRL_VREG_CTRL(32);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL, data);
writel(data, base + REG_DSI_14nm_PHY_CMN_LDO_CNTRL);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0x1);
writel(0x1, base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
/* 4 data lanes + 1 clk lane configuration */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i),
0x1d);
writel(0x1d, lane_base + REG_DSI_14nm_PHY_LN_VREG_CNTRL(i));
dsi_phy_write(lane_base +
REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i), 0xff);
dsi_phy_write(lane_base +
REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i),
(i == PHY_14NM_CKLN_IDX) ? 0x00 : 0x06);
writel(0xff, lane_base + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_0(i));
writel(i == PHY_14NM_CKLN_IDX ? 0x00 : 0x06,
lane_base + REG_DSI_14nm_PHY_LN_STRENGTH_CTRL_1(i));
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG3(i),
(i == PHY_14NM_CKLN_IDX) ? 0x8f : 0x0f);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_CFG2(i), 0x10);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i),
0);
dsi_phy_write(lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i),
0x88);
writel(i == PHY_14NM_CKLN_IDX ? 0x8f : 0x0f,
lane_base + REG_DSI_14nm_PHY_LN_CFG3(i));
writel(0x10, lane_base + REG_DSI_14nm_PHY_LN_CFG2(i));
writel(0, lane_base + REG_DSI_14nm_PHY_LN_TEST_DATAPATH(i));
writel(0x88, lane_base + REG_DSI_14nm_PHY_LN_TEST_STR(i));
dsi_14nm_dphy_set_timing(phy, timing, i);
}
/* Make sure the PLL is not started */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL, 0x00);
writel(0x00, base + REG_DSI_14nm_PHY_CMN_PLL_CNTRL);
wmb(); /* make sure everything is written before reset and enable */
/* reset digital block */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x80);
writel(0x80, base + REG_DSI_14nm_PHY_CMN_CTRL_1);
wmb(); /* ensure reset is asserted */
udelay(100);
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_1, 0x00);
writel(0x00, base + REG_DSI_14nm_PHY_CMN_CTRL_1);
glbl_test_ctrl = dsi_phy_read(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
glbl_test_ctrl = readl(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
glbl_test_ctrl |= DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
else
glbl_test_ctrl &= ~DSI_14nm_PHY_CMN_GLBL_TEST_CTRL_BITCLK_HS_SEL;
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, glbl_test_ctrl);
writel(glbl_test_ctrl, base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
ret = dsi_14nm_set_usecase(phy);
if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
@ -1012,15 +1010,15 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy,
}
/* Remove power down from PLL and all lanes */
dsi_phy_write(base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0xff);
writel(0xff, base + REG_DSI_14nm_PHY_CMN_CTRL_0);
return 0;
}
static void dsi_14nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL, 0);
dsi_phy_write(phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0, 0);
writel(0, phy->base + REG_DSI_14nm_PHY_CMN_GLBL_TEST_CTRL);
writel(0, phy->base + REG_DSI_14nm_PHY_CMN_CTRL_0);
/* ensure that the phy is completely disabled */
wmb();

View File

@ -12,32 +12,32 @@ static void dsi_20nm_dphy_set_timing(struct msm_dsi_phy *phy,
{
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_0,
DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_1,
DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_2,
DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
writel(DSI_20nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero),
base + REG_DSI_20nm_PHY_TIMING_CTRL_0);
writel(DSI_20nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail),
base + REG_DSI_20nm_PHY_TIMING_CTRL_1);
writel(DSI_20nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare),
base + REG_DSI_20nm_PHY_TIMING_CTRL_2);
if (timing->clk_zero & BIT(8))
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_3,
DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_4,
DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_5,
DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_6,
DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_7,
DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_8,
DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_9,
DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_10,
DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_20nm_PHY_TIMING_CTRL_11,
DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
writel(DSI_20nm_PHY_TIMING_CTRL_3_CLK_ZERO_8,
base + REG_DSI_20nm_PHY_TIMING_CTRL_3);
writel(DSI_20nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit),
base + REG_DSI_20nm_PHY_TIMING_CTRL_4);
writel(DSI_20nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero),
base + REG_DSI_20nm_PHY_TIMING_CTRL_5);
writel(DSI_20nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare),
base + REG_DSI_20nm_PHY_TIMING_CTRL_6);
writel(DSI_20nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail),
base + REG_DSI_20nm_PHY_TIMING_CTRL_7);
writel(DSI_20nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst),
base + REG_DSI_20nm_PHY_TIMING_CTRL_8);
writel(DSI_20nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_20nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure),
base + REG_DSI_20nm_PHY_TIMING_CTRL_9);
writel(DSI_20nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get),
base + REG_DSI_20nm_PHY_TIMING_CTRL_10);
writel(DSI_20nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0),
base + REG_DSI_20nm_PHY_TIMING_CTRL_11);
}
static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
@ -45,23 +45,23 @@ static void dsi_20nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
void __iomem *base = phy->reg_base;
if (!enable) {
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
writel(0, base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG);
return;
}
if (phy->regulator_ldo_mode) {
dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x1d);
writel(0x1d, phy->base + REG_DSI_20nm_PHY_LDO_CNTRL);
return;
}
/* non LDO mode */
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1, 0x03);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2, 0x03);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4, 0x20);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG, 0x01);
dsi_phy_write(phy->base + REG_DSI_20nm_PHY_LDO_CNTRL, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0, 0x03);
writel(0x03, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_1);
writel(0x03, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_2);
writel(0x00, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_3);
writel(0x20, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_4);
writel(0x01, base + REG_DSI_20nm_PHY_REGULATOR_CAL_PWR_CFG);
writel(0x00, phy->base + REG_DSI_20nm_PHY_LDO_CNTRL);
writel(0x03, base + REG_DSI_20nm_PHY_REGULATOR_CTRL_0);
}
static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy,
@ -83,49 +83,48 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy,
dsi_20nm_phy_regulator_ctrl(phy, true);
dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_0, 0xff);
writel(0xff, base + REG_DSI_20nm_PHY_STRENGTH_0);
val = dsi_phy_read(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL);
val = readl(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL);
if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_STANDALONE)
val |= DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
else
val &= ~DSI_20nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
dsi_phy_write(base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL, val);
writel(val, base + REG_DSI_20nm_PHY_GLBL_TEST_CTRL);
for (i = 0; i < 4; i++) {
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_3(i),
(i >> 1) * 0x40);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i), 0x01);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i), 0x46);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_0(i), 0x02);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_1(i), 0xa0);
dsi_phy_write(base + REG_DSI_20nm_PHY_LN_CFG_4(i), cfg_4[i]);
writel((i >> 1) * 0x40, base + REG_DSI_20nm_PHY_LN_CFG_3(i));
writel(0x01, base + REG_DSI_20nm_PHY_LN_TEST_STR_0(i));
writel(0x46, base + REG_DSI_20nm_PHY_LN_TEST_STR_1(i));
writel(0x02, base + REG_DSI_20nm_PHY_LN_CFG_0(i));
writel(0xa0, base + REG_DSI_20nm_PHY_LN_CFG_1(i));
writel(cfg_4[i], base + REG_DSI_20nm_PHY_LN_CFG_4(i));
}
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_3, 0x80);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR0, 0x01);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_TEST_STR1, 0x46);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_0, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_1, 0xa0);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_2, 0x00);
dsi_phy_write(base + REG_DSI_20nm_PHY_LNCK_CFG_4, 0x00);
writel(0x80, base + REG_DSI_20nm_PHY_LNCK_CFG_3);
writel(0x01, base + REG_DSI_20nm_PHY_LNCK_TEST_STR0);
writel(0x46, base + REG_DSI_20nm_PHY_LNCK_TEST_STR1);
writel(0x00, base + REG_DSI_20nm_PHY_LNCK_CFG_0);
writel(0xa0, base + REG_DSI_20nm_PHY_LNCK_CFG_1);
writel(0x00, base + REG_DSI_20nm_PHY_LNCK_CFG_2);
writel(0x00, base + REG_DSI_20nm_PHY_LNCK_CFG_4);
dsi_20nm_dphy_set_timing(phy, timing);
dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_1, 0x00);
writel(0x00, base + REG_DSI_20nm_PHY_CTRL_1);
dsi_phy_write(base + REG_DSI_20nm_PHY_STRENGTH_1, 0x06);
writel(0x06, base + REG_DSI_20nm_PHY_STRENGTH_1);
/* make sure everything is written before enable */
wmb();
dsi_phy_write(base + REG_DSI_20nm_PHY_CTRL_0, 0x7f);
writel(0x7f, base + REG_DSI_20nm_PHY_CTRL_0);
return 0;
}
static void dsi_20nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_20nm_PHY_CTRL_0, 0);
writel(0, phy->base + REG_DSI_20nm_PHY_CTRL_0);
dsi_20nm_phy_regulator_ctrl(phy, false);
}

View File

@ -83,7 +83,7 @@ static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
u32 val;
while (nb_tries--) {
val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
val = readl(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS);
pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
if (pll_locked)
@ -104,9 +104,10 @@ static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
* Add HW recommended delays after toggling the software
* reset bit off and back on.
*/
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
writel(DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, base + REG_DSI_28nm_PHY_PLL_TEST_CFG);
udelay(1);
writel(0, base + REG_DSI_28nm_PHY_PLL_TEST_CFG);
udelay(1);
}
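pll_28nm_poll_for_ready() above still open-codes the ready polling with readl(). As a side note (not something this change does), the same wait could be written with the generic helper from <linux/iopoll.h>; a sketch, with the poll interval and timeout picked purely for illustration:

#include <linux/iopoll.h>

/* Hypothetical equivalent of the open-coded ready loop; the 10 us poll
 * interval and 1 ms timeout are illustrative values, not taken from the
 * driver. */
static int pll_28nm_wait_for_ready(struct dsi_pll_28nm *pll_28nm)
{
	u32 val;

	return readl_poll_timeout(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_STATUS,
				  val, val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY,
				  10, 1000);
}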
/*
@ -128,7 +129,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
VERB("rate=%lu, parent's=%lu", rate, parent_rate);
/* Force postdiv2 to be div-4 */
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
writel(3, base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG);
/* Configure the Loop filter resistance */
for (i = 0; i < LPFR_LUT_SIZE; i++)
@ -139,11 +140,11 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
rate);
return -EINVAL;
}
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
writel(lpfr_lut[i].resistance, base + REG_DSI_28nm_PHY_PLL_LPFR_CFG);
/* Loop filter capacitance values : c1 and c2 */
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
writel(0x70, base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG);
writel(0x15, base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG);
rem = rate % VCO_REF_CLK_RATE;
if (rem) {
@ -168,7 +169,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
DBG("Generated VCO Clock: %lu", gen_vco_clk);
rem = 0;
sdm_cfg1 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
sdm_cfg1 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
if (frac_n_mode) {
sdm_cfg0 = 0x0;
@ -195,17 +196,17 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
writel(0x02, base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG);
writel(0x2b, base + REG_DSI_28nm_PHY_PLL_CAL_CFG3);
writel(0x06, base + REG_DSI_28nm_PHY_PLL_CAL_CFG4);
writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
writel(sdm_cfg1, base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
writel(DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2),
base + REG_DSI_28nm_PHY_PLL_SDM_CFG2);
writel(DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3),
base + REG_DSI_28nm_PHY_PLL_SDM_CFG3);
writel(0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG4);
/* Add hardware recommended delay for correct PLL configuration */
if (pll_28nm->phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
@ -213,18 +214,18 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
else
udelay(1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);
writel(refclk_cfg, base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG);
writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG);
writel(0x31, base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG);
writel(sdm_cfg0, base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
writel(0x12, base + REG_DSI_28nm_PHY_PLL_CAL_CFG0);
writel(0x30, base + REG_DSI_28nm_PHY_PLL_CAL_CFG6);
writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG7);
writel(0x60, base + REG_DSI_28nm_PHY_PLL_CAL_CFG8);
writel(0x00, base + REG_DSI_28nm_PHY_PLL_CAL_CFG9);
writel(cal_cfg10 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG10);
writel(cal_cfg11 & 0xff, base + REG_DSI_28nm_PHY_PLL_CAL_CFG11);
writel(0x20, base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG);
return 0;
}
@ -250,27 +251,27 @@ static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
VERB("parent_rate=%lu", parent_rate);
/* Check to see if the ref clk doubler is enabled */
doubler = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
doubler = readl(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
ref_clk += (doubler * VCO_REF_CLK_RATE);
/* see if it is integer mode or sdm mode */
sdm0 = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
sdm0 = readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
/* integer mode */
sdm_byp_div = FIELD(
dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
vco_rate = ref_clk * sdm_byp_div;
} else {
/* sdm mode */
sdm_dc_off = FIELD(
dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
DBG("sdm_dc_off = %d", sdm_dc_off);
sdm2 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
sdm2 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
sdm3 = FIELD(dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
sdm3 = FIELD(readl(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
sdm_freq_seed = (sdm3 << 8) | sdm2;
DBG("sdm_freq_seed = %d", sdm_freq_seed);
@ -303,22 +304,26 @@ static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
* Add necessary delays recommended by hardware.
*/
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(1);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(500);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(600);
for (i = 0; i < 2; i++) {
/* DSI Uniphy lock detect setting */
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
0x0c, 100);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
udelay(100);
writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
/* poll for PLL ready status */
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads,
@ -333,22 +338,28 @@ static int _dsi_pll_28nm_vco_prepare_hpm(struct dsi_pll_28nm *pll_28nm)
* Add necessary delays recommended by hardware.
*/
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(1);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(250);
val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(500);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(600);
}
if (unlikely(!locked))
@ -396,24 +407,27 @@ static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw)
* PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34);
writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1);
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(200);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(600);
for (i = 0; i < 7; i++) {
/* DSI Uniphy lock detect setting */
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
0x0c, 100);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
writel(0x0c, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
udelay(100);
writel(0x0d, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
/* poll for PLL ready status */
locked = pll_28nm_poll_for_ready(pll_28nm,
@ -427,15 +441,18 @@ static int dsi_pll_28nm_vco_prepare_8226(struct clk_hw *hw)
* PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00, 50);
writel(0x00, base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG);
udelay(50);
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 100);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(100);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
udelay(600);
}
if (unlikely(!locked))
@ -466,21 +483,27 @@ static int dsi_pll_28nm_vco_prepare_lp(struct clk_hw *hw)
* PLL power up sequence.
* Add necessary delays recommended by hardware.
*/
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
writel(0x34, base + REG_DSI_28nm_PHY_PLL_CAL_CFG1);
ndelay(500);
val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
ndelay(500);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
ndelay(500);
val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
writel(val, base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
ndelay(500);
/* DSI PLL toggle lock detect setting */
dsi_phy_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
dsi_phy_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
writel(0x04, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
ndelay(500);
writel(0x05, base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2);
udelay(512);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
@ -504,7 +527,7 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
if (unlikely(!pll_28nm->phy->pll_on))
return;
dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
writel(0, pll_28nm->phy->pll_base + REG_DSI_28nm_PHY_PLL_GLB_CFG);
pll_28nm->phy->pll_on = false;
}
@ -560,10 +583,10 @@ static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
void __iomem *base = pll_28nm->phy->pll_base;
cached_state->postdiv3 =
dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
cached_state->postdiv1 =
dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
cached_state->byte_mux = dsi_phy_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
readl(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
cached_state->byte_mux = readl(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
if (dsi_pll_28nm_clk_is_enabled(phy->vco_hw))
cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
else
@ -585,12 +608,9 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
return ret;
}
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
cached_state->postdiv1);
dsi_phy_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
cached_state->byte_mux);
writel(cached_state->postdiv3, base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
writel(cached_state->postdiv1, base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
writel(cached_state->byte_mux, base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
return 0;
}
@ -700,72 +720,71 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
{
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
writel(DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero),
base + REG_DSI_28nm_PHY_TIMING_CTRL_0);
writel(DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail),
base + REG_DSI_28nm_PHY_TIMING_CTRL_1);
writel(DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare),
base + REG_DSI_28nm_PHY_TIMING_CTRL_2);
if (timing->clk_zero & BIT(8))
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
writel(DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8,
base + REG_DSI_28nm_PHY_TIMING_CTRL_3);
writel(DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit),
base + REG_DSI_28nm_PHY_TIMING_CTRL_4);
writel(DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero),
base + REG_DSI_28nm_PHY_TIMING_CTRL_5);
writel(DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare),
base + REG_DSI_28nm_PHY_TIMING_CTRL_6);
writel(DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail),
base + REG_DSI_28nm_PHY_TIMING_CTRL_7);
writel(DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst),
base + REG_DSI_28nm_PHY_TIMING_CTRL_8);
writel(DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure),
base + REG_DSI_28nm_PHY_TIMING_CTRL_9);
writel(DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get),
base + REG_DSI_28nm_PHY_TIMING_CTRL_10);
writel(DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0),
base + REG_DSI_28nm_PHY_TIMING_CTRL_11);
}
static void dsi_28nm_phy_regulator_enable_dcdc(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);
writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
writel(1, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5);
writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3);
writel(0x3, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2);
writel(0x9, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1);
writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4);
writel(0x00, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
}
static void dsi_28nm_phy_regulator_enable_ldo(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0x7);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
writel(0x0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0);
writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
writel(0x7, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5);
writel(0, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3);
writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2);
writel(0x1, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1);
writel(0x20, base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4);
if (phy->cfg->quirks & DSI_PHY_28NM_QUIRK_PHY_LP)
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x05);
writel(0x05, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
else
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x0d);
writel(0x0d, phy->base + REG_DSI_28nm_PHY_LDO_CNTRL);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
if (!enable) {
dsi_phy_write(phy->reg_base +
REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
writel(0, phy->reg_base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG);
return;
}
@ -792,49 +811,49 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
return -EINVAL;
}
dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);
writel(0xff, base + REG_DSI_28nm_PHY_STRENGTH_0);
dsi_28nm_phy_regulator_ctrl(phy, true);
dsi_28nm_dphy_set_timing(phy, timing);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
writel(0x00, base + REG_DSI_28nm_PHY_CTRL_1);
writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0);
dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);
writel(0x6, base + REG_DSI_28nm_PHY_STRENGTH_1);
for (i = 0; i < 4; i++) {
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
writel(0, base + REG_DSI_28nm_PHY_LN_CFG_0(i));
writel(0, base + REG_DSI_28nm_PHY_LN_CFG_1(i));
writel(0, base + REG_DSI_28nm_PHY_LN_CFG_2(i));
writel(0, base + REG_DSI_28nm_PHY_LN_CFG_3(i));
writel(0, base + REG_DSI_28nm_PHY_LN_CFG_4(i));
writel(0, base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i));
writel(0, base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i));
writel(0x1, base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i));
writel(0x97, base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i));
}
dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_4, 0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);
writel(0, base + REG_DSI_28nm_PHY_LNCK_CFG_4);
writel(0xc0, base + REG_DSI_28nm_PHY_LNCK_CFG_1);
writel(0x1, base + REG_DSI_28nm_PHY_LNCK_TEST_STR0);
writel(0xbb, base + REG_DSI_28nm_PHY_LNCK_TEST_STR1);
dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);
writel(0x5f, base + REG_DSI_28nm_PHY_CTRL_0);
val = dsi_phy_read(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
val = readl(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
if (phy->id == DSI_1 && phy->usecase == MSM_DSI_PHY_SLAVE)
val &= ~DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
else
val |= DSI_28nm_PHY_GLBL_TEST_CTRL_BITCLK_HS_SEL;
dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, val);
writel(val, base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL);
return 0;
}
static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
writel(0, phy->base + REG_DSI_28nm_PHY_CTRL_0);
dsi_28nm_phy_regulator_ctrl(phy, false);
/*
@ -917,3 +936,21 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8226_cfgs = {
.num_dsi_phy = 1,
.quirks = DSI_PHY_28NM_QUIRK_PHY_8226,
};
const struct msm_dsi_phy_cfg dsi_phy_28nm_8937_cfgs = {
.has_phy_regulator = true,
.regulator_data = dsi_phy_28nm_regulators,
.num_regulators = ARRAY_SIZE(dsi_phy_28nm_regulators),
.ops = {
.enable = dsi_28nm_phy_enable,
.disable = dsi_28nm_phy_disable,
.pll_init = dsi_pll_28nm_init,
.save_pll_state = dsi_28nm_pll_save_state,
.restore_pll_state = dsi_28nm_pll_restore_state,
},
.min_pll_rate = VCO_MIN_RATE,
.max_pll_rate = VCO_MAX_RATE,
.io_start = { 0x1a94400, 0x1a96400 },
.num_dsi_phy = 2,
.quirks = DSI_PHY_28NM_QUIRK_PHY_LP,
};
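The new dsi_phy_28nm_8937_cfgs table above still has to be selected by compatible string elsewhere in the PHY driver; that hunk is not part of this excerpt. Presumably the wiring is an of_device_id entry along these lines (the compatible string and table name here are assumptions):

/* Assumed match-table hook-up for the new config, not shown in this excerpt. */
static const struct of_device_id dsi_phy_dt_match[] = {
	/* ... existing 28nm/14nm/10nm entries ... */
	{ .compatible = "qcom,dsi-phy-28nm-8937",
	  .data = &dsi_phy_28nm_8937_cfgs },
	{}
};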

View File

@ -74,7 +74,7 @@ static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
u32 val;
while (nb_tries--) {
val = dsi_phy_read(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY);
val = readl(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_RDY);
pll_locked = !!(val & DSI_28nm_8960_PHY_PLL_RDY_PLL_RDY);
if (pll_locked)
@ -103,30 +103,25 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val = VCO_REF_CLK_RATE / 10;
fb_divider = (temp * VCO_PREF_DIV_RATIO) / val;
fb_divider = fb_divider / 2 - 1;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1,
fb_divider & 0xff);
writel(fb_divider & 0xff, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val |= (fb_divider >> 8) & 0x07;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2,
val);
writel(val, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
val |= (VCO_PREF_DIV_RATIO - 1) & 0x3f;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3,
val);
writel(val, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6,
0xf);
writel(0xf, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_6);
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val |= 0x7 << 4;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
val);
writel(val, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
return 0;
}
@ -149,16 +144,16 @@ static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
VERB("parent_rate=%lu", parent_rate);
status = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
status = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
if (status & DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE) {
fb_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
fb_divider = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_1);
fb_divider &= 0xff;
temp = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
temp = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_2) & 0x07;
fb_divider = (temp << 8) | fb_divider;
fb_divider += 1;
ref_divider = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
ref_divider = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_3);
ref_divider &= 0x3f;
ref_divider += 1;
@ -195,18 +190,18 @@ static int dsi_pll_28nm_vco_prepare(struct clk_hw *hw)
* 2: divide by 8 to get bit clock divider
* 3: write it to POSTDIV1
*/
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
byte_div = val + 1;
bit_div = byte_div / 8;
val = dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val = readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
val &= ~0xf;
val |= (bit_div - 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8, val);
writel(val, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
/* enable the PLL */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0,
DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE);
writel(DSI_28nm_8960_PHY_PLL_CTRL_0_ENABLE,
base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
@ -230,7 +225,7 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
if (unlikely(!pll_28nm->phy->pll_on))
return;
dsi_phy_write(pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0, 0x00);
writel(0x00, pll_28nm->phy->pll_base + REG_DSI_28nm_8960_PHY_PLL_CTRL_0);
pll_28nm->phy->pll_on = false;
}
@ -277,7 +272,7 @@ static unsigned long clk_bytediv_recalc_rate(struct clk_hw *hw,
struct clk_bytediv *bytediv = to_clk_bytediv(hw);
unsigned int div;
div = dsi_phy_read(bytediv->reg) & 0xff;
div = readl(bytediv->reg) & 0xff;
return parent_rate / (div + 1);
}
@ -323,9 +318,9 @@ static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
factor = get_vco_mul_factor(rate);
val = dsi_phy_read(bytediv->reg);
val = readl(bytediv->reg);
val |= (factor - 1) & 0xff;
dsi_phy_write(bytediv->reg, val);
writel(val, bytediv->reg);
return 0;
}
@ -347,11 +342,11 @@ static void dsi_28nm_pll_save_state(struct msm_dsi_phy *phy)
void __iomem *base = pll_28nm->phy->pll_base;
cached_state->postdiv3 =
dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
cached_state->postdiv2 =
dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
cached_state->postdiv1 =
dsi_phy_read(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
readl(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
cached_state->vco_rate = clk_hw_get_rate(phy->vco_hw);
}
@ -371,12 +366,9 @@ static int dsi_28nm_pll_restore_state(struct msm_dsi_phy *phy)
return ret;
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10,
cached_state->postdiv3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9,
cached_state->postdiv2);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8,
cached_state->postdiv1);
writel(cached_state->postdiv3, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_10);
writel(cached_state->postdiv2, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_9);
writel(cached_state->postdiv1, base + REG_DSI_28nm_8960_PHY_PLL_CTRL_8);
return 0;
}
@ -477,53 +469,52 @@ static void dsi_28nm_dphy_set_timing(struct msm_dsi_phy *phy,
{
void __iomem *base = phy->base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0,
DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1,
DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2,
DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4,
DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5,
DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6,
DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7,
DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8,
DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9,
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10,
DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11,
DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0));
writel(DSI_28nm_8960_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_0);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_1);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_2);
writel(0, base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_3);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_4);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_5);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_6);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_7);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_8);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
DSI_28nm_8960_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_9);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_10);
writel(DSI_28nm_8960_PHY_TIMING_CTRL_11_TRIG3_CMD(0),
base + REG_DSI_28nm_8960_PHY_TIMING_CTRL_11);
}
static void dsi_28nm_phy_regulator_init(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4,
0x100);
writel(0x3, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0);
writel(1, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1);
writel(1, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2);
writel(0, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3);
writel(0x100, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4);
}
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy)
{
void __iomem *base = phy->reg_base;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0, 0x3);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1, 0xa);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2, 0x4);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4, 0x20);
writel(0x3, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_0);
writel(0xa, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_1);
writel(0x4, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_2);
writel(0x0, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_3);
writel(0x20, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CTRL_4);
}
static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
@ -532,21 +523,20 @@ static void dsi_28nm_phy_calibration(struct msm_dsi_phy *phy)
u32 status;
int i = 5000;
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG,
0x3);
writel(0x3, base + REG_DSI_28nm_8960_PHY_MISC_REGULATOR_CAL_PWR_CFG);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1, 0x5a);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3, 0x10);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4, 0x1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0, 0x1);
writel(0x0, base + REG_DSI_28nm_8960_PHY_MISC_CAL_SW_CFG_2);
writel(0x5a, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_1);
writel(0x10, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_3);
writel(0x1, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_4);
writel(0x1, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_CFG_0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x1);
writel(0x1, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER);
usleep_range(5000, 6000);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER, 0x0);
writel(0x0, base + REG_DSI_28nm_8960_PHY_MISC_CAL_HW_TRIGGER);
do {
status = dsi_phy_read(base +
status = readl(base +
REG_DSI_28nm_8960_PHY_MISC_CAL_STATUS);
if (!(status & DSI_28nm_8960_PHY_MISC_CAL_STATUS_CAL_BUSY))
@ -562,23 +552,20 @@ static void dsi_28nm_phy_lane_config(struct msm_dsi_phy *phy)
int i;
for (i = 0; i < 4; i++) {
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i), 0x80);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i), 0x45);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i), 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i),
0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i),
0x01);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i),
0x66);
writel(0x80, base + REG_DSI_28nm_8960_PHY_LN_CFG_0(i));
writel(0x45, base + REG_DSI_28nm_8960_PHY_LN_CFG_1(i));
writel(0x00, base + REG_DSI_28nm_8960_PHY_LN_CFG_2(i));
writel(0x00, base + REG_DSI_28nm_8960_PHY_LN_TEST_DATAPATH(i));
writel(0x01, base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_0(i));
writel(0x66, base + REG_DSI_28nm_8960_PHY_LN_TEST_STR_1(i));
}
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0, 0x40);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1, 0x67);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH, 0x0);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0, 0x1);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1, 0x88);
writel(0x40, base + REG_DSI_28nm_8960_PHY_LNCK_CFG_0);
writel(0x67, base + REG_DSI_28nm_8960_PHY_LNCK_CFG_1);
writel(0x0, base + REG_DSI_28nm_8960_PHY_LNCK_CFG_2);
writel(0x0, base + REG_DSI_28nm_8960_PHY_LNCK_TEST_DATAPATH);
writel(0x1, base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR0);
writel(0x88, base + REG_DSI_28nm_8960_PHY_LNCK_TEST_STR1);
}
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
@ -598,18 +585,18 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
dsi_28nm_phy_regulator_init(phy);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_LDO_CTRL, 0x04);
writel(0x04, base + REG_DSI_28nm_8960_PHY_LDO_CTRL);
/* strength control */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_0, 0xff);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_1, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_STRENGTH_2, 0x06);
writel(0xff, base + REG_DSI_28nm_8960_PHY_STRENGTH_0);
writel(0x00, base + REG_DSI_28nm_8960_PHY_STRENGTH_1);
writel(0x06, base + REG_DSI_28nm_8960_PHY_STRENGTH_2);
/* phy ctrl */
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x5f);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_1, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_2, 0x00);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_CTRL_3, 0x10);
writel(0x5f, base + REG_DSI_28nm_8960_PHY_CTRL_0);
writel(0x00, base + REG_DSI_28nm_8960_PHY_CTRL_1);
writel(0x00, base + REG_DSI_28nm_8960_PHY_CTRL_2);
writel(0x10, base + REG_DSI_28nm_8960_PHY_CTRL_3);
dsi_28nm_phy_regulator_ctrl(phy);
@ -617,10 +604,10 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
dsi_28nm_phy_lane_config(phy);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0f);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1, 0x03);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0, 0x03);
dsi_phy_write(base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4, 0x0);
writel(0x0f, base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4);
writel(0x03, base + REG_DSI_28nm_8960_PHY_BIST_CTRL_1);
writel(0x03, base + REG_DSI_28nm_8960_PHY_BIST_CTRL_0);
writel(0x0, base + REG_DSI_28nm_8960_PHY_BIST_CTRL_4);
dsi_28nm_dphy_set_timing(phy, timing);
@ -629,7 +616,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy,
static void dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
dsi_phy_write(phy->base + REG_DSI_28nm_8960_PHY_CTRL_0, 0x0);
writel(0x0, phy->base + REG_DSI_28nm_8960_PHY_CTRL_0);
/*
* Wait for the registers writes to complete in order to

View File

@ -194,20 +194,20 @@ static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *c
if (config->enable_ssc) {
pr_debug("SSC is enabled\n");
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
config->ssc_stepsize & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
config->ssc_stepsize >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
config->ssc_div_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
config->ssc_div_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
config->ssc_adj_per & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
config->ssc_adj_per >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
SSC_EN | (config->ssc_center ? SSC_CENTER : 0));
writel(config->ssc_stepsize & 0xff,
base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1);
writel(config->ssc_stepsize >> 8,
base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1);
writel(config->ssc_div_per & 0xff,
base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1);
writel(config->ssc_div_per >> 8,
base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1);
writel(config->ssc_adj_per & 0xff,
base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1);
writel(config->ssc_adj_per >> 8,
base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1);
writel(SSC_EN | (config->ssc_center ? SSC_CENTER : 0),
base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL);
}
}
@ -242,36 +242,35 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
vco_config_1 = 0x01;
}
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
analog_controls_five_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) ? 0x3f : 0x22);
writel(analog_controls_five_1, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1);
writel(vco_config_1, base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1);
writel(0x01, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE);
writel(0x03, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO);
writel(0x00, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE);
writel(0x00, base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER);
writel(0x4e, base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER);
writel(0x40, base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS);
writel(0xba, base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE);
writel(0x0c, base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE);
writel(0x00, base + REG_DSI_7nm_PHY_PLL_OUTDIV);
writel(0x00, base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE);
writel(0x08, base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO);
writel(0x0a, base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1);
writel(0xc0, base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1);
writel(0x84, base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1);
writel(0x82, base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1);
writel(0x4c, base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1);
writel(0x80, base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE);
writel(0x29, base + REG_DSI_7nm_PHY_PLL_PFILT);
writel(0x2f, base + REG_DSI_7nm_PHY_PLL_PFILT);
writel(0x2a, base + REG_DSI_7nm_PHY_PLL_IFILT);
writel(!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) ? 0x3f : 0x22,
base + REG_DSI_7nm_PHY_PLL_IFILT);
if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) {
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
writel(0x22, base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE);
if (pll->slave)
dsi_phy_write(pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
writel(0x22, pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE);
}
}
@ -279,21 +278,21 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll, struct dsi_pll_config *confi
{
void __iomem *base = pll->phy->pll_base;
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1,
config->decimal_div_start);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1,
config->frac_div_start & 0xff);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1,
(config->frac_div_start & 0xff00) >> 8);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
(config->frac_div_start & 0x30000) >> 16);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1,
pll->phy->cphy_mode ? 0x00 : 0x10);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS,
config->pll_clock_inverters);
writel(0x12, base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE);
writel(config->decimal_div_start,
base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
writel(config->frac_div_start & 0xff,
base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
writel((config->frac_div_start & 0xff00) >> 8,
base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1);
writel((config->frac_div_start & 0x30000) >> 16,
base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1);
writel(0x40, base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1);
writel(0x06, base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY);
writel(pll->phy->cphy_mode ? 0x00 : 0x10,
base + REG_DSI_7nm_PHY_PLL_CMODE_1);
writel(config->pll_clock_inverters,
base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS);
}
static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@ -347,19 +346,19 @@ static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
{
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
u32 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
writel(0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES);
writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
ndelay(250);
}
static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
{
u32 data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
u32 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
dsi_phy_write(pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
writel(data | BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0xc0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES);
ndelay(250);
}
@ -367,19 +366,18 @@ static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
{
u32 data;
data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
}
static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
{
u32 data;
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3);
data = dsi_phy_read(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
data | BIT(5) | BIT(4));
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@ -389,9 +387,9 @@ static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
* coming out of a CX or analog rail power collapse while
* ensuring that the pads maintain LP00 or LP11 state
*/
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
writel(BIT(0), pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4);
wmb(); /* Ensure that the reset is deasserted */
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
writel(0, pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4);
wmb(); /* Ensure that the reset is deasserted */
}
@ -405,7 +403,7 @@ static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
dsi_pll_enable_pll_bias(pll_7nm->slave);
/* Start PLL */
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
writel(BIT(0), pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
/*
* ensure all PLL configurations are written prior to checking
@ -441,7 +439,7 @@ error:
static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
{
dsi_phy_write(pll->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
writel(0, pll->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
dsi_pll_disable_pll_bias(pll);
}
@ -455,7 +453,7 @@ static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
* powering down the PLL
*/
dsi_pll_disable_global_clk(pll_7nm);
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
writel(0, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
dsi_pll_disable_sub(pll_7nm);
if (pll_7nm->slave) {
dsi_pll_disable_global_clk(pll_7nm->slave);
@ -478,13 +476,13 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
u32 dec;
u64 pll_freq, tmp64;
dec = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
dec = readl(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
dec &= 0xff;
frac = dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
frac = readl(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
frac |= ((readl(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
0xff) << 8);
frac |= ((dsi_phy_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
frac |= ((readl(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
0x3) << 16);
/*
@ -537,15 +535,15 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
void __iomem *phy_base = pll_7nm->phy->base;
u32 cmn_clk_cfg0, cmn_clk_cfg1;
cached->pll_out_div = dsi_phy_read(pll_7nm->phy->pll_base +
cached->pll_out_div = readl(pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
cmn_clk_cfg0 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
cmn_clk_cfg0 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
cmn_clk_cfg1 = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
cached->pll_mux = cmn_clk_cfg1 & 0x3;
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
@ -561,18 +559,18 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
u32 val;
int ret;
val = dsi_phy_read(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
val = readl(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
val &= ~0x3;
val |= cached->pll_out_div;
dsi_phy_write(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
cached->bit_clk_div | (cached->pix_clk_div << 4));
writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
val = dsi_phy_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
val &= ~0x3;
val |= cached->pll_mux;
dsi_phy_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@ -610,7 +608,7 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
}
/* set PLL src */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
return 0;
}
@ -712,8 +710,8 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
if (pll_7nm->phy->cphy_mode) {
u32 data;
data = dsi_phy_read(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
dsi_phy_write(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data | 3);
data = readl(pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
writel(data | 3, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
phy_pll_out_dsi_parent = pll_post_out_div;
} else {
@ -792,7 +790,7 @@ static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
void __iomem *base = phy->base;
u32 data = 0;
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
data = readl(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
mb(); /* make sure read happened */
return (data & BIT(0));
@ -808,11 +806,9 @@ static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
* corresponding to the logical data lane 0
*/
if (enable)
dsi_phy_write(lane_base +
REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
writel(0x3, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0));
else
dsi_phy_write(lane_base +
REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
writel(0, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0));
}
static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
@ -833,18 +829,18 @@ static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
* be only enabled for the physical data lane corresponding
* to the logical data lane 0
*/
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
writel(0, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i));
writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i));
}
dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
/* other settings */
for (i = 0; i < 5; i++) {
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_CFG0(i));
writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_CFG1(i));
writel(i == 4 ? 0x8a : 0xa, lane_base + REG_DSI_7nm_PHY_LN_CFG2(i));
writel(tx_dctrl[i], lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i));
}
}
@ -882,7 +878,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Request for REFGEN READY */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
dsi_phy_write(phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10, 0x1);
writel(0x1, phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
udelay(500);
}
@ -967,53 +963,53 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
/* Assert PLL core reset */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
/* turn off resync FIFO */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
/* program CMN_CTRL_4 for minor_ver 2 chipsets*/
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
(dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
(readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4);
/* Configure PHY lane swap (TODO: we need to calculate this) */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
writel(0x21, base + REG_DSI_7nm_PHY_CMN_LANE_CFG0);
writel(0x84, base + REG_DSI_7nm_PHY_CMN_LANE_CFG1);
if (phy->cphy_mode)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL, BIT(6));
writel(BIT(6), base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL);
/* Enable LDO */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, vreg_ctrl_1);
writel(vreg_ctrl_0, base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0);
writel(vreg_ctrl_1, base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
glbl_str_swi_cal_sel_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
glbl_hstx_str_ctrl_0);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0,
glbl_pemph_ctrl_0);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_3);
writel(glbl_str_swi_cal_sel_ctrl,
base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL);
writel(glbl_hstx_str_ctrl_0,
base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0);
writel(glbl_pemph_ctrl_0,
base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0);
if (phy->cphy_mode)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1, 0x01);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
glbl_rescode_top_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
glbl_rescode_bot_ctrl);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
writel(0x01, base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1);
writel(glbl_rescode_top_ctrl,
base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL);
writel(glbl_rescode_bot_ctrl,
base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL);
writel(0x55, base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL);
/* Remove power down from all blocks */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
writel(0x7f, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, lane_ctrl0);
writel(lane_ctrl0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
/* Select full-rate mode */
if (!phy->cphy_mode)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
writel(0x40, base + REG_DSI_7nm_PHY_CMN_CTRL_2);
ret = dsi_7nm_set_usecase(phy);
if (ret) {
@ -1024,34 +1020,34 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* DSI PHY timings */
if (phy->cphy_mode) {
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5,
timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7,
timing->shared_timings.clk_post);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0);
writel(timing->hs_exit, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4);
writel(timing->shared_timings.clk_pre,
base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5);
writel(timing->clk_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6);
writel(timing->shared_timings.clk_post,
base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7);
writel(timing->hs_rqst, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8);
writel(0x02, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9);
writel(0x04, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11);
} else {
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
timing->shared_timings.clk_pre);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
timing->shared_timings.clk_post);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0);
writel(timing->clk_zero, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1);
writel(timing->clk_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2);
writel(timing->clk_trail, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3);
writel(timing->hs_exit, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4);
writel(timing->hs_zero, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5);
writel(timing->hs_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6);
writel(timing->hs_trail, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7);
writel(timing->hs_rqst, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8);
writel(0x02, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9);
writel(0x04, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11);
writel(timing->shared_timings.clk_pre,
base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12);
writel(timing->shared_timings.clk_post,
base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13);
}
/* DSI lane settings */
@ -1067,12 +1063,12 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
void __iomem *base = phy->base;
u32 data;
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
data = readl(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
if (enable)
data |= BIT(5) | BIT(6);
else
data &= ~(BIT(5) | BIT(6));
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1, data);
writel(data, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
return enable;
}
@ -1092,21 +1088,21 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
/* Turn off REFGEN Vote */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10, 0x0);
writel(0x0, base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
wmb();
/* Delay to ensure HW removes vote before PHY shut down */
udelay(2);
}
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
data = readl(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
/* disable all lanes */
data &= ~0x1F;
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0);
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
/* Turn off all PHY blocks */
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x00);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
/* make sure phy is turned off */
wmb();

View File

@ -127,6 +127,11 @@ struct msm_drm_private {
struct msm_rd_state *hangrd; /* debugfs to dump hanging submits */
struct msm_perf_state *perf;
/**
* total_mem: Total/global amount of memory backing GEM objects.
*/
atomic64_t total_mem;
/**
* List of all GEM objects (mainly for debugfs, protected by obj_lock
* (acquire before per GEM object lock)
@ -330,6 +335,7 @@ bool msm_dsi_is_bonded_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_is_master_dsi(struct msm_dsi *msm_dsi);
bool msm_dsi_wide_bus_enabled(struct msm_dsi *msm_dsi);
struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_dsi);
const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi);
#else
static inline void __init msm_dsi_register(void)
{
@ -367,6 +373,11 @@ static inline struct drm_dsc_config *msm_dsi_get_dsc_config(struct msm_dsi *msm_
{
return NULL;
}
static inline const char *msm_dsi_get_te_source(struct msm_dsi *msm_dsi)
{
return NULL;
}
#endif
#ifdef CONFIG_DRM_MSM_DP

View File

@ -12,6 +12,9 @@
#include <linux/pfn_t.h>
#include <drm/drm_prime.h>
#include <drm/drm_file.h>
#include <trace/events/gpu_mem.h>
#include "msm_drv.h"
#include "msm_fence.h"
@ -33,6 +36,34 @@ static bool use_pages(struct drm_gem_object *obj)
return !msm_obj->vram_node;
}
static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
{
uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
trace_gpu_mem_total(0, 0, total_mem);
}
static void update_ctx_mem(struct drm_file *file, ssize_t size)
{
struct msm_file_private *ctx = file->driver_priv;
uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
rcu_read_lock(); /* Locks file->pid! */
trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
rcu_read_unlock();
}
static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
{
update_ctx_mem(file, obj->size);
return 0;
}
static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
{
update_ctx_mem(file, -obj->size);
}
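
These two pairs of helpers feed the standard gpu_mem_total tracepoint: update_device_mem() reports the device-wide total with gpu_id 0 and pid 0 whenever backing pages are attached or released, while update_ctx_mem() reports a per-process total keyed by the DRM file's pid as handles are opened and closed. A minimal sketch of dumping the same counters, assuming a hypothetical debug helper and the usual drm_printer plumbing (not part of this commit):

/* Hypothetical helper: print the totals the tracepoint reports.
 * Requires <drm/drm_print.h> in addition to the headers above.
 */
static void example_print_mem_totals(struct msm_drm_private *priv,
				     struct msm_file_private *ctx,
				     struct drm_printer *p)
{
	drm_printf(p, "device total: %lld bytes\n",
		   (long long)atomic64_read(&priv->total_mem));
	drm_printf(p, "ctx total:    %lld bytes\n",
		   (long long)atomic64_read(&ctx->ctx_mem));
}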
/*
* Cache sync.. this is a bit over-complicated, to fit dma-mapping
* API. Really GPU cache is out of scope here (handled on cmdstream)
@ -156,6 +187,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
return p;
}
update_device_mem(dev->dev_private, obj->size);
msm_obj->pages = p;
msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
@ -209,6 +242,8 @@ static void put_pages(struct drm_gem_object *obj)
msm_obj->sgt = NULL;
}
update_device_mem(obj->dev->dev_private, -obj->size);
if (use_pages(obj))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
else
@ -1118,6 +1153,8 @@ static const struct vm_operations_struct vm_ops = {
static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.free = msm_gem_free_object,
.open = msm_gem_open,
.close = msm_gem_close,
.pin = msm_gem_prime_pin,
.unpin = msm_gem_prime_unpin,
.get_sg_table = msm_gem_prime_get_sg_table,

View File

@ -222,14 +222,16 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
struct drm_gem_object *obj, u64 iova, bool full)
{
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
struct msm_gem_object *msm_obj = to_msm_bo(obj);
/* Don't record write only objects */
state_bo->size = obj->size;
state_bo->flags = msm_obj->flags;
state_bo->iova = iova;
BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(to_msm_bo(obj)->name));
BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(msm_obj->name));
memcpy(state_bo->name, to_msm_bo(obj)->name, sizeof(state_bo->name));
memcpy(state_bo->name, msm_obj->name, sizeof(state_bo->name));
if (full) {
void *ptr;

View File

@ -428,6 +428,14 @@ struct msm_file_private {
* level.
*/
struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
/**
* ctx_mem:
*
* Total amount of memory of GEM buffers with handles attached for
* this context.
*/
atomic64_t ctx_mem;
};
/**
@ -519,6 +527,7 @@ struct msm_gpu_submitqueue {
struct msm_gpu_state_bo {
u64 iova;
size_t size;
u32 flags;
void *data;
bool encoded;
char name[32];

View File

@ -632,6 +632,13 @@ static const struct msm_mdss_data sm6350_data = {
.reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm7150_data = {
.ubwc_enc_version = UBWC_2_0,
.ubwc_dec_version = UBWC_2_0,
.highest_bank_bit = 1,
.reg_bus_bw = 76800,
};
static const struct msm_mdss_data sm8150_data = {
.ubwc_enc_version = UBWC_3_0,
.ubwc_dec_version = UBWC_3_0,
@ -713,6 +720,7 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
{ .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
{ .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },

View File

@ -795,6 +795,39 @@ int qcom_smem_get_soc_id(u32 *id)
}
EXPORT_SYMBOL_GPL(qcom_smem_get_soc_id);
/**
* qcom_smem_get_feature_code() - return the feature code
* @code: On success, return the feature code here.
*
* Look up the feature code identifier from SMEM and return it.
*
* Return: 0 on success, negative errno on failure.
*/
int qcom_smem_get_feature_code(u32 *code)
{
struct socinfo *info;
u32 raw_code;
info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HW_SW_BUILD_ID, NULL);
if (IS_ERR(info))
return PTR_ERR(info);
/* This only makes sense for socinfo >= 16 */
if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16))
return -EOPNOTSUPP;
raw_code = __le32_to_cpu(info->feature_code);
/* Ensure the value makes sense */
if (raw_code > SOCINFO_FC_INT_MAX)
raw_code = SOCINFO_FC_UNKNOWN;
*code = raw_code;
return 0;
}
EXPORT_SYMBOL_GPL(qcom_smem_get_feature_code);
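
A minimal usage sketch for the new accessor; the SOCINFO_FC_* values come from the header changes further down in this commit, while the calling helper itself is hypothetical:

#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/socinfo.h>

/* Hypothetical consumer: treat an error (old socinfo format, SMEM not
 * ready) as "feature code unknown".
 */
static bool example_soc_has_fc_ab(void)
{
	u32 fc;

	if (qcom_smem_get_feature_code(&fc))
		return false;

	return fc == SOCINFO_FC_AB;
}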
static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
{
struct smem_header *header;

View File

@ -21,14 +21,6 @@
#include <dt-bindings/arm/qcom,ids.h>
/*
* SoC version type with major number in the upper 16 bits and minor
* number in the lower 16 bits.
*/
#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
/* Helper macros to create soc_id table */
#define qcom_board_id(id) QCOM_ID_ ## id, __stringify(id)
#define qcom_board_id_named(id, name) QCOM_ID_ ## id, (name)

View File

@ -115,6 +115,29 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
int qcom_scm_lmh_profile_change(u32 profile_id);
bool qcom_scm_lmh_dcvsh_available(void);
/*
* Request TZ to program set of access controlled registers necessary
* irrespective of any features
*/
#define QCOM_SCM_GPU_ALWAYS_EN_REQ BIT(0)
/*
* Request TZ to program BCL id to access controlled register when BCL is
* enabled
*/
#define QCOM_SCM_GPU_BCL_EN_REQ BIT(1)
/*
* Request TZ to program set of access controlled register for CLX feature
* when enabled
*/
#define QCOM_SCM_GPU_CLX_EN_REQ BIT(2)
/*
* Request TZ to program tsense ids to access controlled registers for reading
* gpu temperature sensors
*/
#define QCOM_SCM_GPU_TSENSE_EN_REQ BIT(3)
int qcom_scm_gpu_init_regs(u32 gpu_req);
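
A hedged sketch of how a caller might combine these request bits; which sets a given firmware actually honours is platform-specific, and the wrapper below is illustrative only (it assumes the declarations above live in the usual <linux/firmware/qcom/qcom_scm.h>):

#include <linux/firmware/qcom/qcom_scm.h>

/* Illustrative: ask TZ for the always-required registers plus the BCL
 * set; OR in the CLX/TSENSE bits only when those features are enabled.
 */
static int example_gpu_tz_init(void)
{
	u32 req = QCOM_SCM_GPU_ALWAYS_EN_REQ | QCOM_SCM_GPU_BCL_EN_REQ;

	return qcom_scm_gpu_init_regs(req);
}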
#ifdef CONFIG_QCOM_QSEECOM
int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);

View File

@ -13,5 +13,6 @@ int qcom_smem_get_free_space(unsigned host);
phys_addr_t qcom_smem_virt_to_phys(void *p);
int qcom_smem_get_soc_id(u32 *id);
int qcom_smem_get_feature_code(u32 *code);
#endif

View File

@ -3,6 +3,8 @@
#ifndef __QCOM_SOCINFO_H__
#define __QCOM_SOCINFO_H__
#include <linux/types.h>
/*
* SMEM item id, used to acquire handles to respective
* SMEM region.
@ -12,6 +14,14 @@
#define SMEM_SOCINFO_BUILD_ID_LENGTH 32
#define SMEM_SOCINFO_CHIP_ID_LENGTH 32
/*
* SoC version type with major number in the upper 16 bits and minor
* number in the lower 16 bits.
*/
#define SOCINFO_MAJOR(ver) (((ver) >> 16) & 0xffff)
#define SOCINFO_MINOR(ver) ((ver) & 0xffff)
#define SOCINFO_VERSION(maj, min) ((((maj) & 0xffff) << 16)|((min) & 0xffff))
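
The packing is major in bits 31:16 and minor in bits 15:0; a quick worked example (illustrative, not part of the patch, assuming <linux/build_bug.h> for static_assert):

/* SOCINFO_VERSION(0, 16) == 0x10, the minimum format that carries a
 * feature code, and SOCINFO_MAJOR/MINOR simply unpack the two halves.
 */
static_assert(SOCINFO_VERSION(2, 1) == 0x00020001);
static_assert(SOCINFO_MAJOR(0x00020001) == 2 &&
	      SOCINFO_MINOR(0x00020001) == 1);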
/* Socinfo SMEM item structure */
struct socinfo {
__le32 fmt;
@ -74,4 +84,28 @@ struct socinfo {
__le32 boot_core;
};
/* Internal feature codes */
enum qcom_socinfo_feature_code {
/* External feature codes */
SOCINFO_FC_UNKNOWN = 0x0,
SOCINFO_FC_AA,
SOCINFO_FC_AB,
SOCINFO_FC_AC,
SOCINFO_FC_AD,
SOCINFO_FC_AE,
SOCINFO_FC_AF,
SOCINFO_FC_AG,
SOCINFO_FC_AH,
};
/* Internal feature codes */
/* Valid values: 0 <= n <= 0xf */
#define SOCINFO_FC_Yn(n) (0xf1 + (n))
#define SOCINFO_FC_INT_MAX SOCINFO_FC_Yn(0xf)
/* Product codes */
#define SOCINFO_PC_UNKNOWN 0
#define SOCINFO_PCn(n) ((n) + 1)
#define SOCINFO_PC_RESERVE (BIT(31) - 1)
#endif
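
The new identifiers are plain arithmetic offsets; a few worked expansions (illustrative, not part of the patch):

/* SOCINFO_FC_Yn(0) == 0xf1 and SOCINFO_FC_Yn(0xf) == 0x100, so
 * SOCINFO_FC_INT_MAX == 0x100 and qcom_smem_get_feature_code() clamps
 * anything above it to SOCINFO_FC_UNKNOWN.  Product codes start right
 * after SOCINFO_PC_UNKNOWN, i.e. SOCINFO_PCn(0) == 1.
 */
static_assert(SOCINFO_FC_INT_MAX == 0x100);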

View File

@ -87,6 +87,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
#define MSM_PARAM_RAYTRACING 0x11 /* RO */
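
A hedged userspace sketch of querying the new read-only param through the existing GET_PARAM path; struct drm_msm_param, MSM_PIPE_3D0 and DRM_MSM_GET_PARAM come from this header, while the include path, file-descriptor handling and helper name are assumptions:

#include <stdbool.h>
#include <xf86drm.h>
#include <drm/msm_drm.h>	/* or the libdrm/mesa copy of this header */

/* Illustrative: returns true if the kernel reports raytracing support
 * for the 3D pipe; 'fd' is assumed to be an open msm render node.
 */
static bool example_has_raytracing(int fd)
{
	struct drm_msm_param req = {
		.pipe  = MSM_PIPE_3D0,
		.param = MSM_PARAM_RAYTRACING,
	};

	if (drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req)))
		return false;

	return req.value != 0;
}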
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #