Merge tag 'drm-misc-next-2024-10-31' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next

drm-misc-next for v6.13:

All of the previous pull request, with MORE!

Core Changes:
- Update documentation for scheduler start/stop and job init.
- Add dedede and sm8350-hdk hardware to ci runs.

Driver Changes:
- Small fixes and cleanups to panfrost, omap, nouveau, ivpu, zynqmp, v3d,
  panthor docs, and leadtek-ltk050h3146w.
- Crashdump support for qaic.
- Support DP compliance in zynqmp.
- Add Samsung S6E88A0-AMS427AP24 panel.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/deeef745-f3fb-4e85-a9d0-e8d38d43c1cf@linux.intel.com
This commit is contained in:
Dave Airlie 2024-11-01 13:46:03 +10:00
commit 8a07b2623e
130 changed files with 6423 additions and 979 deletions

View File

@ -81,9 +81,22 @@ properties:
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
unevaluatedProperties: false
$ref: /schemas/graph.yaml#/$defs/port-base
description: Parallel RGB input port
properties:
endpoint:
$ref: /schemas/graph.yaml#/$defs/endpoint-base
unevaluatedProperties: false
properties:
bus-width:
description:
Endpoint bus width.
enum: [ 16, 18, 24 ]
default: 24
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: HDMI output port

View File

@ -60,6 +60,10 @@ properties:
data-lines:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [ 16, 18, 24 ]
deprecated: true
bus-width:
enum: [ 16, 18, 24 ]
port@1:
$ref: /schemas/graph.yaml#/properties/port

View File

@ -51,6 +51,14 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 90, 180, 270]
flip-horizontal:
description: boolean to flip image horizontally
type: boolean
flip-vertical:
description: boolean to flip image vertically
type: boolean
# Display Timings
panel-timing:
description:

View File

@ -200,6 +200,8 @@ properties:
- logictechno,lttd800480070-l2rt
# Logic Technologies LTTD800480070-L6WH-RT 7” 800x480 TFT Resistive Touch Module
- logictechno,lttd800480070-l6wh-rt
# Microchip AC69T88A 5" 800X480 LVDS interface TFT LCD Panel
- microchip,ac69t88a
# Mitsubishi "AA070MC01 7.0" WVGA TFT LCD panel
- mitsubishi,aa070mc01-ca1
# Mitsubishi AA084XE01 8.4" XGA TFT LCD panel

View File

@ -0,0 +1,65 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/samsung,s6e88a0-ams427ap24.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung AMS427AP24 panel with S6E88A0 controller
maintainers:
- Jakob Hauser <jahau@rocketmail.com>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: samsung,s6e88a0-ams427ap24
reg:
maxItems: 1
port: true
reset-gpios: true
flip-horizontal: true
vdd3-supply:
description: core voltage supply
vci-supply:
description: voltage supply for analog circuits
required:
- compatible
- reg
- port
- reset-gpios
- vdd3-supply
- vci-supply
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
dsi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "samsung,s6e88a0-ams427ap24";
reg = <0>;
vdd3-supply = <&pm8916_l17>;
vci-supply = <&pm8916_l6>;
reset-gpios = <&tlmm 25 GPIO_ACTIVE_LOW>;
flip-horizontal;
port {
panel_in: endpoint {
remote-endpoint = <&mdss_dsi0_out>;
};
};
};
};

View File

@ -21,6 +21,8 @@ properties:
reset-gpios: true
display-timings: true
flip-horizontal: true
flip-vertical: true
vdd3-supply:
description: core voltage supply
@ -46,14 +48,6 @@ properties:
panel-height-mm:
description: physical panel height [mm]
flip-horizontal:
description: boolean to flip image horizontally
type: boolean
flip-vertical:
description: boolean to flip image vertically
type: boolean
required:
- compatible
- reg

View File

@ -0,0 +1,188 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/rockchip/rockchip,rk3588-dw-hdmi-qp.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip DW HDMI QP TX Encoder
maintainers:
- Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
description: |
Rockchip RK3588 SoC integrates the Synopsys DesignWare HDMI QP TX controller
IP and a HDMI/eDP TX Combo PHY based on a Samsung IP block, providing the
following features, among others:
* Fixed Rate Link (FRL)
* Display Stream Compression (DSC)
* 4K@120Hz and 8K@60Hz video modes
* Variable Refresh Rate (VRR) including Quick Media Switching (QMS)
* Fast Vactive (FVA)
* SCDC I2C DDC access
* Multi-stream audio
* Enhanced Audio Return Channel (EARC)
allOf:
- $ref: /schemas/sound/dai-common.yaml#
properties:
compatible:
enum:
- rockchip,rk3588-dw-hdmi-qp
reg:
maxItems: 1
clocks:
items:
- description: Peripheral/APB bus clock
- description: EARC RX biphase clock
- description: Reference clock
- description: Audio interface clock
- description: TMDS/FRL link clock
- description: Video datapath clock
clock-names:
items:
- const: pclk
- const: earc
- const: ref
- const: aud
- const: hdp
- const: hclk_vo1
interrupts:
items:
- description: AVP Unit interrupt
- description: CEC interrupt
- description: eARC RX interrupt
- description: Main Unit interrupt
- description: HPD interrupt
interrupt-names:
items:
- const: avp
- const: cec
- const: earc
- const: main
- const: hpd
phys:
maxItems: 1
description: The HDMI/eDP PHY
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description: Video port for RGB/YUV input.
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: Video port for HDMI/eDP output.
required:
- port@0
- port@1
power-domains:
maxItems: 1
resets:
maxItems: 2
reset-names:
items:
- const: ref
- const: hdp
"#sound-dai-cells":
const: 0
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Some HDMI QP related data is accessed through SYS GRF regs.
rockchip,vo-grf:
$ref: /schemas/types.yaml#/definitions/phandle
description:
Additional HDMI QP related data is accessed through VO GRF regs.
required:
- compatible
- reg
- clocks
- clock-names
- interrupts
- interrupt-names
- phys
- ports
- resets
- reset-names
- rockchip,grf
- rockchip,vo-grf
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/power/rk3588-power.h>
#include <dt-bindings/reset/rockchip,rk3588-cru.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
hdmi@fde80000 {
compatible = "rockchip,rk3588-dw-hdmi-qp";
reg = <0x0 0xfde80000 0x0 0x20000>;
clocks = <&cru PCLK_HDMITX0>,
<&cru CLK_HDMITX0_EARC>,
<&cru CLK_HDMITX0_REF>,
<&cru MCLK_I2S5_8CH_TX>,
<&cru CLK_HDMIHDP0>,
<&cru HCLK_VO1>;
clock-names = "pclk", "earc", "ref", "aud", "hdp", "hclk_vo1";
interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH 0>,
<GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "avp", "cec", "earc", "main", "hpd";
phys = <&hdptxphy_hdmi0>;
power-domains = <&power RK3588_PD_VO1>;
resets = <&cru SRST_HDMITX0_REF>, <&cru SRST_HDMIHDP0>;
reset-names = "ref", "hdp";
rockchip,grf = <&sys_grf>;
rockchip,vo-grf = <&vo1_grf>;
#sound-dai-cells = <0>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
hdmi0_in_vp0: endpoint {
remote-endpoint = <&vp0_out_hdmi0>;
};
};
port@1 {
reg = <1>;
hdmi0_out_con0: endpoint {
remote-endpoint = <&hdmi_con0_in>;
};
};
};
};
};

View File

@ -0,0 +1,92 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/sharp,ls010b7dh04.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sharp Memory LCD panels
maintainers:
- Alex Lanzano <lanzano.alex@gmail.com>
description:
Sharp Memory LCDs are a series of monochrome displays that operate over
a SPI bus. The displays require a signal (VCOM) to be generated to prevent
DC bias build up resulting in pixels being unable to change. Three modes
can be used to provide the VCOM signal ("software", "external", "pwm").
properties:
compatible:
enum:
- sharp,ls010b7dh04
- sharp,ls011b7dh03
- sharp,ls012b7dd01
- sharp,ls013b7dh03
- sharp,ls013b7dh05
- sharp,ls018b7dh02
- sharp,ls027b7dh01
- sharp,ls027b7dh01a
- sharp,ls032b7dd02
- sharp,ls044q7dh01
reg:
maxItems: 1
spi-max-frequency:
maximum: 2000000
sharp,vcom-mode:
$ref: /schemas/types.yaml#/definitions/string
description: |
software - This mode relies on a software operation to send a
"maintain display" message to the display, toggling the vcom
bit on and off with each message
external - This mode relies on an external clock to generate
the signal on the EXTCOMM pin
pwm - This mode relies on a pwm device to generate the signal
on the EXTCOMM pin
enum: [software, external, pwm]
enable-gpios: true
pwms:
maxItems: 1
description: External VCOM signal
required:
- compatible
- reg
- sharp,vcom-mode
allOf:
- $ref: panel/panel-common.yaml#
- $ref: /schemas/spi/spi-peripheral-props.yaml#
- if:
properties:
sharp,vcom-mode:
const: pwm
then:
required:
- pwms
unevaluatedProperties: false
examples:
- |
spi {
#address-cells = <1>;
#size-cells = <0>;
display@0 {
compatible = "sharp,ls013b7dh03";
reg = <0>;
spi-cs-high;
spi-max-frequency = <1000000>;
sharp,vcom-mode = "software";
};
};
...

View File

@ -22,6 +22,8 @@ GPU Driver Documentation
afbc
komeda-kms
panfrost
panthor
zynqmp
.. only:: subproject and html

View File

@ -13,3 +13,6 @@ Kernel clients
.. kernel-doc:: drivers/gpu/drm/drm_client_modeset.c
:export:
.. kernel-doc:: drivers/gpu/drm/drm_client_event.c
:export:

View File

@ -110,15 +110,6 @@ fbdev Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
:doc: fbdev helpers
.. kernel-doc:: drivers/gpu/drm/drm_fbdev_dma.c
:export:
.. kernel-doc:: drivers/gpu/drm/drm_fbdev_shmem.c
:export:
.. kernel-doc:: drivers/gpu/drm/drm_fbdev_ttm.c
:export:
.. kernel-doc:: include/drm/drm_fb_helper.h
:internal:

View File

@ -210,4 +210,5 @@ Driver specific implementations
* :ref:`i915-usage-stats`
* :ref:`panfrost-usage-stats`
* :ref:`panthor-usage-stats`
* :ref:`xe-usage-stats`

View File

@ -4,7 +4,7 @@
drm/Panthor CSF driver
=========================
.. _panfrost-usage-stats:
.. _panthor-usage-stats:
Panthor DRM client usage stats implementation
==============================================

View File

@ -0,0 +1,149 @@
.. SPDX-License-Identifier: GPL-2.0+
===============================================
Xilinx ZynqMP Ultrascale+ DisplayPort Subsystem
===============================================
This subsystem handles DisplayPort video and audio output on the ZynqMP. It
supports in-memory framebuffers with the DisplayPort DMA controller
(xilinx-dpdma), as well as "live" video and audio from the programmable logic
(PL). This subsystem can perform several transformations, including color space
conversion, alpha blending, and audio mixing, although not all features are
currently supported.
debugfs
-------
To support debugging and compliance testing, several test modes can be enabled
though debugfs. The following files in /sys/kernel/debug/dri/X/DP-1/test/
control the DisplayPort test modes:
active:
Writing a 1 to this file will activate test mode, and writing a 0 will
deactivate test mode. Writing a 1 or 0 when the test mode is already
active/inactive will re-activate/re-deactivate test mode. When test
mode is inactive, changes made to other files will have no (immediate)
effect, although the settings will be saved for when test mode is
activated. When test mode is active, changes made to other files will
apply immediately.
custom:
Custom test pattern value
downspread:
Enable/disable clock downspreading (spread-spectrum clocking) by
writing 1/0
enhanced:
Enable/disable enhanced framing
ignore_aux_errors:
Ignore AUX errors when set to 1. Writes to this file take effect
immediately (regardless of whether test mode is active) and affect all
AUX transfers.
ignore_hpd:
Ignore hotplug events (such as cable removals or monitor link
retraining requests) when set to 1. Writes to this file take effect
immediately (regardless of whether test mode is active).
laneX_preemphasis:
Preemphasis from 0 (lowest) to 2 (highest) for lane X
laneX_swing:
Voltage swing from 0 (lowest) to 3 (highest) for lane X
lanes:
Number of lanes to use (1, 2, or 4)
pattern:
Test pattern. May be one of:
video
Use regular video input
symbol-error
Symbol error measurement pattern
prbs7
Output of the PRBS7 (x^7 + x^6 + 1) polynomial
80bit-custom
A custom 80-bit pattern
cp2520
HBR2 compliance eye pattern
tps1
Link training symbol pattern TPS1 (/D10.2/)
tps2
Link training symbol pattern TPS2
tps3
Link training symbol pattern TPS3 (for HBR2)
rate:
Rate in hertz. One of
* 5400000000 (HBR2)
* 2700000000 (HBR)
* 1620000000 (RBR)
You can dump the displayport test settings with the following command::
for prop in /sys/kernel/debug/dri/1/DP-1/test/*; do
printf '%-17s ' ${prop##*/}
if [ ${prop##*/} = custom ]; then
hexdump -C $prop | head -1
else
cat $prop
fi
done
The output could look something like::
active 1
custom 00000000 00 00 00 00 00 00 00 00 00 00 |..........|
downspread 0
enhanced 1
ignore_aux_errors 1
ignore_hpd 1
lane0_preemphasis 0
lane0_swing 3
lane1_preemphasis 0
lane1_swing 3
lanes 2
pattern prbs7
rate 1620000000
The recommended test procedure is to connect the board to a monitor,
configure test mode, activate test mode, and then disconnect the cable
and connect it to your test equipment of choice. For example, one
sequence of commands could be::
echo 1 > /sys/kernel/debug/dri/1/DP-1/test/enhanced
echo tps1 > /sys/kernel/debug/dri/1/DP-1/test/pattern
echo 1620000000 > /sys/kernel/debug/dri/1/DP-1/test/rate
echo 1 > /sys/kernel/debug/dri/1/DP-1/test/ignore_aux_errors
echo 1 > /sys/kernel/debug/dri/1/DP-1/test/ignore_hpd
echo 1 > /sys/kernel/debug/dri/1/DP-1/test/active
at which point the cable could be disconnected from the monitor.
Internals
---------
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_disp.h
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_dpsub.h
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_kms.h
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_disp.c
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_dp.c
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_dpsub.c
.. kernel-doc:: drivers/gpu/drm/xlnx/zynqmp_kms.c

View File

@ -7387,6 +7387,12 @@ S: Maintained
F: Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml
F: drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
DRM DRIVER FOR SHARP MEMORY LCD
M: Alex Lanzano <lanzano.alex@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/display/sharp,ls010b7dh04.yaml
F: drivers/gpu/drm/tiny/sharp-memory.c
DRM DRIVER FOR SITRONIX ST7586 PANELS
M: David Lechner <david@lechnology.com>
S: Maintained
@ -7832,6 +7838,7 @@ L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: Documentation/devicetree/bindings/display/xlnx/
F: Documentation/gpu/zynqmp.rst
F: drivers/gpu/drm/xlnx/
DRM GPU SCHEDULER

View File

@ -16,3 +16,12 @@ config DRM_ACCEL_IVPU
and Deep Learning applications.
If "M" is selected, the module will be called intel_vpu.
config DRM_ACCEL_IVPU_DEBUG
bool "Intel NPU debug mode"
depends on DRM_ACCEL_IVPU
help
Choose this option to enable additional
debug features for the Intel NPU driver:
- Always print debug messages regardless of dyndbg config,
- Enable unsafe module params.

View File

@ -24,4 +24,6 @@ intel_vpu-$(CONFIG_DEV_COREDUMP) += ivpu_coredump.o
obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o
subdir-ccflags-$(CONFIG_DRM_ACCEL_IVPU_DEBUG) += -DDEBUG
CFLAGS_ivpu_trace_points.o = -I$(src)

View File

@ -43,8 +43,10 @@ module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
int ivpu_test_mode;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
#endif
u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
@ -86,7 +88,7 @@ static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *fi
ivpu_cmdq_release_all_locked(file_priv);
ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
ivpu_mmu_context_fini(vdev, &file_priv->ctx);
file_priv->bound = false;
drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
}
@ -104,6 +106,8 @@ static void file_priv_release(struct kref *ref)
pm_runtime_get_sync(vdev->drm.dev);
mutex_lock(&vdev->context_list_lock);
file_priv_unbind(vdev, file_priv);
drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
xa_destroy(&file_priv->cmdq_xa);
mutex_unlock(&vdev->context_list_lock);
pm_runtime_put_autosuspend(vdev->drm.dev);
@ -254,14 +258,14 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
goto err_unlock;
}
ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
if (ret)
goto err_xa_erase;
ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);
file_priv->default_job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK,
(file_priv->ctx.id - 1));
file_priv->default_job_limit.max = file_priv->default_job_limit.min | IVPU_JOB_ID_JOB_MASK;
file_priv->job_limit = file_priv->default_job_limit;
file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;
xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;
mutex_unlock(&vdev->context_list_lock);
drm_dev_exit(idx);
@ -273,8 +277,6 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
return 0;
err_xa_erase:
xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
mutex_unlock(&vdev->context_list_lock);
mutex_destroy(&file_priv->ms_lock);
@ -622,9 +624,8 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
INIT_LIST_HEAD(&vdev->bo_list);
vdev->default_db_limit.min = IVPU_MIN_DB;
vdev->default_db_limit.max = IVPU_MAX_DB;
vdev->db_limit = vdev->default_db_limit;
vdev->db_limit.min = IVPU_MIN_DB;
vdev->db_limit.max = IVPU_MAX_DB;
ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
if (ret)
@ -652,9 +653,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
if (ret)
goto err_shutdown;
ret = ivpu_mmu_global_context_init(vdev);
if (ret)
goto err_shutdown;
ivpu_mmu_global_context_init(vdev);
ret = ivpu_mmu_init(vdev);
if (ret)

View File

@ -49,11 +49,11 @@
#define IVPU_JOB_ID_JOB_MASK GENMASK(7, 0)
#define IVPU_JOB_ID_CONTEXT_MASK GENMASK(31, 8)
#define IVPU_NUM_ENGINES 2
#define IVPU_NUM_PRIORITIES 4
#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_PRIORITIES)
#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
#define IVPU_CMDQ_MIN_ID 1
#define IVPU_CMDQ_MAX_ID 255
#define IVPU_PLATFORM_SILICON 0
#define IVPU_PLATFORM_SIMICS 2
@ -140,7 +140,7 @@ struct ivpu_device {
struct xarray db_xa;
struct xa_limit db_limit;
struct xa_limit default_db_limit;
u32 db_next;
struct mutex bo_list_lock; /* Protects bo_list */
struct list_head bo_list;
@ -171,13 +171,15 @@ struct ivpu_file_priv {
struct kref ref;
struct ivpu_device *vdev;
struct mutex lock; /* Protects cmdq */
struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
struct xarray cmdq_xa;
struct ivpu_mmu_context ctx;
struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
struct list_head ms_instance_list;
struct ivpu_bo *ms_info_bo;
struct xa_limit job_limit;
struct xa_limit default_job_limit;
u32 job_id_next;
struct xa_limit cmdq_limit;
u32 cmdq_id_next;
bool has_mmu_faults;
bool bound;
bool aborted;
@ -195,7 +197,7 @@ extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2)
#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4)
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
#define IVPU_TEST_MODE_PREEMPTION_DISABLE BIT(6)
#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
#define IVPU_TEST_MODE_TURBO BIT(9)
extern int ivpu_test_mode;

View File

@ -46,8 +46,10 @@
#define IVPU_FOCUS_PRESENT_TIMER_MS 1000
static char *ivpu_firmware;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
#endif
static struct {
int gen;
@ -582,8 +584,10 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ivpu_bo_size(ipc_mem_rx) / 2;
boot_params->ipc_payload_area_size = ivpu_bo_size(ipc_mem_rx) / 2;
boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);
}
/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;

View File

@ -114,14 +114,14 @@ static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
ivpu_hw_range_init(&vdev->hw->ranges.user, 0xc0000000, 255 * SZ_1M);
ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
} else {
ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
ivpu_hw_range_init(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
vdev->hw->ranges.dma = vdev->hw->ranges.user;
}
}

View File

@ -141,16 +141,10 @@ static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config
}
config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
if (!tile_disable_check(config)) {
ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config);
return -EIO;
}
if (!tile_disable_check(config))
ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);
if (config)
ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
BTRS_LNL_TILE_MAX_NUM - 1, ffs(config) - 1);
else
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM);
ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);
*tile_fuse_config = config;
return 0;

View File

@ -35,7 +35,8 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
return 0;
cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
@ -45,7 +46,7 @@ static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
return -ENOMEM;
}
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.shave,
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
secondary_size, DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
@ -72,26 +73,6 @@ static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
static int ivpu_id_alloc(struct xarray *xa, u32 *id, void *entry, struct xa_limit *limit,
const struct xa_limit default_limit)
{
int ret;
ret = __xa_alloc(xa, id, entry, *limit, GFP_KERNEL);
if (ret) {
limit->min = default_limit.min;
ret = __xa_alloc(xa, id, entry, *limit, GFP_KERNEL);
if (ret)
return ret;
}
limit->min = *id + 1;
if (limit->min > limit->max)
limit->min = default_limit.min;
return ret;
}
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
@ -102,18 +83,23 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq)
return NULL;
xa_lock(&vdev->db_xa); /* lock here to protect db_limit */
ret = ivpu_id_alloc(&vdev->db_xa, &cmdq->db_id, NULL, &vdev->db_limit,
vdev->default_db_limit);
xa_unlock(&vdev->db_xa);
if (ret) {
ret = xa_alloc_cyclic(&vdev->db_xa, &cmdq->db_id, NULL, vdev->db_limit, &vdev->db_next,
GFP_KERNEL);
if (ret < 0) {
ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
goto err_free_cmdq;
}
ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
&file_priv->cmdq_id_next, GFP_KERNEL);
if (ret < 0) {
ivpu_err(vdev, "Failed to allocate command queue id: %d\n", ret);
goto err_erase_db_xa;
}
cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!cmdq->mem)
goto err_erase_xa;
goto err_erase_cmdq_xa;
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
if (ret)
@ -121,7 +107,9 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
return cmdq;
err_erase_xa:
err_erase_cmdq_xa:
xa_erase(&file_priv->cmdq_xa, cmdq->id);
err_erase_db_xa:
xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
kfree(cmdq);
@ -145,13 +133,13 @@ static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
struct ivpu_device *vdev = file_priv->vdev;
int ret;
ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->id,
task_pid_nr(current), engine,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (ret)
return ret;
ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->id,
priority);
if (ret)
return ret;
@ -165,20 +153,21 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
int ret;
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
else
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
if (!ret)
ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d\n",
cmdq->db_id, cmdq->id, file_priv->ctx.id);
return ret;
}
static int
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct vpu_job_queue_header *jobq_header;
@ -194,7 +183,7 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
jobq_header = &cmdq->jobq->header;
jobq_header->engine_idx = engine;
jobq_header->engine_idx = VPU_ENGINE_COMPUTE;
jobq_header->head = 0;
jobq_header->tail = 0;
if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
@ -205,7 +194,7 @@ ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 eng
wmb(); /* Flush WC buffer for jobq->header */
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, priority);
if (ret)
return ret;
}
@ -232,9 +221,9 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
cmdq->db_registered = false;
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->id);
if (!ret)
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->id);
}
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
@ -244,55 +233,46 @@ static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cm
return 0;
}
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
u8 priority)
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u8 priority)
{
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
struct ivpu_cmdq *cmdq;
unsigned long cmdq_id;
int ret;
lockdep_assert_held(&file_priv->lock);
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
if (cmdq->priority == priority)
break;
if (!cmdq) {
cmdq = ivpu_cmdq_alloc(file_priv);
if (!cmdq)
return NULL;
file_priv->cmdq[cmdq_idx] = cmdq;
cmdq->priority = priority;
}
ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
ret = ivpu_cmdq_init(file_priv, cmdq, priority);
if (ret)
return NULL;
return cmdq;
}
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
struct ivpu_cmdq *cmdq;
unsigned long cmdq_id;
lockdep_assert_held(&file_priv->lock);
if (cmdq) {
file_priv->cmdq[cmdq_idx] = NULL;
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq) {
xa_erase(&file_priv->cmdq_xa, cmdq_id);
ivpu_cmdq_fini(file_priv, cmdq);
ivpu_cmdq_free(file_priv, cmdq);
}
}
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
u16 engine;
u8 priority;
lockdep_assert_held(&file_priv->lock);
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
ivpu_cmdq_release_locked(file_priv, engine, priority);
}
/*
* Mark the doorbell as unregistered
* This function needs to be called when the VPU hardware is restarted
@ -301,20 +281,13 @@ void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
*/
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
{
u16 engine;
u8 priority;
struct ivpu_cmdq *cmdq;
unsigned long cmdq_id;
mutex_lock(&file_priv->lock);
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
if (cmdq)
cmdq->db_registered = false;
}
}
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
cmdq->db_registered = false;
mutex_unlock(&file_priv->lock);
}
@ -334,17 +307,11 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
{
u16 engine;
u8 priority;
struct ivpu_cmdq *cmdq;
unsigned long cmdq_id;
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
if (file_priv->cmdq[cmdq_idx])
ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
}
}
xa_for_each(&file_priv->cmdq_xa, cmdq_id, cmdq)
ivpu_cmdq_fini(file_priv, cmdq);
}
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
@ -369,8 +336,8 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
/* Check if there is space left in job queue */
if (next_entry == header->head) {
ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
ivpu_dbg(vdev, JOB, "Job queue full: ctx %d cmdq %d db %d head %d tail %d\n",
job->file_priv->ctx.id, cmdq->id, cmdq->db_id, header->head, tail);
return -EBUSY;
}
@ -381,8 +348,7 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW &&
(unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
if (cmdq->primary_preempt_buf) {
entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
@ -557,7 +523,7 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
mutex_lock(&file_priv->lock);
cmdq = ivpu_cmdq_acquire(file_priv, job->engine_idx, priority);
cmdq = ivpu_cmdq_acquire(file_priv, priority);
if (!cmdq) {
ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
file_priv->ctx.id, job->engine_idx, priority);
@ -567,9 +533,9 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
xa_lock(&vdev->submitted_jobs_xa);
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
ret = ivpu_id_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, &file_priv->job_limit,
file_priv->default_job_limit);
if (ret) {
ret = __xa_alloc_cyclic(&vdev->submitted_jobs_xa, &job->job_id, job, file_priv->job_limit,
&file_priv->job_id_next, GFP_KERNEL);
if (ret < 0) {
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
file_priv->ctx.id);
ret = -EBUSY;
@ -699,7 +665,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
int idx, ret;
u8 priority;
if (params->engine > DRM_IVPU_ENGINE_COPY)
if (params->engine != DRM_IVPU_ENGINE_COMPUTE)
return -EINVAL;
if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)

View File

@ -28,8 +28,10 @@ struct ivpu_cmdq {
struct ivpu_bo *secondary_preempt_buf;
struct ivpu_bo *mem;
u32 entry_count;
u32 id;
u32 db_id;
bool db_registered;
u8 priority;
};
/**

View File

@ -132,7 +132,7 @@ int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.query_engine_hb.engine_idx = engine;
@ -155,7 +155,7 @@ int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_reset.engine_idx = engine;
@ -174,7 +174,7 @@ int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id
struct vpu_jsm_msg resp;
int ret;
if (engine > VPU_ENGINE_COPY)
if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.engine_preempt.engine_idx = engine;
@ -346,7 +346,7 @@ int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
struct vpu_jsm_msg resp;
int ret;
if (engine >= VPU_ENGINE_NB)
if (engine != VPU_ENGINE_COMPUTE)
return -EINVAL;
req.payload.hws_resume_engine.engine_idx = engine;

View File

@ -696,7 +696,7 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
return ret;
}
static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
static int ivpu_mmu_cdtab_entry_set(struct ivpu_device *vdev, u32 ssid, u64 cd_dma, bool valid)
{
struct ivpu_mmu_info *mmu = vdev->mmu;
struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
@ -708,30 +708,29 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
return -EINVAL;
entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
drm_WARN_ON(&vdev->drm, (entry[0] & IVPU_MMU_CD_0_V) == valid);
if (cd_dma != 0) {
cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
IVPU_MMU_CD_0_TCR_EPD1 |
IVPU_MMU_CD_0_AA64 |
IVPU_MMU_CD_0_R |
IVPU_MMU_CD_0_ASET |
IVPU_MMU_CD_0_V;
cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
cd[2] = 0;
cd[3] = 0x0000000000007444;
cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, IVPU_MMU_T0SZ_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, IVPU_MMU_IPS_48BIT) |
FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
IVPU_MMU_CD_0_TCR_EPD1 |
IVPU_MMU_CD_0_AA64 |
IVPU_MMU_CD_0_R |
IVPU_MMU_CD_0_ASET;
cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
cd[2] = 0;
cd[3] = 0x0000000000007444;
/* For global context generate memory fault on VPU */
if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
cd[0] |= IVPU_MMU_CD_0_A;
} else {
memset(cd, 0, sizeof(cd));
}
/* For global context generate memory fault on VPU */
if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
cd[0] |= IVPU_MMU_CD_0_A;
if (valid)
cd[0] |= IVPU_MMU_CD_0_V;
WRITE_ONCE(entry[1], cd[1]);
WRITE_ONCE(entry[2], cd[2]);
@ -741,8 +740,8 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
if (!ivpu_is_force_snoop_enabled(vdev))
clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
ivpu_dbg(vdev, MMU, "CDTAB set %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
valid ? "valid" : "invalid", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
mutex_lock(&mmu->lock);
if (!mmu->on)
@ -750,38 +749,18 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
if (ret)
goto unlock;
goto err_invalidate;
ret = ivpu_mmu_cmdq_sync(vdev);
if (ret)
goto err_invalidate;
unlock:
mutex_unlock(&mmu->lock);
return ret;
}
static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
{
int ret;
ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
if (ret)
ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);
return ret;
}
static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
{
int ret;
if (ssid == 0) {
ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
return -EINVAL;
}
ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
if (ret)
ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);
return 0;
err_invalidate:
WRITE_ONCE(entry[0], 0);
mutex_unlock(&mmu->lock);
return ret;
}
@ -808,12 +787,6 @@ int ivpu_mmu_init(struct ivpu_device *vdev)
return ret;
}
ret = ivpu_mmu_cd_add_gbl(vdev);
if (ret) {
ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
return ret;
}
ret = ivpu_mmu_enable(vdev);
if (ret) {
ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
@ -966,12 +939,12 @@ void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
REGV_WR32(IVPU_MMU_REG_GERRORN, gerror_val);
}
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
{
return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
return ivpu_mmu_cdtab_entry_set(vdev, ssid, pgtable->pgd_dma, true);
}
void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid)
{
ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
ivpu_mmu_cdtab_entry_set(vdev, ssid, 0, false);
}

View File

@ -40,8 +40,8 @@ struct ivpu_mmu_info {
int ivpu_mmu_init(struct ivpu_device *vdev);
void ivpu_mmu_disable(struct ivpu_device *vdev);
int ivpu_mmu_enable(struct ivpu_device *vdev);
int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid);
int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid);
int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);

View File

@ -90,19 +90,6 @@ static void ivpu_pgtable_free_page(struct ivpu_device *vdev, u64 *cpu_addr, dma_
}
}
static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
dma_addr_t pgd_dma;
pgtable->pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
if (!pgtable->pgd_dma_ptr)
return -ENOMEM;
pgtable->pgd_dma = pgd_dma;
return 0;
}
static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
int pgd_idx, pud_idx, pmd_idx;
@ -140,6 +127,27 @@ static void ivpu_mmu_pgtables_free(struct ivpu_device *vdev, struct ivpu_mmu_pgt
}
ivpu_pgtable_free_page(vdev, pgtable->pgd_dma_ptr, pgtable->pgd_dma);
pgtable->pgd_dma_ptr = NULL;
pgtable->pgd_dma = 0;
}
static u64*
ivpu_mmu_ensure_pgd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
{
u64 *pgd_dma_ptr = pgtable->pgd_dma_ptr;
dma_addr_t pgd_dma;
if (pgd_dma_ptr)
return pgd_dma_ptr;
pgd_dma_ptr = ivpu_pgtable_alloc_page(vdev, &pgd_dma);
if (!pgd_dma_ptr)
return NULL;
pgtable->pgd_dma_ptr = pgd_dma_ptr;
pgtable->pgd_dma = pgd_dma;
return pgd_dma_ptr;
}
static u64*
@ -237,6 +245,12 @@ ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx
int pmd_idx = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
int pte_idx = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
drm_WARN_ON(&vdev->drm, ctx->id == IVPU_RESERVED_CONTEXT_MMU_SSID);
/* Allocate PGD - first level page table if needed */
if (!ivpu_mmu_ensure_pgd(vdev, &ctx->pgtable))
return -ENOMEM;
/* Allocate PUD - second level page table if needed */
if (!ivpu_mmu_ensure_pud(vdev, &ctx->pgtable, pgd_idx))
return -ENOMEM;
@ -418,6 +432,7 @@ int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
{
size_t start_vpu_addr = vpu_addr;
struct scatterlist *sg;
int ret;
u64 prot;
@ -448,20 +463,36 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
if (ret) {
ivpu_err(vdev, "Failed to map context pages\n");
mutex_unlock(&ctx->lock);
return ret;
goto err_unmap_pages;
}
vpu_addr += size;
}
if (!ctx->is_cd_valid) {
ret = ivpu_mmu_cd_set(vdev, ctx->id, &ctx->pgtable);
if (ret) {
ivpu_err(vdev, "Failed to set context descriptor for context %u: %d\n",
ctx->id, ret);
goto err_unmap_pages;
}
ctx->is_cd_valid = true;
}
/* Ensure page table modifications are flushed from wc buffers to memory */
wmb();
mutex_unlock(&ctx->lock);
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
if (ret)
if (ret) {
ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
goto err_unmap_pages;
}
mutex_unlock(&ctx->lock);
return 0;
err_unmap_pages:
ivpu_mmu_context_unmap_pages(ctx, start_vpu_addr, vpu_addr - start_vpu_addr);
mutex_unlock(&ctx->lock);
return ret;
}
@ -530,65 +561,75 @@ ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *n
mutex_unlock(&ctx->lock);
}
static int
ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
{
u64 start, end;
int ret;
mutex_init(&ctx->lock);
ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
if (ret) {
ivpu_err(vdev, "Failed to initialize pgtable for ctx %u: %d\n", context_id, ret);
return ret;
}
if (!context_id) {
start = vdev->hw->ranges.global.start;
end = vdev->hw->ranges.shave.end;
} else {
start = vdev->hw->ranges.user.start;
end = vdev->hw->ranges.dma.end;
start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
end = max_t(u64, vdev->hw->ranges.user.end, vdev->hw->ranges.dma.end);
}
drm_mm_init(&ctx->mm, start, end - start);
ctx->id = context_id;
return 0;
}
static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
if (drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd_dma_ptr))
return;
if (ctx->is_cd_valid) {
ivpu_mmu_cd_clear(vdev, ctx->id);
ctx->is_cd_valid = false;
}
mutex_destroy(&ctx->lock);
ivpu_mmu_pgtables_free(vdev, &ctx->pgtable);
drm_mm_takedown(&ctx->mm);
ctx->pgtable.pgd_dma_ptr = NULL;
ctx->pgtable.pgd_dma = 0;
}
int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
void ivpu_mmu_global_context_init(struct ivpu_device *vdev)
{
return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
}
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
{
return ivpu_mmu_context_fini(vdev, &vdev->gctx);
ivpu_mmu_context_fini(vdev, &vdev->gctx);
}
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev)
{
return ivpu_mmu_user_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
int ret;
ivpu_mmu_context_init(vdev, &vdev->rctx, IVPU_RESERVED_CONTEXT_MMU_SSID);
mutex_lock(&vdev->rctx.lock);
if (!ivpu_mmu_ensure_pgd(vdev, &vdev->rctx.pgtable)) {
ivpu_err(vdev, "Failed to allocate root page table for reserved context\n");
ret = -ENOMEM;
goto unlock;
}
ret = ivpu_mmu_cd_set(vdev, vdev->rctx.id, &vdev->rctx.pgtable);
if (ret) {
ivpu_err(vdev, "Failed to set context descriptor for reserved context\n");
goto unlock;
}
unlock:
mutex_unlock(&vdev->rctx.lock);
return ret;
}
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev)
{
return ivpu_mmu_user_context_fini(vdev, &vdev->rctx);
ivpu_mmu_cd_clear(vdev, vdev->rctx.id);
ivpu_mmu_context_fini(vdev, &vdev->rctx);
}
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
@ -603,36 +644,3 @@ void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
xa_unlock(&vdev->context_xa);
}
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
{
int ret;
drm_WARN_ON(&vdev->drm, !ctx_id);
ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
if (ret) {
ivpu_err(vdev, "Failed to initialize context %u: %d\n", ctx_id, ret);
return ret;
}
ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
if (ret) {
ivpu_err(vdev, "Failed to set page table for context %u: %d\n", ctx_id, ret);
goto err_context_fini;
}
return 0;
err_context_fini:
ivpu_mmu_context_fini(vdev, ctx);
return ret;
}
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
{
drm_WARN_ON(&vdev->drm, !ctx->id);
ivpu_mmu_clear_pgtable(vdev, ctx->id);
ivpu_mmu_context_fini(vdev, ctx);
}

View File

@ -23,19 +23,20 @@ struct ivpu_mmu_pgtable {
};
struct ivpu_mmu_context {
struct mutex lock; /* Protects: mm, pgtable */
struct mutex lock; /* Protects: mm, pgtable, is_cd_valid */
struct drm_mm mm;
struct ivpu_mmu_pgtable pgtable;
bool is_cd_valid;
u32 id;
};
int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id);
void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,

View File

@ -24,8 +24,10 @@
#include "vpu_boot_api.h"
static bool ivpu_disable_recovery;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
MODULE_PARM_DESC(disable_recovery, "Disables recovery when NPU hang is detected");
#endif
static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);

View File

@ -54,12 +54,12 @@ static void qaicm_wq_release(struct drm_device *dev, void *res)
destroy_workqueue(wq);
}
static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name)
{
struct workqueue_struct *wq;
int ret;
wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
if (!wq)
return ERR_PTR(-ENOMEM);
ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);

View File

@ -2,6 +2,7 @@
/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */
#include <linux/devcoredump.h>
#include <linux/firmware.h>
#include <linux/limits.h>
#include <linux/mhi.h>
@ -9,6 +10,7 @@
#include <linux/mod_devicetable.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include "sahara.h"
@ -36,12 +38,14 @@
#define SAHARA_PACKET_MAX_SIZE 0xffffU /* MHI_MAX_MTU */
#define SAHARA_TRANSFER_MAX_SIZE 0x80000
#define SAHARA_READ_MAX_SIZE 0xfff0U /* Avoid unaligned requests */
#define SAHARA_NUM_TX_BUF DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
SAHARA_PACKET_MAX_SIZE)
#define SAHARA_IMAGE_ID_NONE U32_MAX
#define SAHARA_VERSION 2
#define SAHARA_SUCCESS 0
#define SAHARA_TABLE_ENTRY_STR_LEN 20
#define SAHARA_MODE_IMAGE_TX_PENDING 0x0
#define SAHARA_MODE_IMAGE_TX_COMPLETE 0x1
@ -53,6 +57,8 @@
#define SAHARA_END_OF_IMAGE_LENGTH 0x10
#define SAHARA_DONE_LENGTH 0x8
#define SAHARA_RESET_LENGTH 0x8
#define SAHARA_MEM_DEBUG64_LENGTH 0x18
#define SAHARA_MEM_READ64_LENGTH 0x18
struct sahara_packet {
__le32 cmd;
@ -80,18 +86,95 @@ struct sahara_packet {
__le32 image;
__le32 status;
} end_of_image;
struct {
__le64 table_address;
__le64 table_length;
} memory_debug64;
struct {
__le64 memory_address;
__le64 memory_length;
} memory_read64;
};
};
struct sahara_debug_table_entry64 {
__le64 type;
__le64 address;
__le64 length;
char description[SAHARA_TABLE_ENTRY_STR_LEN];
char filename[SAHARA_TABLE_ENTRY_STR_LEN];
};
struct sahara_dump_table_entry {
u64 type;
u64 address;
u64 length;
char description[SAHARA_TABLE_ENTRY_STR_LEN];
char filename[SAHARA_TABLE_ENTRY_STR_LEN];
};
#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef
#define SAHARA_DUMP_V1_VER 1
struct sahara_memory_dump_meta_v1 {
u64 magic;
u64 version;
u64 dump_size;
u64 table_size;
};
/*
* Layout of crashdump provided to user via devcoredump
* +------------------------------------------+
* | Crashdump Meta structure |
* | type: struct sahara_memory_dump_meta_v1 |
* +------------------------------------------+
* | Crashdump Table |
* | type: array of struct |
* | sahara_dump_table_entry |
* | |
* | |
* +------------------------------------------+
* | Crashdump |
* | |
* | |
* | |
* | |
* | |
* +------------------------------------------+
*
* First is the metadata header. Userspace can use the magic number to verify
* the content type, and then check the version for the rest of the format.
* New versions should keep the magic number location/value, and version
* location, but increment the version value.
*
* For v1, the metadata lists the size of the entire dump (header + table +
* dump) and the size of the table. Then the dump image table, which describes
* the contents of the dump. Finally all the images are listed in order, with
* no deadspace in between. Userspace can use the sizes listed in the image
* table to reconstruct the individual images.
*/
struct sahara_context {
struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
struct sahara_packet *rx;
struct work_struct work;
struct work_struct fw_work;
struct work_struct dump_work;
struct mhi_device *mhi_dev;
const char **image_table;
u32 table_size;
u32 active_image_id;
const struct firmware *firmware;
u64 dump_table_address;
u64 dump_table_length;
size_t rx_size;
size_t rx_size_requested;
void *mem_dump;
size_t mem_dump_sz;
struct sahara_dump_table_entry *dump_image;
u64 dump_image_offset;
void *mem_dump_freespace;
u64 dump_images_left;
bool is_mem_dump_mode;
};
static const char *aic100_image_table[] = {
@ -153,6 +236,8 @@ static void sahara_send_reset(struct sahara_context *context)
{
int ret;
context->is_mem_dump_mode = false;
context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
@ -186,7 +271,8 @@ static void sahara_hello(struct sahara_context *context)
}
if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE) {
le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE &&
le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) {
dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
le32_to_cpu(context->rx->hello.mode));
return;
@ -320,9 +406,70 @@ static void sahara_end_of_image(struct sahara_context *context)
dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
}
static void sahara_memory_debug64(struct sahara_context *context)
{
int ret;
dev_dbg(&context->mhi_dev->dev,
"MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n",
le32_to_cpu(context->rx->length),
le64_to_cpu(context->rx->memory_debug64.table_address),
le64_to_cpu(context->rx->memory_debug64.table_length));
if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) {
dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n",
le32_to_cpu(context->rx->length));
return;
}
context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address);
context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length);
if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 ||
!context->dump_table_length) {
dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n",
context->dump_table_length);
return;
}
/*
* From this point, the protocol flips. We make memory_read requests to
* the device, and the device responds with the raw data. If the device
* has an error, it will send an End of Image command. First we need to
* request the memory dump table so that we know where all the pieces
* of the dump are that we can consume.
*/
context->is_mem_dump_mode = true;
/*
* Assume that the table is smaller than our MTU so that we can read it
* in one shot. The spec does not put an upper limit on the table, but
* no known device will exceed this.
*/
if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) {
dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. Discarding dump\n",
context->dump_table_length);
sahara_send_reset(context);
return;
}
context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address);
context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length);
context->rx_size_requested = context->dump_table_length;
ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
SAHARA_MEM_READ64_LENGTH, MHI_EOT);
if (ret)
dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret);
}
static void sahara_processing(struct work_struct *work)
{
struct sahara_context *context = container_of(work, struct sahara_context, work);
struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
int ret;
switch (le32_to_cpu(context->rx->cmd)) {
@ -338,6 +485,12 @@ static void sahara_processing(struct work_struct *work)
case SAHARA_DONE_RESP_CMD:
/* Intentional do nothing as we don't need to exit an app */
break;
case SAHARA_RESET_RESP_CMD:
/* Intentional do nothing as we don't need to exit an app */
break;
case SAHARA_MEM_DEBUG64_CMD:
sahara_memory_debug64(context);
break;
default:
dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
le32_to_cpu(context->rx->cmd));
@ -350,6 +503,217 @@ static void sahara_processing(struct work_struct *work)
dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
}
static void sahara_parse_dump_table(struct sahara_context *context)
{
struct sahara_dump_table_entry *image_out_table;
struct sahara_debug_table_entry64 *dev_table;
struct sahara_memory_dump_meta_v1 *dump_meta;
u64 table_nents;
u64 dump_length;
int ret;
u64 i;
table_nents = context->dump_table_length / sizeof(*dev_table);
context->dump_images_left = table_nents;
dump_length = 0;
dev_table = (struct sahara_debug_table_entry64 *)(context->rx);
for (i = 0; i < table_nents; ++i) {
/* Do not trust the device, ensure the strings are terminated */
dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
if (dump_length == SIZE_MAX) {
/* Discard the dump */
sahara_send_reset(context);
return;
}
dev_dbg(&context->mhi_dev->dev,
"Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n",
i,
le64_to_cpu(dev_table[i].type),
le64_to_cpu(dev_table[i].address),
le64_to_cpu(dev_table[i].length),
dev_table[i].description,
dev_table[i].filename);
}
dump_length = size_add(dump_length, sizeof(*dump_meta));
if (dump_length == SIZE_MAX) {
/* Discard the dump */
sahara_send_reset(context);
return;
}
dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
if (dump_length == SIZE_MAX) {
/* Discard the dump */
sahara_send_reset(context);
return;
}
context->mem_dump_sz = dump_length;
context->mem_dump = vzalloc(dump_length);
if (!context->mem_dump) {
/* Discard the dump */
sahara_send_reset(context);
return;
}
/* Populate the dump metadata and table for userspace */
dump_meta = context->mem_dump;
dump_meta->magic = SAHARA_DUMP_V1_MAGIC;
dump_meta->version = SAHARA_DUMP_V1_VER;
dump_meta->dump_size = dump_length;
dump_meta->table_size = context->dump_table_length;
image_out_table = context->mem_dump + sizeof(*dump_meta);
for (i = 0; i < table_nents; ++i) {
image_out_table[i].type = le64_to_cpu(dev_table[i].type);
image_out_table[i].address = le64_to_cpu(dev_table[i].address);
image_out_table[i].length = le64_to_cpu(dev_table[i].length);
strscpy(image_out_table[i].description, dev_table[i].description,
SAHARA_TABLE_ENTRY_STR_LEN);
strscpy(image_out_table[i].filename,
dev_table[i].filename,
SAHARA_TABLE_ENTRY_STR_LEN);
}
context->mem_dump_freespace = &image_out_table[i];
/* Done parsing the table, switch to image dump mode */
context->dump_table_length = 0;
/* Request the first chunk of the first image */
context->dump_image = &image_out_table[0];
dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
/* Avoid requesting EOI sized data so that we can identify errors */
if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
context->dump_image_offset = dump_length;
context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address);
context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
context->rx_size_requested = dump_length;
ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
SAHARA_MEM_READ64_LENGTH, MHI_EOT);
if (ret)
dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret);
}
static void sahara_parse_dump_image(struct sahara_context *context)
{
u64 dump_length;
int ret;
memcpy(context->mem_dump_freespace, context->rx, context->rx_size);
context->mem_dump_freespace += context->rx_size;
if (context->dump_image_offset >= context->dump_image->length) {
/* Need to move to next image */
context->dump_image++;
context->dump_images_left--;
context->dump_image_offset = 0;
if (!context->dump_images_left) {
/* Dump done */
dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev,
context->mem_dump,
context->mem_dump_sz,
GFP_KERNEL);
context->mem_dump = NULL;
sahara_send_reset(context);
return;
}
}
/* Get next image chunk */
dump_length = context->dump_image->length - context->dump_image_offset;
dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
/* Avoid requesting EOI sized data so that we can identify errors */
if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
context->tx[0]->memory_read64.memory_address =
cpu_to_le64(context->dump_image->address + context->dump_image_offset);
context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);
context->dump_image_offset += dump_length;
context->rx_size_requested = dump_length;
ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
SAHARA_MEM_READ64_LENGTH, MHI_EOT);
if (ret)
dev_err(&context->mhi_dev->dev,
"Unable to send read for dump content %d\n", ret);
}
static void sahara_dump_processing(struct work_struct *work)
{
struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
int ret;
/*
* We should get the expected raw data, but if the device has an error
* it is supposed to send EOI with an error code.
*/
if (context->rx_size != context->rx_size_requested &&
context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) {
dev_err(&context->mhi_dev->dev,
"Unexpected response to read_data. Expected size: %#zx got: %#zx\n",
context->rx_size_requested,
context->rx_size);
goto error;
}
if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) {
dev_err(&context->mhi_dev->dev,
"Unexpected EOI response to read_data. Status: %d\n",
le32_to_cpu(context->rx->end_of_image.status));
goto error;
}
if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) {
dev_err(&context->mhi_dev->dev,
"Invalid EOI response to read_data. CMD: %d\n",
le32_to_cpu(context->rx->cmd));
goto error;
}
/*
* Need to know if we received the dump table, or part of a dump image.
* Since we get raw data, we cannot tell from the data itself. Instead,
* we use the stored dump_table_length, which we zero after we read and
* process the entire table.
*/
if (context->dump_table_length)
sahara_parse_dump_table(context);
else
sahara_parse_dump_image(context);
ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
SAHARA_PACKET_MAX_SIZE, MHI_EOT);
if (ret)
dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
return;
error:
vfree(context->mem_dump);
context->mem_dump = NULL;
sahara_send_reset(context);
}
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct sahara_context *context;
@ -382,7 +746,8 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
}
context->mhi_dev = mhi_dev;
INIT_WORK(&context->work, sahara_processing);
INIT_WORK(&context->fw_work, sahara_processing);
INIT_WORK(&context->dump_work, sahara_dump_processing);
context->image_table = aic100_image_table;
context->table_size = ARRAY_SIZE(aic100_image_table);
context->active_image_id = SAHARA_IMAGE_ID_NONE;
@ -405,7 +770,10 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
{
struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
cancel_work_sync(&context->work);
cancel_work_sync(&context->fw_work);
cancel_work_sync(&context->dump_work);
if (context->mem_dump)
vfree(context->mem_dump);
sahara_release_image(context);
mhi_unprepare_from_transfer(mhi_dev);
}
@ -418,8 +786,14 @@ static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result
{
struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
if (!mhi_result->transaction_status)
schedule_work(&context->work);
if (!mhi_result->transaction_status) {
context->rx_size = mhi_result->bytes_xferd;
if (context->is_mem_dump_mode)
schedule_work(&context->dump_work);
else
schedule_work(&context->fw_work);
}
}
static const struct mhi_device_id sahara_mhi_match_table[] = {

View File

@ -9,9 +9,6 @@ menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
select FB_CORE if DRM_FBDEV_EMULATION
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select HDMI
select I2C
select DMA_SHARED_BUFFER
@ -211,10 +208,28 @@ config DRM_DEBUG_MODESET_LOCK
If in doubt, say "N".
config DRM_CLIENT_SELECTION
config DRM_CLIENT
bool
depends on DRM
select DRM_CLIENT_SETUP if DRM_FBDEV_EMULATION
help
Enables support for DRM clients. DRM drivers that need
struct drm_client_dev and its interfaces should select this
option. Drivers that support the default clients should
select DRM_CLIENT_SELECTION instead.
config DRM_CLIENT_LIB
tristate
depends on DRM
select DRM_KMS_HELPER if DRM_FBDEV_EMULATION
select FB_CORE if DRM_FBDEV_EMULATION
help
This option enables the DRM client library and selects all
modules and components according to the enabled clients.
config DRM_CLIENT_SELECTION
tristate
depends on DRM
select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION
help
Drivers that support in-kernel DRM clients have to select this
option.
@ -222,10 +237,18 @@ config DRM_CLIENT_SELECTION
config DRM_CLIENT_SETUP
bool
depends on DRM_CLIENT_SELECTION
help
Enables the DRM client selection. DRM drivers that support the
default clients should select DRM_CLIENT_SELECTION instead.
menu "Supported DRM clients"
depends on DRM_CLIENT_SELECTION
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
depends on DRM_CLIENT_SELECTION
select DRM_CLIENT
select DRM_CLIENT_SETUP
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
default FB
help
@ -264,6 +287,8 @@ config DRM_FBDEV_LEAK_PHYS_SMEM
If in doubt, say "N" or spread the word to your closed source
library vendor.
endmenu
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
depends on DRM
@ -333,19 +358,21 @@ config DRM_TTM_HELPER
tristate
depends on DRM
select DRM_TTM
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Helpers for ttm-based gem objects
config DRM_GEM_DMA_HELPER
tristate
depends on DRM
select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION
select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM DMA helper functions
config DRM_GEM_SHMEM_HELPER
tristate
depends on DRM && MMU
select FB_SYSMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
help
Choose this if you need the GEM shmem helper functions

View File

@ -40,8 +40,6 @@ drm-y := \
drm_blend.o \
drm_bridge.o \
drm_cache.o \
drm_client.o \
drm_client_modeset.o \
drm_color_mgmt.o \
drm_connector.o \
drm_crtc.o \
@ -75,6 +73,10 @@ drm-y := \
drm_vblank_work.o \
drm_vma_manager.o \
drm_writeback.o
drm-$(CONFIG_DRM_CLIENT) += \
drm_client.o \
drm_client_event.o \
drm_client_modeset.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@ -142,14 +144,18 @@ drm_kms_helper-y := \
drm_probe_helper.o \
drm_self_refresh_helper.o \
drm_simple_kms_helper.o
drm_kms_helper-$(CONFIG_DRM_CLIENT_SETUP) += \
drm_client_setup.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \
drm_fbdev_client.o \
drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
#
# DRM clients
#
drm_client_lib-y := drm_client_setup.o
drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o
#
# Drivers and the rest
#

View File

@ -5,6 +5,7 @@ config DRM_AMDGPU
depends on DRM && PCI && MMU
depends on !UML
select FW_LOADER
select DRM_CLIENT
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_DSC_HELPER

View File

@ -38,8 +38,8 @@
#include <linux/apple-gmux.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/device.h>
@ -4793,13 +4793,13 @@ int amdgpu_device_prepare(struct drm_device *dev)
* amdgpu_device_suspend - initiate device suspend
*
* @dev: drm dev pointer
* @fbcon : notify the fbdev of suspend
* @notify_clients: notify in-kernel DRM clients
*
* Puts the hw in the suspend state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver suspend.
*/
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
@ -4819,8 +4819,8 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
DRM_WARN("smart shift update failed\n");
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
if (notify_clients)
drm_client_dev_suspend(adev_to_drm(adev), false);
cancel_delayed_work_sync(&adev->delayed_init_work);
@ -4855,13 +4855,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
* amdgpu_device_resume - initiate device resume
*
* @dev: drm dev pointer
* @fbcon : notify the fbdev of resume
* @notify_clients: notify in-kernel DRM clients
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
@ -4917,8 +4917,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
if (notify_clients)
drm_client_dev_resume(adev_to_drm(adev), false);
amdgpu_ras_resume(adev);
@ -5482,7 +5482,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (r)
goto out;
drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false);
drm_client_dev_resume(adev_to_drm(tmp_adev), false);
/*
* The GPU enters bad state once faulty pages
@ -5842,7 +5842,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, true);
drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
/* disable ras on ALL IPs */
if (!need_emergency_restart &&

View File

@ -120,6 +120,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
data->bridge.funcs = &drm_aux_bridge_funcs;
data->bridge.of_node = data->dev->of_node;
/* passthrough data, allow everything */
data->bridge.interlace_allowed = true;
data->bridge.ycbcr_420_allowed = true;
return devm_drm_bridge_add(data->dev, &data->bridge);
}

View File

@ -180,6 +180,10 @@ static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
data->bridge.ops = DRM_BRIDGE_OP_HPD;
data->bridge.type = id->driver_data;
/* passthrough data, allow everything */
data->bridge.interlace_allowed = true;
data->bridge.ycbcr_420_allowed = true;
auxiliary_set_drvdata(auxdev, data);
return devm_drm_bridge_add(data->dev, &data->bridge);

View File

@ -270,6 +270,10 @@ static int display_connector_probe(struct platform_device *pdev)
/* All the supported connector types support interlaced modes. */
conn->bridge.interlace_allowed = true;
if (type == DRM_MODE_CONNECTOR_HDMIA ||
type == DRM_MODE_CONNECTOR_DisplayPort)
conn->bridge.ycbcr_420_allowed = true;
/* Get the optional connector label. */
of_property_read_string(pdev->dev.of_node, "label", &label);

View File

@ -85,3 +85,4 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
EXPORT_SYMBOL_GPL(devm_imx_drm_legacy_bridge);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Freescale i.MX DRM bridge driver for legacy DT bindings");

View File

@ -770,8 +770,6 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&ctx->lock);
hdmi_avi_infoframe_init(&ctx->hdmi_avi_infoframe);
ret = drm_hdmi_avi_infoframe_from_display_mode(&ctx->hdmi_avi_infoframe, ctx->connector,
adjusted_mode);
if (ret) {

View File

@ -180,6 +180,8 @@ struct sii902x {
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
bool sink_is_hdmi;
u32 bus_width;
/*
* Mutex protects audio and video functions from interfering
* each other, by keeping their i2c command sequences atomic.
@ -477,6 +479,8 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
u32 *input_fmts;
*num_input_fmts = 0;
@ -485,7 +489,20 @@ static u32 *sii902x_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
if (!input_fmts)
return NULL;
input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
switch (sii902x->bus_width) {
case 16:
input_fmts[0] = MEDIA_BUS_FMT_RGB565_1X16;
break;
case 18:
input_fmts[0] = MEDIA_BUS_FMT_RGB666_1X18;
break;
case 24:
input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
break;
default:
return NULL;
}
*num_input_fmts = 1;
return input_fmts;
@ -1167,6 +1184,11 @@ static int sii902x_probe(struct i2c_client *client)
return PTR_ERR(sii902x->reset_gpio);
}
sii902x->bus_width = 24;
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
if (endpoint)
of_property_read_u32(endpoint, "bus-width", &sii902x->bus_width);
endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 1, -1);
if (endpoint) {
struct device_node *remote = of_graph_get_remote_port_parent(endpoint);

View File

@ -46,6 +46,14 @@ config DRM_DW_HDMI_CEC
Support the CE interface which is part of the Synopsys
Designware HDMI block.
config DRM_DW_HDMI_QP
tristate
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
select REGMAP_MMIO
config DRM_DW_MIPI_DSI
tristate
select DRM_KMS_HELPER

View File

@ -5,4 +5,6 @@ obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
obj-$(CONFIG_DRM_DW_HDMI_QP) += dw-hdmi-qp.o
obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o

View File

@ -0,0 +1,647 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd.
* Copyright (c) 2024 Collabora Ltd.
*
* Author: Algea Cao <algea.cao@rock-chips.com>
* Author: Cristian Ciocaltea <cristian.ciocaltea@collabora.com>
*/
#include <linux/completion.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/workqueue.h>
#include <drm/bridge/dw_hdmi_qp.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
#include <sound/hdmi-codec.h>
#include "dw-hdmi-qp.h"
#define DDC_CI_ADDR 0x37
#define DDC_SEGMENT_ADDR 0x30
#define HDMI14_MAX_TMDSCLK 340000000
#define SCRAMB_POLL_DELAY_MS 3000
/* State of the controller's built-in I2C (DDC) master. */
struct dw_hdmi_qp_i2c {
	struct i2c_adapter adap;

	struct mutex lock;	/* used to serialize data transfers */
	struct completion cmp;	/* completed by the IRQ handler on DONE/NACK */
	u8 stat;		/* I2CM_* irq status latched by the IRQ handler */

	u8 slave_reg;		/* next slave register address to access */
	bool is_regaddr;	/* first written byte already consumed as reg address */
	bool is_segment;	/* next read uses E-DDC segment addressing */
};
/* Per-device context for a Synopsys DesignWare HDMI QP TX instance. */
struct dw_hdmi_qp {
	struct drm_bridge bridge;

	struct device *dev;
	struct dw_hdmi_qp_i2c *i2c;

	struct {
		const struct dw_hdmi_qp_phy_ops *ops;	/* platform PHY callbacks */
		void *data;				/* opaque PHY context */
	} phy;

	struct regmap *regm;	/* MMIO register map of the controller */
};
/* Write @val to the controller register at byte offset @offset. */
static void dw_hdmi_qp_write(struct dw_hdmi_qp *hdmi, unsigned int val,
			     int offset)
{
	struct regmap *map = hdmi->regm;

	regmap_write(map, offset, val);
}
/* Read and return the controller register at byte offset @offset. */
static unsigned int dw_hdmi_qp_read(struct dw_hdmi_qp *hdmi, int offset)
{
	unsigned int regval = 0;

	/* On regmap failure regval stays 0, matching a best-effort read. */
	regmap_read(hdmi->regm, offset, &regval);

	return regval;
}
/* Read-modify-write: replace the @mask bits of register @reg with @data. */
static void dw_hdmi_qp_mod(struct dw_hdmi_qp *hdmi, unsigned int data,
			   unsigned int mask, unsigned int reg)
{
	struct regmap *map = hdmi->regm;

	regmap_update_bits(map, reg, mask, data);
}
/*
 * Read @length bytes from the current DDC slave into @buf, one byte per
 * hardware transaction.  For each byte: program the slave register
 * address, trigger an extended (E-DDC segment) or fast-mode read, and
 * wait for the IRQ handler to complete i2c->cmp.
 *
 * Returns 0 on success, -EAGAIN on timeout, -EIO on bus NACK.
 */
static int dw_hdmi_qp_i2c_read(struct dw_hdmi_qp *hdmi,
			       unsigned char *buf, unsigned int length)
{
	struct dw_hdmi_qp_i2c *i2c = hdmi->i2c;
	int stat;

	/* No register address byte seen yet: default to register 0. */
	if (!i2c->is_regaddr) {
		dev_dbg(hdmi->dev, "set read register address to 0\n");
		i2c->slave_reg = 0x00;
		i2c->is_regaddr = true;
	}

	while (length--) {
		reinit_completion(&i2c->cmp);

		/* Slave register address goes in the I2CM_ADDR field (bits 19:12). */
		dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR,
			       I2CM_INTERFACE_CONTROL0);

		/* Kick the transfer: extended read when segment addressing is armed. */
		if (i2c->is_segment)
			dw_hdmi_qp_mod(hdmi, I2CM_EXT_READ, I2CM_WR_MASK,
				       I2CM_INTERFACE_CONTROL0);
		else
			dw_hdmi_qp_mod(hdmi, I2CM_FM_READ, I2CM_WR_MASK,
				       I2CM_INTERFACE_CONTROL0);

		stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
		if (!stat) {
			dev_err(hdmi->dev, "i2c read timed out\n");
			/* Soft-reset the I2C master before bailing out. */
			dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
			return -EAGAIN;
		}

		/* Check for error condition on the bus */
		if (i2c->stat & I2CM_NACK_RCVD_IRQ) {
			dev_err(hdmi->dev, "i2c read error\n");
			dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
			return -EIO;
		}

		/* One byte per transaction, low byte of the RDDATA register. */
		*buf++ = dw_hdmi_qp_read(hdmi, I2CM_INTERFACE_RDDATA_0_3) & 0xff;
		/* Clear the command bits so the next byte can be triggered. */
		dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0);
	}

	/* Segment pointer only applies to the transfer that follows it. */
	i2c->is_segment = false;

	return 0;
}
/*
 * Write @length bytes from @buf to the current DDC slave, one byte per
 * hardware transaction.  The first byte of the very first write message
 * is consumed as the slave register address.
 *
 * Returns 0 on success, -EAGAIN on timeout, -EIO on bus NACK.
 */
static int dw_hdmi_qp_i2c_write(struct dw_hdmi_qp *hdmi,
				unsigned char *buf, unsigned int length)
{
	struct dw_hdmi_qp_i2c *i2c = hdmi->i2c;
	int stat;

	if (!i2c->is_regaddr) {
		/* Use the first write byte as register address */
		i2c->slave_reg = buf[0];
		length--;
		buf++;
		i2c->is_regaddr = true;
	}

	while (length--) {
		reinit_completion(&i2c->cmp);

		/* Load the data byte, then the target register, then trigger. */
		dw_hdmi_qp_write(hdmi, *buf++, I2CM_INTERFACE_WRDATA_0_3);
		dw_hdmi_qp_mod(hdmi, i2c->slave_reg++ << 12, I2CM_ADDR,
			       I2CM_INTERFACE_CONTROL0);
		dw_hdmi_qp_mod(hdmi, I2CM_FM_WRITE, I2CM_WR_MASK,
			       I2CM_INTERFACE_CONTROL0);

		stat = wait_for_completion_timeout(&i2c->cmp, HZ / 10);
		if (!stat) {
			dev_err(hdmi->dev, "i2c write time out!\n");
			/* Soft-reset the I2C master before bailing out. */
			dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
			return -EAGAIN;
		}

		/* Check for error condition on the bus */
		if (i2c->stat & I2CM_NACK_RCVD_IRQ) {
			dev_err(hdmi->dev, "i2c write nack!\n");
			dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
			return -EIO;
		}

		/* Clear the command bits so the next byte can be triggered. */
		dw_hdmi_qp_mod(hdmi, 0, I2CM_WR_MASK, I2CM_INTERFACE_CONTROL0);
	}

	return 0;
}
/*
 * i2c_algorithm .master_xfer callback: run @num messages on the DDC bus.
 *
 * DDC/CI (0x37) is rejected outright and zero-length messages are not
 * supported by the hardware.  Segment-pointer writes (E-DDC, 0x30) are
 * latched in hardware and applied to the following read.
 *
 * Returns @num on success or a negative errno.
 */
static int dw_hdmi_qp_i2c_xfer(struct i2c_adapter *adap,
			       struct i2c_msg *msgs, int num)
{
	struct dw_hdmi_qp *hdmi = i2c_get_adapdata(adap);
	struct dw_hdmi_qp_i2c *i2c = hdmi->i2c;
	u8 addr = msgs[0].addr;
	int i, ret = 0;

	if (addr == DDC_CI_ADDR)
		/*
		 * The internal I2C controller does not support the multi-byte
		 * read and write operations needed for DDC/CI.
		 * FIXME: Blacklist the DDC/CI address until we filter out
		 * unsupported I2C operations.
		 */
		return -EOPNOTSUPP;

	/* Reject the whole transfer up front if any message is empty. */
	for (i = 0; i < num; i++) {
		if (msgs[i].len == 0) {
			dev_err(hdmi->dev,
				"unsupported transfer %d/%d, no data\n",
				i + 1, num);
			return -EOPNOTSUPP;
		}
	}

	/* Serialize against concurrent transfers; released on return. */
	guard(mutex)(&i2c->lock);

	/* Unmute DONE and ERROR interrupts */
	dw_hdmi_qp_mod(hdmi, I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N,
		       I2CM_NACK_RCVD_MASK_N | I2CM_OP_DONE_MASK_N,
		       MAINUNIT_1_INT_MASK_N);

	/* Set slave device address taken from the first I2C message */
	if (addr == DDC_SEGMENT_ADDR && msgs[0].len == 1)
		addr = DDC_ADDR;

	dw_hdmi_qp_mod(hdmi, addr << 5, I2CM_SLVADDR, I2CM_INTERFACE_CONTROL0);

	/* Set slave device register address on transfer */
	i2c->is_regaddr = false;

	/* Set segment pointer for I2C extended read mode operation */
	i2c->is_segment = false;

	for (i = 0; i < num; i++) {
		if (msgs[i].addr == DDC_SEGMENT_ADDR && msgs[i].len == 1) {
			/* Latch the E-DDC segment pointer for the next read. */
			i2c->is_segment = true;
			dw_hdmi_qp_mod(hdmi, DDC_SEGMENT_ADDR, I2CM_SEG_ADDR,
				       I2CM_INTERFACE_CONTROL1);
			dw_hdmi_qp_mod(hdmi, *msgs[i].buf << 7, I2CM_SEG_PTR,
				       I2CM_INTERFACE_CONTROL1);
		} else {
			if (msgs[i].flags & I2C_M_RD)
				ret = dw_hdmi_qp_i2c_read(hdmi, msgs[i].buf,
							  msgs[i].len);
			else
				ret = dw_hdmi_qp_i2c_write(hdmi, msgs[i].buf,
							   msgs[i].len);
		}
		if (ret < 0)
			break;
	}

	if (!ret)
		ret = num;

	/* Mute DONE and ERROR interrupts */
	dw_hdmi_qp_mod(hdmi, 0, I2CM_OP_DONE_MASK_N | I2CM_NACK_RCVD_MASK_N,
		       MAINUNIT_1_INT_MASK_N);

	return ret;
}
/* Advertise plain I2C plus emulated SMBus capability to the core. */
static u32 dw_hdmi_qp_i2c_func(struct i2c_adapter *adapter)
{
	u32 caps = I2C_FUNC_I2C;

	caps |= I2C_FUNC_SMBUS_EMUL;

	return caps;
}
/* i2c_algorithm hooks for the built-in DDC master. */
static const struct i2c_algorithm dw_hdmi_qp_algorithm = {
	.master_xfer = dw_hdmi_qp_i2c_xfer,
	.functionality = dw_hdmi_qp_i2c_func,
};
/*
 * Allocate and register the device-managed I2C adapter that fronts the
 * controller's DDC master.  On success stores the context in hdmi->i2c
 * and returns the adapter; returns an ERR_PTR on failure.
 */
static struct i2c_adapter *dw_hdmi_qp_i2c_adapter(struct dw_hdmi_qp *hdmi)
{
	struct dw_hdmi_qp_i2c *i2c;
	struct i2c_adapter *adap;
	int ret;

	i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return ERR_PTR(-ENOMEM);

	mutex_init(&i2c->lock);
	init_completion(&i2c->cmp);

	adap = &i2c->adap;
	adap->owner = THIS_MODULE;
	adap->dev.parent = hdmi->dev;
	adap->algo = &dw_hdmi_qp_algorithm;
	strscpy(adap->name, "DesignWare HDMI QP", sizeof(adap->name));

	i2c_set_adapdata(adap, hdmi);

	ret = devm_i2c_add_adapter(hdmi->dev, adap);
	if (ret) {
		dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name);
		/* Release the context early; devm would only free it at unbind. */
		devm_kfree(hdmi->dev, i2c);
		return ERR_PTR(ret);
	}

	hdmi->i2c = i2c;

	dev_info(hdmi->dev, "registered %s I2C bus driver\n", adap->name);

	return adap;
}
/*
 * Program a packed AVI infoframe (@buffer, @len bytes) into the packet
 * scheduler and enable AVI + GCP transmission.
 *
 * Returns 0 on success, -EINVAL if @len is not the packed AVI size.
 */
static int dw_hdmi_qp_config_avi_infoframe(struct dw_hdmi_qp *hdmi,
					   const u8 *buffer, size_t len)
{
	u32 val, i, j;

	if (len != HDMI_INFOFRAME_SIZE(AVI)) {
		dev_err(hdmi->dev, "failed to configure avi infoframe\n");
		return -EINVAL;
	}

	/*
	 * DW HDMI QP IP uses a different byte format from standard AVI info
	 * frames, though generally the bits are in the correct bytes.
	 */
	val = buffer[1] << 8 | buffer[2] << 16;
	dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS0);

	/*
	 * Pack the 14 payload bytes (starting at buffer[3]) little-endian
	 * into the four 32-bit PKT_AVI_CONTENTS1..4 registers.
	 */
	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			if (i * 4 + j >= 14)
				break;
			if (!j)
				val = buffer[i * 4 + j + 3];
			val |= buffer[i * 4 + j + 3] << (8 * j);
		}

		dw_hdmi_qp_write(hdmi, val, PKT_AVI_CONTENTS1 + i * 4);
	}

	/* Send once per frame (field-rate bit cleared), then enable TX. */
	dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_FIELDRATE, PKTSCHED_PKT_CONFIG1);

	dw_hdmi_qp_mod(hdmi, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN,
		       PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN, PKTSCHED_PKT_EN);

	return 0;
}
/*
 * Program a packed DRM (HDR metadata) infoframe into the packet
 * scheduler and enable its transmission.  buffer[2] is the payload
 * length byte, so the copy loop packs buffer[3..3+len] into the
 * PKT_DRMI_CONTENTS1+ registers, four bytes per 32-bit word.
 *
 * Returns 0 on success, -EINVAL if @len is not the packed DRM size.
 */
static int dw_hdmi_qp_config_drm_infoframe(struct dw_hdmi_qp *hdmi,
					   const u8 *buffer, size_t len)
{
	u32 val, i;

	if (len != HDMI_INFOFRAME_SIZE(DRM)) {
		dev_err(hdmi->dev, "failed to configure drm infoframe\n");
		return -EINVAL;
	}

	/* Disable transmission while the packet registers are rewritten. */
	dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN);

	val = buffer[1] << 8 | buffer[2] << 16;
	dw_hdmi_qp_write(hdmi, val, PKT_DRMI_CONTENTS0);

	for (i = 0; i <= buffer[2]; i++) {
		if (i % 4 == 0)
			val = buffer[3 + i];
		val |= buffer[3 + i] << ((i % 4) * 8);

		/* Flush each completed word, plus the final partial word. */
		if ((i % 4 == 3) || i == buffer[2])
			dw_hdmi_qp_write(hdmi, val,
					 PKT_DRMI_CONTENTS1 + ((i / 4) * 4));
	}

	/* Send once per frame (field-rate bit cleared), then enable TX. */
	dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_FIELDRATE, PKTSCHED_PKT_CONFIG1);
	dw_hdmi_qp_mod(hdmi, PKTSCHED_DRMI_TX_EN, PKTSCHED_DRMI_TX_EN,
		       PKTSCHED_PKT_EN);

	return 0;
}
/*
 * drm_bridge .atomic_check: delegate validation of the connector's HDMI
 * state to the common helper; log the failure reason at debug level.
 */
static int dw_hdmi_qp_bridge_atomic_check(struct drm_bridge *bridge,
					  struct drm_bridge_state *bridge_state,
					  struct drm_crtc_state *crtc_state,
					  struct drm_connector_state *conn_state)
{
	struct dw_hdmi_qp *hdmi = bridge->driver_private;
	int ret = drm_atomic_helper_connector_hdmi_check(conn_state->connector,
							 conn_state->state);

	if (ret)
		dev_dbg(hdmi->dev, "%s failed: %d\n", __func__, ret);

	return ret;
}
/*
 * drm_bridge .atomic_enable: bring the link up for the new state.
 * Selects HDMI or DVI operating mode from the sink's capabilities,
 * initializes the PHY, bypasses HDCP2 and refreshes the infoframes.
 */
static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
					    struct drm_bridge_state *old_state)
{
	struct dw_hdmi_qp *hdmi = bridge->driver_private;
	struct drm_atomic_state *state = old_state->base.state;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;
	unsigned int op_mode;

	/* Look up the connector newly routed to this bridge's encoder. */
	connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
	if (WARN_ON(!connector))
		return;

	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (WARN_ON(!conn_state))
		return;

	if (connector->display_info.is_hdmi) {
		dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n",
			__func__, conn_state->hdmi.tmds_char_rate);
		op_mode = 0;
	} else {
		dev_dbg(hdmi->dev, "%s mode=DVI\n", __func__);
		op_mode = OPMODE_DVI;
	}

	hdmi->phy.ops->init(hdmi, hdmi->phy.data);

	/* Bypass HDCP2 logic and latch the HDMI/DVI operating mode. */
	dw_hdmi_qp_mod(hdmi, HDCP2_BYPASS, HDCP2_BYPASS, HDCP2LOGIC_CONFIG0);
	dw_hdmi_qp_mod(hdmi, op_mode, OPMODE_DVI, LINK_CONFIG0);

	drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
}
static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
hdmi->phy.ops->disable(hdmi, hdmi->phy.data);
}
static enum drm_connector_status
dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
return hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data);
}
/* drm_bridge .edid_read: fetch the sink's EDID over the bridge's DDC bus. */
static const struct drm_edid *
dw_hdmi_qp_bridge_edid_read(struct drm_bridge *bridge,
			    struct drm_connector *connector)
{
	struct dw_hdmi_qp *hdmi = bridge->driver_private;
	const struct drm_edid *edid = drm_edid_read_ddc(connector, bridge->ddc);

	if (!edid)
		dev_dbg(hdmi->dev, "failed to get edid\n");

	return edid;
}
/*
 * drm_bridge .mode_valid: accept a mode only if its 8bpc RGB TMDS
 * character rate fits within the HDMI 1.4 limit (340 MHz).
 */
static enum drm_mode_status
dw_hdmi_qp_bridge_mode_valid(struct drm_bridge *bridge,
			     const struct drm_display_info *info,
			     const struct drm_display_mode *mode)
{
	struct dw_hdmi_qp *hdmi = bridge->driver_private;
	unsigned long long tmds_rate =
		drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);

	if (tmds_rate <= HDMI14_MAX_TMDSCLK)
		return MODE_OK;

	dev_dbg(hdmi->dev, "Unsupported mode clock: %d\n", mode->clock);

	return MODE_CLOCK_HIGH;
}
/*
 * drm_bridge .hdmi_clear_infoframe: stop transmitting the given
 * infoframe type.  Unknown types are only logged; always returns 0.
 */
static int dw_hdmi_qp_bridge_clear_infoframe(struct drm_bridge *bridge,
					     enum hdmi_infoframe_type type)
{
	struct dw_hdmi_qp *hdmi = bridge->driver_private;

	if (type == HDMI_INFOFRAME_TYPE_AVI) {
		/* AVI and GCP are enabled together, so disable both. */
		dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_AVI_TX_EN | PKTSCHED_GCP_TX_EN,
			       PKTSCHED_PKT_EN);
	} else if (type == HDMI_INFOFRAME_TYPE_DRM) {
		dw_hdmi_qp_mod(hdmi, 0, PKTSCHED_DRMI_TX_EN, PKTSCHED_PKT_EN);
	} else {
		dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
	}

	return 0;
}
/*
 * drm_bridge .hdmi_write_infoframe: clear any previous frame of @type,
 * then program the new packed frame.  Unknown types are only logged.
 */
static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge,
					     enum hdmi_infoframe_type type,
					     const u8 *buffer, size_t len)
{
	struct dw_hdmi_qp *hdmi = bridge->driver_private;
	int ret = 0;

	dw_hdmi_qp_bridge_clear_infoframe(bridge, type);

	switch (type) {
	case HDMI_INFOFRAME_TYPE_AVI:
		ret = dw_hdmi_qp_config_avi_infoframe(hdmi, buffer, len);
		break;
	case HDMI_INFOFRAME_TYPE_DRM:
		ret = dw_hdmi_qp_config_drm_infoframe(hdmi, buffer, len);
		break;
	default:
		dev_dbg(hdmi->dev, "Unsupported infoframe type %x\n", type);
		break;
	}

	return ret;
}
/* drm_bridge callbacks; atomic state management uses the stock helpers. */
static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_check = dw_hdmi_qp_bridge_atomic_check,
	.atomic_enable = dw_hdmi_qp_bridge_atomic_enable,
	.atomic_disable = dw_hdmi_qp_bridge_atomic_disable,
	.detect = dw_hdmi_qp_bridge_detect,
	.edid_read = dw_hdmi_qp_bridge_edid_read,
	.mode_valid = dw_hdmi_qp_bridge_mode_valid,
	.hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe,
	.hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe,
};
/*
 * Hard IRQ handler for the main unit interrupt: acknowledge I2C master
 * events and wake the waiting transfer via i2c->cmp.
 */
static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id)
{
	struct dw_hdmi_qp *hdmi = dev_id;
	struct dw_hdmi_qp_i2c *i2c = hdmi->i2c;
	u32 stat;

	stat = dw_hdmi_qp_read(hdmi, MAINUNIT_1_INT_STATUS);

	/* Latch only the I2C-master bits for the sleeping transfer to inspect. */
	i2c->stat = stat & (I2CM_OP_DONE_IRQ | I2CM_READ_REQUEST_IRQ |
			    I2CM_NACK_RCVD_IRQ);

	if (i2c->stat) {
		dw_hdmi_qp_write(hdmi, i2c->stat, MAINUNIT_1_INT_CLEAR);
		complete(&i2c->cmp);
	}

	/* Claim the interrupt if any main-unit status bit was set at all. */
	if (stat)
		return IRQ_HANDLED;

	return IRQ_NONE;
}
/* 32-bit MMIO register layout; last register is EARCRX_1_INT_FORCE. */
static const struct regmap_config dw_hdmi_qp_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = EARCRX_1_INT_FORCE,
};
/*
 * Put the controller into a known state: mask all main-unit interrupts,
 * program the timer base, reset and configure the I2C master, and let
 * the PHY arm hot-plug detection.  Also used on system resume.
 */
static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi)
{
	dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N);
	dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N);
	/*
	 * NOTE(review): 428571429 presumably encodes the timer reference
	 * frequency in Hz (~428.57 MHz) — confirm against the QP databook.
	 */
	dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0);

	/* Software reset */
	dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);

	/* Fast-mode SCL timing; then force standard-mode (I2CM_FM_EN off). */
	dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0);

	dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0);

	/* Clear DONE and ERROR interrupts */
	dw_hdmi_qp_write(hdmi, I2CM_OP_DONE_CLEAR | I2CM_NACK_RCVD_CLEAR,
			 MAINUNIT_1_INT_CLEAR);

	if (hdmi->phy.ops->setup_hpd)
		hdmi->phy.ops->setup_hpd(hdmi, hdmi->phy.data);
}
/**
 * dw_hdmi_qp_bind - probe and attach a DW HDMI QP TX bridge
 * @pdev: platform device providing the MMIO resource
 * @encoder: encoder to attach the bridge to
 * @plat_data: platform glue (PHY ops/data, main IRQ number)
 *
 * Maps registers, initializes the hardware, requests the main IRQ,
 * registers the DDC I2C adapter and adds/attaches the drm_bridge.  All
 * resources are device-managed.  The bridge is attached with
 * DRM_BRIDGE_ATTACH_NO_CONNECTOR, so the display driver creates the
 * connector.
 *
 * Returns the new context or an ERR_PTR on failure.
 */
struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
				   struct drm_encoder *encoder,
				   const struct dw_hdmi_qp_plat_data *plat_data)
{
	struct device *dev = &pdev->dev;
	struct dw_hdmi_qp *hdmi;
	void __iomem *regs;
	int ret;

	/* init/disable/read_hpd are mandatory; setup_hpd stays optional. */
	if (!plat_data->phy_ops || !plat_data->phy_ops->init ||
	    !plat_data->phy_ops->disable || !plat_data->phy_ops->read_hpd) {
		dev_err(dev, "Missing platform PHY ops\n");
		return ERR_PTR(-ENODEV);
	}

	hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
	if (!hdmi)
		return ERR_PTR(-ENOMEM);

	hdmi->dev = dev;

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return ERR_CAST(regs);

	hdmi->regm = devm_regmap_init_mmio(dev, regs, &dw_hdmi_qp_regmap_config);
	if (IS_ERR(hdmi->regm)) {
		dev_err(dev, "Failed to configure regmap\n");
		return ERR_CAST(hdmi->regm);
	}

	hdmi->phy.ops = plat_data->phy_ops;
	hdmi->phy.data = plat_data->phy_data;

	/* Quiesce the hardware before the IRQ handler can run. */
	dw_hdmi_qp_init_hw(hdmi);

	ret = devm_request_threaded_irq(dev, plat_data->main_irq,
					dw_hdmi_qp_main_hardirq, NULL,
					IRQF_SHARED, dev_name(dev), hdmi);
	if (ret)
		return ERR_PTR(ret);

	hdmi->bridge.driver_private = hdmi;
	hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs;
	hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
			   DRM_BRIDGE_OP_EDID |
			   DRM_BRIDGE_OP_HDMI |
			   DRM_BRIDGE_OP_HPD;
	hdmi->bridge.of_node = pdev->dev.of_node;
	hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
	hdmi->bridge.vendor = "Synopsys";
	hdmi->bridge.product = "DW HDMI QP TX";

	/* The bridge owns the DDC bus; the IRQ handler needs hdmi->i2c set. */
	hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi);
	if (IS_ERR(hdmi->bridge.ddc))
		return ERR_CAST(hdmi->bridge.ddc);

	ret = devm_drm_bridge_add(dev, &hdmi->bridge);
	if (ret)
		return ERR_PTR(ret);

	ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ERR_PTR(ret);

	return hdmi;
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_bind);
/**
 * dw_hdmi_qp_resume - restore hardware state after system resume
 * @dev: unused; kept for symmetry with the platform glue's PM callbacks
 * @hdmi: context returned by dw_hdmi_qp_bind()
 *
 * Registers are lost across suspend; redo the one-time hardware init.
 */
void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi)
{
	dw_hdmi_qp_init_hw(hdmi);
}
EXPORT_SYMBOL_GPL(dw_hdmi_qp_resume);
MODULE_AUTHOR("Algea Cao <algea.cao@rock-chips.com>");
MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@collabora.com>");
MODULE_DESCRIPTION("DW HDMI QP transmitter library");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,834 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Rockchip Electronics Co.Ltd
* Author:
* Algea Cao <algea.cao@rock-chips.com>
*/
#ifndef __DW_HDMI_QP_H__
#define __DW_HDMI_QP_H__
#include <linux/bits.h>
/* Main Unit Registers */
#define CORE_ID 0x0
#define VER_NUMBER 0x4
#define VER_TYPE 0x8
#define CONFIG_REG 0xc
#define CONFIG_CEC BIT(28)
#define CONFIG_AUD_UD BIT(23)
#define CORE_TIMESTAMP_HHMM 0x14
#define CORE_TIMESTAMP_MMDD 0x18
#define CORE_TIMESTAMP_YYYY 0x1c
/* Reset Manager Registers */
#define GLOBAL_SWRESET_REQUEST 0x40
#define EARCRX_CMDC_SWINIT_P BIT(27)
#define AVP_DATAPATH_PACKET_AUDIO_SWINIT_P BIT(10)
#define GLOBAL_SWDISABLE 0x44
#define CEC_SWDISABLE BIT(17)
#define AVP_DATAPATH_PACKET_AUDIO_SWDISABLE BIT(10)
#define AVP_DATAPATH_VIDEO_SWDISABLE BIT(6)
#define RESET_MANAGER_CONFIG0 0x48
#define RESET_MANAGER_STATUS0 0x50
#define RESET_MANAGER_STATUS1 0x54
#define RESET_MANAGER_STATUS2 0x58
/* Timer Base Registers */
#define TIMER_BASE_CONFIG0 0x80
#define TIMER_BASE_STATUS0 0x84
/* CMU Registers */
#define CMU_CONFIG0 0xa0
#define CMU_CONFIG1 0xa4
#define CMU_CONFIG2 0xa8
#define CMU_CONFIG3 0xac
#define CMU_STATUS 0xb0
#define DISPLAY_CLK_MONITOR 0x3f
#define DISPLAY_CLK_LOCKED 0X15
#define EARC_BPCLK_OFF BIT(9)
#define AUDCLK_OFF BIT(7)
#define LINKQPCLK_OFF BIT(5)
#define VIDQPCLK_OFF BIT(3)
#define IPI_CLK_OFF BIT(1)
#define CMU_IPI_CLK_FREQ 0xb4
#define CMU_VIDQPCLK_FREQ 0xb8
#define CMU_LINKQPCLK_FREQ 0xbc
#define CMU_AUDQPCLK_FREQ 0xc0
#define CMU_EARC_BPCLK_FREQ 0xc4
/* I2CM Registers */
#define I2CM_SM_SCL_CONFIG0 0xe0
#define I2CM_FM_SCL_CONFIG0 0xe4
#define I2CM_CONFIG0 0xe8
#define I2CM_CONTROL0 0xec
#define I2CM_STATUS0 0xf0
#define I2CM_INTERFACE_CONTROL0 0xf4
#define I2CM_ADDR 0xff000
#define I2CM_SLVADDR 0xfe0
#define I2CM_WR_MASK 0x1e
#define I2CM_EXT_READ BIT(4)
#define I2CM_SHORT_READ BIT(3)
#define I2CM_FM_READ BIT(2)
#define I2CM_FM_WRITE BIT(1)
#define I2CM_FM_EN BIT(0)
#define I2CM_INTERFACE_CONTROL1 0xf8
#define I2CM_SEG_PTR 0x7f80
#define I2CM_SEG_ADDR 0x7f
#define I2CM_INTERFACE_WRDATA_0_3 0xfc
#define I2CM_INTERFACE_WRDATA_4_7 0x100
#define I2CM_INTERFACE_WRDATA_8_11 0x104
#define I2CM_INTERFACE_WRDATA_12_15 0x108
#define I2CM_INTERFACE_RDDATA_0_3 0x10c
#define I2CM_INTERFACE_RDDATA_4_7 0x110
#define I2CM_INTERFACE_RDDATA_8_11 0x114
#define I2CM_INTERFACE_RDDATA_12_15 0x118
/* SCDC Registers */
#define SCDC_CONFIG0 0x140
#define SCDC_I2C_FM_EN BIT(12)
#define SCDC_UPD_FLAGS_AUTO_CLR BIT(6)
#define SCDC_UPD_FLAGS_POLL_EN BIT(4)
#define SCDC_CONTROL0 0x148
#define SCDC_STATUS0 0x150
#define STATUS_UPDATE BIT(0)
#define FRL_START BIT(4)
#define FLT_UPDATE BIT(5)
/* FLT Registers */
#define FLT_CONFIG0 0x160
#define FLT_CONFIG1 0x164
#define FLT_CONFIG2 0x168
#define FLT_CONTROL0 0x170
/* Main Unit 2 Registers */
#define MAINUNIT_STATUS0 0x180
/* Video Interface Registers */
#define VIDEO_INTERFACE_CONFIG0 0x800
#define VIDEO_INTERFACE_CONFIG1 0x804
#define VIDEO_INTERFACE_CONFIG2 0x808
#define VIDEO_INTERFACE_CONTROL0 0x80c
#define VIDEO_INTERFACE_STATUS0 0x814
/* Video Packing Registers */
#define VIDEO_PACKING_CONFIG0 0x81c
/* Audio Interface Registers */
#define AUDIO_INTERFACE_CONFIG0 0x820
#define AUD_IF_SEL_MSK 0x3
#define AUD_IF_SPDIF 0x2
#define AUD_IF_I2S 0x1
#define AUD_IF_PAI 0x0
#define AUD_FIFO_INIT_ON_OVF_MSK BIT(2)
#define AUD_FIFO_INIT_ON_OVF_EN BIT(2)
#define I2S_LINES_EN_MSK GENMASK(7, 4)
#define I2S_LINES_EN(x) BIT((x) + 4)
#define I2S_BPCUV_RCV_MSK BIT(12)
#define I2S_BPCUV_RCV_EN BIT(12)
#define I2S_BPCUV_RCV_DIS 0
#define SPDIF_LINES_EN GENMASK(19, 16)
#define AUD_FORMAT_MSK GENMASK(26, 24)
#define AUD_3DOBA (0x7 << 24)
#define AUD_3DASP (0x6 << 24)
#define AUD_MSOBA (0x5 << 24)
#define AUD_MSASP (0x4 << 24)
#define AUD_HBR (0x3 << 24)
#define AUD_DST (0x2 << 24)
#define AUD_OBA (0x1 << 24)
#define AUD_ASP (0x0 << 24)
#define AUDIO_INTERFACE_CONFIG1 0x824
#define AUDIO_INTERFACE_CONTROL0 0x82c
#define AUDIO_FIFO_CLR_P BIT(0)
#define AUDIO_INTERFACE_STATUS0 0x834
/* Frame Composer Registers */
#define FRAME_COMPOSER_CONFIG0 0x840
#define FRAME_COMPOSER_CONFIG1 0x844
#define FRAME_COMPOSER_CONFIG2 0x848
#define FRAME_COMPOSER_CONFIG3 0x84c
#define FRAME_COMPOSER_CONFIG4 0x850
#define FRAME_COMPOSER_CONFIG5 0x854
#define FRAME_COMPOSER_CONFIG6 0x858
#define FRAME_COMPOSER_CONFIG7 0x85c
#define FRAME_COMPOSER_CONFIG8 0x860
#define FRAME_COMPOSER_CONFIG9 0x864
#define FRAME_COMPOSER_CONTROL0 0x86c
/* Video Monitor Registers */
#define VIDEO_MONITOR_CONFIG0 0x880
#define VIDEO_MONITOR_STATUS0 0x884
#define VIDEO_MONITOR_STATUS1 0x888
#define VIDEO_MONITOR_STATUS2 0x88c
#define VIDEO_MONITOR_STATUS3 0x890
#define VIDEO_MONITOR_STATUS4 0x894
#define VIDEO_MONITOR_STATUS5 0x898
#define VIDEO_MONITOR_STATUS6 0x89c
/* HDCP2 Logic Registers */
#define HDCP2LOGIC_CONFIG0 0x8e0
#define HDCP2_BYPASS BIT(0)
#define HDCP2LOGIC_ESM_GPIO_IN 0x8e4
#define HDCP2LOGIC_ESM_GPIO_OUT 0x8e8
/* HDCP14 Registers */
#define HDCP14_CONFIG0 0x900
#define HDCP14_CONFIG1 0x904
#define HDCP14_CONFIG2 0x908
#define HDCP14_CONFIG3 0x90c
#define HDCP14_KEY_SEED 0x914
#define HDCP14_KEY_H 0x918
#define HDCP14_KEY_L 0x91c
#define HDCP14_KEY_STATUS 0x920
#define HDCP14_AKSV_H 0x924
#define HDCP14_AKSV_L 0x928
#define HDCP14_AN_H 0x92c
#define HDCP14_AN_L 0x930
#define HDCP14_STATUS0 0x934
#define HDCP14_STATUS1 0x938
/* Scrambler Registers */
#define SCRAMB_CONFIG0 0x960
/* Video Configuration Registers */
#define LINK_CONFIG0 0x968
#define OPMODE_FRL_4LANES BIT(8)
#define OPMODE_DVI BIT(4)
#define OPMODE_FRL BIT(0)
/* TMDS FIFO Registers */
#define TMDS_FIFO_CONFIG0 0x970
#define TMDS_FIFO_CONTROL0 0x974
/* FRL RSFEC Registers */
#define FRL_RSFEC_CONFIG0 0xa20
#define FRL_RSFEC_STATUS0 0xa30
/* FRL Packetizer Registers */
#define FRL_PKTZ_CONFIG0 0xa40
#define FRL_PKTZ_CONTROL0 0xa44
#define FRL_PKTZ_CONTROL1 0xa50
#define FRL_PKTZ_STATUS1 0xa54
/* Packet Scheduler Registers */
#define PKTSCHED_CONFIG0 0xa80
#define PKTSCHED_PRQUEUE0_CONFIG0 0xa84
#define PKTSCHED_PRQUEUE1_CONFIG0 0xa88
#define PKTSCHED_PRQUEUE2_CONFIG0 0xa8c
#define PKTSCHED_PRQUEUE2_CONFIG1 0xa90
#define PKTSCHED_PRQUEUE2_CONFIG2 0xa94
#define PKTSCHED_PKT_CONFIG0 0xa98
#define PKTSCHED_PKT_CONFIG1 0xa9c
#define PKTSCHED_DRMI_FIELDRATE BIT(13)
#define PKTSCHED_AVI_FIELDRATE BIT(12)
#define PKTSCHED_PKT_CONFIG2 0xaa0
#define PKTSCHED_PKT_CONFIG3 0xaa4
#define PKTSCHED_PKT_EN 0xaa8
#define PKTSCHED_DRMI_TX_EN BIT(17)
#define PKTSCHED_AUDI_TX_EN BIT(15)
#define PKTSCHED_AVI_TX_EN BIT(13)
#define PKTSCHED_EMP_CVTEM_TX_EN BIT(10)
#define PKTSCHED_AMD_TX_EN BIT(8)
#define PKTSCHED_GCP_TX_EN BIT(3)
#define PKTSCHED_AUDS_TX_EN BIT(2)
#define PKTSCHED_ACR_TX_EN BIT(1)
#define PKTSCHED_NULL_TX_EN BIT(0)
#define PKTSCHED_PKT_CONTROL0 0xaac
#define PKTSCHED_PKT_SEND 0xab0
#define PKTSCHED_PKT_STATUS0 0xab4
#define PKTSCHED_PKT_STATUS1 0xab8
#define PKT_NULL_CONTENTS0 0xb00
#define PKT_NULL_CONTENTS1 0xb04
#define PKT_NULL_CONTENTS2 0xb08
#define PKT_NULL_CONTENTS3 0xb0c
#define PKT_NULL_CONTENTS4 0xb10
#define PKT_NULL_CONTENTS5 0xb14
#define PKT_NULL_CONTENTS6 0xb18
#define PKT_NULL_CONTENTS7 0xb1c
#define PKT_ACP_CONTENTS0 0xb20
#define PKT_ACP_CONTENTS1 0xb24
#define PKT_ACP_CONTENTS2 0xb28
#define PKT_ACP_CONTENTS3 0xb2c
#define PKT_ACP_CONTENTS4 0xb30
#define PKT_ACP_CONTENTS5 0xb34
#define PKT_ACP_CONTENTS6 0xb38
#define PKT_ACP_CONTENTS7 0xb3c
#define PKT_ISRC1_CONTENTS0 0xb40
#define PKT_ISRC1_CONTENTS1 0xb44
#define PKT_ISRC1_CONTENTS2 0xb48
#define PKT_ISRC1_CONTENTS3 0xb4c
#define PKT_ISRC1_CONTENTS4 0xb50
#define PKT_ISRC1_CONTENTS5 0xb54
#define PKT_ISRC1_CONTENTS6 0xb58
#define PKT_ISRC1_CONTENTS7 0xb5c
#define PKT_ISRC2_CONTENTS0 0xb60
#define PKT_ISRC2_CONTENTS1 0xb64
#define PKT_ISRC2_CONTENTS2 0xb68
#define PKT_ISRC2_CONTENTS3 0xb6c
#define PKT_ISRC2_CONTENTS4 0xb70
#define PKT_ISRC2_CONTENTS5 0xb74
#define PKT_ISRC2_CONTENTS6 0xb78
#define PKT_ISRC2_CONTENTS7 0xb7c
#define PKT_GMD_CONTENTS0 0xb80
#define PKT_GMD_CONTENTS1 0xb84
#define PKT_GMD_CONTENTS2 0xb88
#define PKT_GMD_CONTENTS3 0xb8c
#define PKT_GMD_CONTENTS4 0xb90
#define PKT_GMD_CONTENTS5 0xb94
#define PKT_GMD_CONTENTS6 0xb98
#define PKT_GMD_CONTENTS7 0xb9c
#define PKT_AMD_CONTENTS0 0xba0
#define PKT_AMD_CONTENTS1 0xba4
#define PKT_AMD_CONTENTS2 0xba8
#define PKT_AMD_CONTENTS3 0xbac
#define PKT_AMD_CONTENTS4 0xbb0
#define PKT_AMD_CONTENTS5 0xbb4
#define PKT_AMD_CONTENTS6 0xbb8
#define PKT_AMD_CONTENTS7 0xbbc
#define PKT_VSI_CONTENTS0 0xbc0
#define PKT_VSI_CONTENTS1 0xbc4
#define PKT_VSI_CONTENTS2 0xbc8
#define PKT_VSI_CONTENTS3 0xbcc
#define PKT_VSI_CONTENTS4 0xbd0
#define PKT_VSI_CONTENTS5 0xbd4
#define PKT_VSI_CONTENTS6 0xbd8
#define PKT_VSI_CONTENTS7 0xbdc
#define PKT_AVI_CONTENTS0 0xbe0
#define HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT BIT(4)
#define HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR 0x04
#define HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR 0x08
#define HDMI_FC_AVICONF2_IT_CONTENT_VALID 0x80
#define PKT_AVI_CONTENTS1 0xbe4
#define PKT_AVI_CONTENTS2 0xbe8
#define PKT_AVI_CONTENTS3 0xbec
#define PKT_AVI_CONTENTS4 0xbf0
#define PKT_AVI_CONTENTS5 0xbf4
#define PKT_AVI_CONTENTS6 0xbf8
#define PKT_AVI_CONTENTS7 0xbfc
#define PKT_SPDI_CONTENTS0 0xc00
#define PKT_SPDI_CONTENTS1 0xc04
#define PKT_SPDI_CONTENTS2 0xc08
#define PKT_SPDI_CONTENTS3 0xc0c
#define PKT_SPDI_CONTENTS4 0xc10
#define PKT_SPDI_CONTENTS5 0xc14
#define PKT_SPDI_CONTENTS6 0xc18
#define PKT_SPDI_CONTENTS7 0xc1c
#define PKT_AUDI_CONTENTS0 0xc20
#define PKT_AUDI_CONTENTS1 0xc24
#define PKT_AUDI_CONTENTS2 0xc28
#define PKT_AUDI_CONTENTS3 0xc2c
#define PKT_AUDI_CONTENTS4 0xc30
#define PKT_AUDI_CONTENTS5 0xc34
#define PKT_AUDI_CONTENTS6 0xc38
#define PKT_AUDI_CONTENTS7 0xc3c
#define PKT_NVI_CONTENTS0 0xc40
#define PKT_NVI_CONTENTS1 0xc44
#define PKT_NVI_CONTENTS2 0xc48
#define PKT_NVI_CONTENTS3 0xc4c
#define PKT_NVI_CONTENTS4 0xc50
#define PKT_NVI_CONTENTS5 0xc54
#define PKT_NVI_CONTENTS6 0xc58
#define PKT_NVI_CONTENTS7 0xc5c
#define PKT_DRMI_CONTENTS0 0xc60
#define PKT_DRMI_CONTENTS1 0xc64
#define PKT_DRMI_CONTENTS2 0xc68
#define PKT_DRMI_CONTENTS3 0xc6c
#define PKT_DRMI_CONTENTS4 0xc70
#define PKT_DRMI_CONTENTS5 0xc74
#define PKT_DRMI_CONTENTS6 0xc78
#define PKT_DRMI_CONTENTS7 0xc7c
#define PKT_GHDMI1_CONTENTS0 0xc80
#define PKT_GHDMI1_CONTENTS1 0xc84
#define PKT_GHDMI1_CONTENTS2 0xc88
#define PKT_GHDMI1_CONTENTS3 0xc8c
#define PKT_GHDMI1_CONTENTS4 0xc90
#define PKT_GHDMI1_CONTENTS5 0xc94
#define PKT_GHDMI1_CONTENTS6 0xc98
#define PKT_GHDMI1_CONTENTS7 0xc9c
#define PKT_GHDMI2_CONTENTS0 0xca0
#define PKT_GHDMI2_CONTENTS1 0xca4
#define PKT_GHDMI2_CONTENTS2 0xca8
#define PKT_GHDMI2_CONTENTS3 0xcac
#define PKT_GHDMI2_CONTENTS4 0xcb0
#define PKT_GHDMI2_CONTENTS5 0xcb4
#define PKT_GHDMI2_CONTENTS6 0xcb8
#define PKT_GHDMI2_CONTENTS7 0xcbc
/* EMP Packetizer Registers */
#define PKT_EMP_CONFIG0 0xce0
#define PKT_EMP_CONTROL0 0xcec
#define PKT_EMP_CONTROL1 0xcf0
#define PKT_EMP_CONTROL2 0xcf4
#define PKT_EMP_VTEM_CONTENTS0 0xd00
#define PKT_EMP_VTEM_CONTENTS1 0xd04
#define PKT_EMP_VTEM_CONTENTS2 0xd08
#define PKT_EMP_VTEM_CONTENTS3 0xd0c
#define PKT_EMP_VTEM_CONTENTS4 0xd10
#define PKT_EMP_VTEM_CONTENTS5 0xd14
#define PKT_EMP_VTEM_CONTENTS6 0xd18
#define PKT_EMP_VTEM_CONTENTS7 0xd1c
#define PKT0_EMP_CVTEM_CONTENTS0 0xd20
#define PKT0_EMP_CVTEM_CONTENTS1 0xd24
#define PKT0_EMP_CVTEM_CONTENTS2 0xd28
#define PKT0_EMP_CVTEM_CONTENTS3 0xd2c
#define PKT0_EMP_CVTEM_CONTENTS4 0xd30
#define PKT0_EMP_CVTEM_CONTENTS5 0xd34
#define PKT0_EMP_CVTEM_CONTENTS6 0xd38
#define PKT0_EMP_CVTEM_CONTENTS7 0xd3c
#define PKT1_EMP_CVTEM_CONTENTS0 0xd40
#define PKT1_EMP_CVTEM_CONTENTS1 0xd44
#define PKT1_EMP_CVTEM_CONTENTS2 0xd48
#define PKT1_EMP_CVTEM_CONTENTS3 0xd4c
#define PKT1_EMP_CVTEM_CONTENTS4 0xd50
#define PKT1_EMP_CVTEM_CONTENTS5 0xd54
#define PKT1_EMP_CVTEM_CONTENTS6 0xd58
#define PKT1_EMP_CVTEM_CONTENTS7 0xd5c
#define PKT2_EMP_CVTEM_CONTENTS0 0xd60
#define PKT2_EMP_CVTEM_CONTENTS1 0xd64
#define PKT2_EMP_CVTEM_CONTENTS2 0xd68
#define PKT2_EMP_CVTEM_CONTENTS3 0xd6c
#define PKT2_EMP_CVTEM_CONTENTS4 0xd70
#define PKT2_EMP_CVTEM_CONTENTS5 0xd74
#define PKT2_EMP_CVTEM_CONTENTS6 0xd78
#define PKT2_EMP_CVTEM_CONTENTS7 0xd7c
#define PKT3_EMP_CVTEM_CONTENTS0 0xd80
#define PKT3_EMP_CVTEM_CONTENTS1 0xd84
#define PKT3_EMP_CVTEM_CONTENTS2 0xd88
#define PKT3_EMP_CVTEM_CONTENTS3 0xd8c
#define PKT3_EMP_CVTEM_CONTENTS4 0xd90
#define PKT3_EMP_CVTEM_CONTENTS5 0xd94
#define PKT3_EMP_CVTEM_CONTENTS6 0xd98
#define PKT3_EMP_CVTEM_CONTENTS7 0xd9c
#define PKT4_EMP_CVTEM_CONTENTS0 0xda0
#define PKT4_EMP_CVTEM_CONTENTS1 0xda4
#define PKT4_EMP_CVTEM_CONTENTS2 0xda8
#define PKT4_EMP_CVTEM_CONTENTS3 0xdac
#define PKT4_EMP_CVTEM_CONTENTS4 0xdb0
#define PKT4_EMP_CVTEM_CONTENTS5 0xdb4
#define PKT4_EMP_CVTEM_CONTENTS6 0xdb8
#define PKT4_EMP_CVTEM_CONTENTS7 0xdbc
#define PKT5_EMP_CVTEM_CONTENTS0 0xdc0
#define PKT5_EMP_CVTEM_CONTENTS1 0xdc4
#define PKT5_EMP_CVTEM_CONTENTS2 0xdc8
#define PKT5_EMP_CVTEM_CONTENTS3 0xdcc
#define PKT5_EMP_CVTEM_CONTENTS4 0xdd0
#define PKT5_EMP_CVTEM_CONTENTS5 0xdd4
#define PKT5_EMP_CVTEM_CONTENTS6 0xdd8
#define PKT5_EMP_CVTEM_CONTENTS7 0xddc
/* Audio Packetizer Registers */
#define AUDPKT_CONTROL0 0xe20
#define AUDPKT_PBIT_FORCE_EN_MASK BIT(12)
#define AUDPKT_PBIT_FORCE_EN BIT(12)
#define AUDPKT_CHSTATUS_OVR_EN_MASK BIT(0)
#define AUDPKT_CHSTATUS_OVR_EN BIT(0)
#define AUDPKT_CONTROL1 0xe24
#define AUDPKT_ACR_CONTROL0 0xe40
#define AUDPKT_ACR_N_VALUE 0xfffff
#define AUDPKT_ACR_CONTROL1 0xe44
#define AUDPKT_ACR_CTS_OVR_VAL_MSK GENMASK(23, 4)
#define AUDPKT_ACR_CTS_OVR_VAL(x) ((x) << 4)
#define AUDPKT_ACR_CTS_OVR_EN_MSK BIT(1)
#define AUDPKT_ACR_CTS_OVR_EN BIT(1)
#define AUDPKT_ACR_STATUS0 0xe4c
#define AUDPKT_CHSTATUS_OVR0 0xe60
#define AUDPKT_CHSTATUS_OVR1 0xe64
/* IEC60958 Byte 3: Sampling frequency Bits 24 to 27 */
#define AUDPKT_CHSTATUS_SR_MASK GENMASK(3, 0)
#define AUDPKT_CHSTATUS_SR_22050 0x4
#define AUDPKT_CHSTATUS_SR_24000 0x6
#define AUDPKT_CHSTATUS_SR_32000 0x3
#define AUDPKT_CHSTATUS_SR_44100 0x0
#define AUDPKT_CHSTATUS_SR_48000 0x2
#define AUDPKT_CHSTATUS_SR_88200 0x8
#define AUDPKT_CHSTATUS_SR_96000 0xa
#define AUDPKT_CHSTATUS_SR_176400 0xc
#define AUDPKT_CHSTATUS_SR_192000 0xe
#define AUDPKT_CHSTATUS_SR_768000 0x9
#define AUDPKT_CHSTATUS_SR_NOT_INDICATED 0x1
/* IEC60958 Byte 4: Original sampling frequency Bits 36 to 39 */
#define AUDPKT_CHSTATUS_0SR_MASK GENMASK(15, 12)
#define AUDPKT_CHSTATUS_OSR_8000 0x6
#define AUDPKT_CHSTATUS_OSR_11025 0xa
#define AUDPKT_CHSTATUS_OSR_12000 0x2
#define AUDPKT_CHSTATUS_OSR_16000 0x8
#define AUDPKT_CHSTATUS_OSR_22050 0xb
#define AUDPKT_CHSTATUS_OSR_24000 0x9
#define AUDPKT_CHSTATUS_OSR_32000 0xc
#define AUDPKT_CHSTATUS_OSR_44100 0xf
#define AUDPKT_CHSTATUS_OSR_48000 0xd
#define AUDPKT_CHSTATUS_OSR_88200 0x7
#define AUDPKT_CHSTATUS_OSR_96000 0x5
#define AUDPKT_CHSTATUS_OSR_176400 0x3
#define AUDPKT_CHSTATUS_OSR_192000 0x1
#define AUDPKT_CHSTATUS_OSR_NOT_INDICATED 0x0
#define AUDPKT_CHSTATUS_OVR2 0xe68
#define AUDPKT_CHSTATUS_OVR3 0xe6c
#define AUDPKT_CHSTATUS_OVR4 0xe70
#define AUDPKT_CHSTATUS_OVR5 0xe74
#define AUDPKT_CHSTATUS_OVR6 0xe78
#define AUDPKT_CHSTATUS_OVR7 0xe7c
#define AUDPKT_CHSTATUS_OVR8 0xe80
#define AUDPKT_CHSTATUS_OVR9 0xe84
#define AUDPKT_CHSTATUS_OVR10 0xe88
#define AUDPKT_CHSTATUS_OVR11 0xe8c
#define AUDPKT_CHSTATUS_OVR12 0xe90
#define AUDPKT_CHSTATUS_OVR13 0xe94
#define AUDPKT_CHSTATUS_OVR14 0xe98
#define AUDPKT_USRDATA_OVR_MSG_GENERIC0 0xea0
#define AUDPKT_USRDATA_OVR_MSG_GENERIC1 0xea4
#define AUDPKT_USRDATA_OVR_MSG_GENERIC2 0xea8
#define AUDPKT_USRDATA_OVR_MSG_GENERIC3 0xeac
#define AUDPKT_USRDATA_OVR_MSG_GENERIC4 0xeb0
#define AUDPKT_USRDATA_OVR_MSG_GENERIC5 0xeb4
#define AUDPKT_USRDATA_OVR_MSG_GENERIC6 0xeb8
#define AUDPKT_USRDATA_OVR_MSG_GENERIC7 0xebc
#define AUDPKT_USRDATA_OVR_MSG_GENERIC8 0xec0
#define AUDPKT_USRDATA_OVR_MSG_GENERIC9 0xec4
#define AUDPKT_USRDATA_OVR_MSG_GENERIC10 0xec8
#define AUDPKT_USRDATA_OVR_MSG_GENERIC11 0xecc
#define AUDPKT_USRDATA_OVR_MSG_GENERIC12 0xed0
#define AUDPKT_USRDATA_OVR_MSG_GENERIC13 0xed4
#define AUDPKT_USRDATA_OVR_MSG_GENERIC14 0xed8
#define AUDPKT_USRDATA_OVR_MSG_GENERIC15 0xedc
#define AUDPKT_USRDATA_OVR_MSG_GENERIC16 0xee0
#define AUDPKT_USRDATA_OVR_MSG_GENERIC17 0xee4
#define AUDPKT_USRDATA_OVR_MSG_GENERIC18 0xee8
#define AUDPKT_USRDATA_OVR_MSG_GENERIC19 0xeec
#define AUDPKT_USRDATA_OVR_MSG_GENERIC20 0xef0
#define AUDPKT_USRDATA_OVR_MSG_GENERIC21 0xef4
#define AUDPKT_USRDATA_OVR_MSG_GENERIC22 0xef8
#define AUDPKT_USRDATA_OVR_MSG_GENERIC23 0xefc
#define AUDPKT_USRDATA_OVR_MSG_GENERIC24 0xf00
#define AUDPKT_USRDATA_OVR_MSG_GENERIC25 0xf04
#define AUDPKT_USRDATA_OVR_MSG_GENERIC26 0xf08
#define AUDPKT_USRDATA_OVR_MSG_GENERIC27 0xf0c
#define AUDPKT_USRDATA_OVR_MSG_GENERIC28 0xf10
#define AUDPKT_USRDATA_OVR_MSG_GENERIC29 0xf14
#define AUDPKT_USRDATA_OVR_MSG_GENERIC30 0xf18
#define AUDPKT_USRDATA_OVR_MSG_GENERIC31 0xf1c
#define AUDPKT_USRDATA_OVR_MSG_GENERIC32 0xf20
#define AUDPKT_VBIT_OVR0 0xf24
/* CEC Registers */
#define CEC_TX_CONTROL 0x1000
#define CEC_STATUS 0x1004
#define CEC_CONFIG 0x1008
#define CEC_ADDR 0x100c
#define CEC_TX_COUNT 0x1020
#define CEC_TX_DATA3_0 0x1024
#define CEC_TX_DATA7_4 0x1028
#define CEC_TX_DATA11_8 0x102c
#define CEC_TX_DATA15_12 0x1030
#define CEC_RX_COUNT_STATUS 0x1040
#define CEC_RX_DATA3_0 0x1044
#define CEC_RX_DATA7_4 0x1048
#define CEC_RX_DATA11_8 0x104c
#define CEC_RX_DATA15_12 0x1050
#define CEC_LOCK_CONTROL 0x1054
#define CEC_RXQUAL_BITTIME_CONFIG 0x1060
#define CEC_RX_BITTIME_CONFIG 0x1064
#define CEC_TX_BITTIME_CONFIG 0x1068
/* eARC RX CMDC Registers */
#define EARCRX_CMDC_CONFIG0 0x1800
#define EARCRX_XACTREAD_STOP_CFG BIT(26)
#define EARCRX_XACTREAD_RETRY_CFG BIT(25)
#define EARCRX_CMDC_DSCVR_EARCVALID0_TO_DISC1 BIT(24)
#define EARCRX_CMDC_XACT_RESTART_EN BIT(18)
#define EARCRX_CMDC_CONFIG1 0x1804
#define EARCRX_CMDC_CONTROL 0x1808
#define EARCRX_CMDC_HEARTBEAT_LOSS_EN BIT(4)
#define EARCRX_CMDC_DISCOVERY_EN BIT(3)
#define EARCRX_CONNECTOR_HPD BIT(1)
#define EARCRX_CMDC_WHITELIST0_CONFIG 0x180c
#define EARCRX_CMDC_WHITELIST1_CONFIG 0x1810
#define EARCRX_CMDC_WHITELIST2_CONFIG 0x1814
#define EARCRX_CMDC_WHITELIST3_CONFIG 0x1818
#define EARCRX_CMDC_STATUS 0x181c
#define EARCRX_CMDC_XACT_INFO 0x1820
#define EARCRX_CMDC_XACT_ACTION 0x1824
#define EARCRX_CMDC_HEARTBEAT_RXSTAT_SE 0x1828
#define EARCRX_CMDC_HEARTBEAT_STATUS 0x182c
#define EARCRX_CMDC_XACT_WR0 0x1840
#define EARCRX_CMDC_XACT_WR1 0x1844
#define EARCRX_CMDC_XACT_WR2 0x1848
#define EARCRX_CMDC_XACT_WR3 0x184c
#define EARCRX_CMDC_XACT_WR4 0x1850
#define EARCRX_CMDC_XACT_WR5 0x1854
#define EARCRX_CMDC_XACT_WR6 0x1858
#define EARCRX_CMDC_XACT_WR7 0x185c
#define EARCRX_CMDC_XACT_WR8 0x1860
#define EARCRX_CMDC_XACT_WR9 0x1864
#define EARCRX_CMDC_XACT_WR10 0x1868
#define EARCRX_CMDC_XACT_WR11 0x186c
#define EARCRX_CMDC_XACT_WR12 0x1870
#define EARCRX_CMDC_XACT_WR13 0x1874
#define EARCRX_CMDC_XACT_WR14 0x1878
#define EARCRX_CMDC_XACT_WR15 0x187c
#define EARCRX_CMDC_XACT_WR16 0x1880
#define EARCRX_CMDC_XACT_WR17 0x1884
#define EARCRX_CMDC_XACT_WR18 0x1888
#define EARCRX_CMDC_XACT_WR19 0x188c
#define EARCRX_CMDC_XACT_WR20 0x1890
#define EARCRX_CMDC_XACT_WR21 0x1894
#define EARCRX_CMDC_XACT_WR22 0x1898
#define EARCRX_CMDC_XACT_WR23 0x189c
#define EARCRX_CMDC_XACT_WR24 0x18a0
#define EARCRX_CMDC_XACT_WR25 0x18a4
#define EARCRX_CMDC_XACT_WR26 0x18a8
#define EARCRX_CMDC_XACT_WR27 0x18ac
#define EARCRX_CMDC_XACT_WR28 0x18b0
#define EARCRX_CMDC_XACT_WR29 0x18b4
#define EARCRX_CMDC_XACT_WR30 0x18b8
#define EARCRX_CMDC_XACT_WR31 0x18bc
#define EARCRX_CMDC_XACT_WR32 0x18c0
#define EARCRX_CMDC_XACT_WR33 0x18c4
#define EARCRX_CMDC_XACT_WR34 0x18c8
#define EARCRX_CMDC_XACT_WR35 0x18cc
#define EARCRX_CMDC_XACT_WR36 0x18d0
#define EARCRX_CMDC_XACT_WR37 0x18d4
#define EARCRX_CMDC_XACT_WR38 0x18d8
#define EARCRX_CMDC_XACT_WR39 0x18dc
#define EARCRX_CMDC_XACT_WR40 0x18e0
#define EARCRX_CMDC_XACT_WR41 0x18e4
#define EARCRX_CMDC_XACT_WR42 0x18e8
#define EARCRX_CMDC_XACT_WR43 0x18ec
#define EARCRX_CMDC_XACT_WR44 0x18f0
#define EARCRX_CMDC_XACT_WR45 0x18f4
#define EARCRX_CMDC_XACT_WR46 0x18f8
#define EARCRX_CMDC_XACT_WR47 0x18fc
#define EARCRX_CMDC_XACT_WR48 0x1900
#define EARCRX_CMDC_XACT_WR49 0x1904
#define EARCRX_CMDC_XACT_WR50 0x1908
#define EARCRX_CMDC_XACT_WR51 0x190c
#define EARCRX_CMDC_XACT_WR52 0x1910
#define EARCRX_CMDC_XACT_WR53 0x1914
#define EARCRX_CMDC_XACT_WR54 0x1918
#define EARCRX_CMDC_XACT_WR55 0x191c
#define EARCRX_CMDC_XACT_WR56 0x1920
#define EARCRX_CMDC_XACT_WR57 0x1924
#define EARCRX_CMDC_XACT_WR58 0x1928
#define EARCRX_CMDC_XACT_WR59 0x192c
#define EARCRX_CMDC_XACT_WR60 0x1930
#define EARCRX_CMDC_XACT_WR61 0x1934
#define EARCRX_CMDC_XACT_WR62 0x1938
#define EARCRX_CMDC_XACT_WR63 0x193c
#define EARCRX_CMDC_XACT_WR64 0x1940
#define EARCRX_CMDC_XACT_RD0 0x1960
#define EARCRX_CMDC_XACT_RD1 0x1964
#define EARCRX_CMDC_XACT_RD2 0x1968
#define EARCRX_CMDC_XACT_RD3 0x196c
#define EARCRX_CMDC_XACT_RD4 0x1970
#define EARCRX_CMDC_XACT_RD5 0x1974
#define EARCRX_CMDC_XACT_RD6 0x1978
#define EARCRX_CMDC_XACT_RD7 0x197c
#define EARCRX_CMDC_XACT_RD8 0x1980
#define EARCRX_CMDC_XACT_RD9 0x1984
#define EARCRX_CMDC_XACT_RD10 0x1988
#define EARCRX_CMDC_XACT_RD11 0x198c
#define EARCRX_CMDC_XACT_RD12 0x1990
#define EARCRX_CMDC_XACT_RD13 0x1994
#define EARCRX_CMDC_XACT_RD14 0x1998
#define EARCRX_CMDC_XACT_RD15 0x199c
#define EARCRX_CMDC_XACT_RD16 0x19a0
#define EARCRX_CMDC_XACT_RD17 0x19a4
#define EARCRX_CMDC_XACT_RD18 0x19a8
#define EARCRX_CMDC_XACT_RD19 0x19ac
#define EARCRX_CMDC_XACT_RD20 0x19b0
#define EARCRX_CMDC_XACT_RD21 0x19b4
#define EARCRX_CMDC_XACT_RD22 0x19b8
#define EARCRX_CMDC_XACT_RD23 0x19bc
#define EARCRX_CMDC_XACT_RD24 0x19c0
#define EARCRX_CMDC_XACT_RD25 0x19c4
#define EARCRX_CMDC_XACT_RD26 0x19c8
#define EARCRX_CMDC_XACT_RD27 0x19cc
#define EARCRX_CMDC_XACT_RD28 0x19d0
#define EARCRX_CMDC_XACT_RD29 0x19d4
#define EARCRX_CMDC_XACT_RD30 0x19d8
#define EARCRX_CMDC_XACT_RD31 0x19dc
#define EARCRX_CMDC_XACT_RD32 0x19e0
#define EARCRX_CMDC_XACT_RD33 0x19e4
#define EARCRX_CMDC_XACT_RD34 0x19e8
#define EARCRX_CMDC_XACT_RD35 0x19ec
#define EARCRX_CMDC_XACT_RD36 0x19f0
#define EARCRX_CMDC_XACT_RD37 0x19f4
#define EARCRX_CMDC_XACT_RD38 0x19f8
#define EARCRX_CMDC_XACT_RD39 0x19fc
#define EARCRX_CMDC_XACT_RD40 0x1a00
#define EARCRX_CMDC_XACT_RD41 0x1a04
#define EARCRX_CMDC_XACT_RD42 0x1a08
#define EARCRX_CMDC_XACT_RD43 0x1a0c
#define EARCRX_CMDC_XACT_RD44 0x1a10
#define EARCRX_CMDC_XACT_RD45 0x1a14
#define EARCRX_CMDC_XACT_RD46 0x1a18
#define EARCRX_CMDC_XACT_RD47 0x1a1c
#define EARCRX_CMDC_XACT_RD48 0x1a20
#define EARCRX_CMDC_XACT_RD49 0x1a24
#define EARCRX_CMDC_XACT_RD50 0x1a28
#define EARCRX_CMDC_XACT_RD51 0x1a2c
#define EARCRX_CMDC_XACT_RD52 0x1a30
#define EARCRX_CMDC_XACT_RD53 0x1a34
#define EARCRX_CMDC_XACT_RD54 0x1a38
#define EARCRX_CMDC_XACT_RD55 0x1a3c
#define EARCRX_CMDC_XACT_RD56 0x1a40
#define EARCRX_CMDC_XACT_RD57 0x1a44
#define EARCRX_CMDC_XACT_RD58 0x1a48
#define EARCRX_CMDC_XACT_RD59 0x1a4c
#define EARCRX_CMDC_XACT_RD60 0x1a50
#define EARCRX_CMDC_XACT_RD61 0x1a54
#define EARCRX_CMDC_XACT_RD62 0x1a58
#define EARCRX_CMDC_XACT_RD63 0x1a5c
#define EARCRX_CMDC_XACT_RD64 0x1a60
#define EARCRX_CMDC_SYNC_CONFIG 0x1b00
/* eARC RX DMAC Registers */
#define EARCRX_DMAC_PHY_CONTROL 0x1c00
#define EARCRX_DMAC_CONFIG 0x1c08
#define EARCRX_DMAC_CONTROL0 0x1c0c
#define EARCRX_DMAC_AUDIO_EN BIT(1)
#define EARCRX_DMAC_EN BIT(0)
#define EARCRX_DMAC_CONTROL1 0x1c10
#define EARCRX_DMAC_STATUS 0x1c14
#define EARCRX_DMAC_CHSTATUS0 0x1c18
#define EARCRX_DMAC_CHSTATUS1 0x1c1c
#define EARCRX_DMAC_CHSTATUS2 0x1c20
#define EARCRX_DMAC_CHSTATUS3 0x1c24
#define EARCRX_DMAC_CHSTATUS4 0x1c28
#define EARCRX_DMAC_CHSTATUS5 0x1c2c
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC0 0x1c30
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC1 0x1c34
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC2 0x1c38
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC3 0x1c3c
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC4 0x1c40
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC5 0x1c44
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC6 0x1c48
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC7 0x1c4c
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC8 0x1c50
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC9 0x1c54
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC10 0x1c58
#define EARCRX_DMAC_USRDATA_MSG_HDMI_AC11 0x1c5c
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT0 0x1c60
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT1 0x1c64
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT2 0x1c68
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT3 0x1c6c
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT4 0x1c70
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT5 0x1c74
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT6 0x1c78
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT7 0x1c7c
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT8 0x1c80
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT9 0x1c84
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT10 0x1c88
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT11 0x1c8c
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT0 0x1c90
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT1 0x1c94
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT2 0x1c98
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT3 0x1c9c
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT4 0x1ca0
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT5 0x1ca4
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT6 0x1ca8
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT7 0x1cac
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT8 0x1cb0
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT9 0x1cb4
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT10 0x1cb8
#define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT11 0x1cbc
#define EARCRX_DMAC_USRDATA_MSG_GENERIC0 0x1cc0
#define EARCRX_DMAC_USRDATA_MSG_GENERIC1 0x1cc4
#define EARCRX_DMAC_USRDATA_MSG_GENERIC2 0x1cc8
#define EARCRX_DMAC_USRDATA_MSG_GENERIC3 0x1ccc
#define EARCRX_DMAC_USRDATA_MSG_GENERIC4 0x1cd0
#define EARCRX_DMAC_USRDATA_MSG_GENERIC5 0x1cd4
#define EARCRX_DMAC_USRDATA_MSG_GENERIC6 0x1cd8
#define EARCRX_DMAC_USRDATA_MSG_GENERIC7 0x1cdc
#define EARCRX_DMAC_USRDATA_MSG_GENERIC8 0x1ce0
#define EARCRX_DMAC_USRDATA_MSG_GENERIC9 0x1ce4
#define EARCRX_DMAC_USRDATA_MSG_GENERIC10 0x1ce8
#define EARCRX_DMAC_USRDATA_MSG_GENERIC11 0x1cec
#define EARCRX_DMAC_USRDATA_MSG_GENERIC12 0x1cf0
#define EARCRX_DMAC_USRDATA_MSG_GENERIC13 0x1cf4
#define EARCRX_DMAC_USRDATA_MSG_GENERIC14 0x1cf8
#define EARCRX_DMAC_USRDATA_MSG_GENERIC15 0x1cfc
#define EARCRX_DMAC_USRDATA_MSG_GENERIC16 0x1d00
#define EARCRX_DMAC_USRDATA_MSG_GENERIC17 0x1d04
#define EARCRX_DMAC_USRDATA_MSG_GENERIC18 0x1d08
#define EARCRX_DMAC_USRDATA_MSG_GENERIC19 0x1d0c
#define EARCRX_DMAC_USRDATA_MSG_GENERIC20 0x1d10
#define EARCRX_DMAC_USRDATA_MSG_GENERIC21 0x1d14
#define EARCRX_DMAC_USRDATA_MSG_GENERIC22 0x1d18
#define EARCRX_DMAC_USRDATA_MSG_GENERIC23 0x1d1c
#define EARCRX_DMAC_USRDATA_MSG_GENERIC24 0x1d20
#define EARCRX_DMAC_USRDATA_MSG_GENERIC25 0x1d24
#define EARCRX_DMAC_USRDATA_MSG_GENERIC26 0x1d28
#define EARCRX_DMAC_USRDATA_MSG_GENERIC27 0x1d2c
#define EARCRX_DMAC_USRDATA_MSG_GENERIC28 0x1d30
#define EARCRX_DMAC_USRDATA_MSG_GENERIC29 0x1d34
#define EARCRX_DMAC_USRDATA_MSG_GENERIC30 0x1d38
#define EARCRX_DMAC_USRDATA_MSG_GENERIC31 0x1d3c
#define EARCRX_DMAC_USRDATA_MSG_GENERIC32 0x1d40
#define EARCRX_DMAC_CHSTATUS_STREAMER0 0x1d44
#define EARCRX_DMAC_CHSTATUS_STREAMER1 0x1d48
#define EARCRX_DMAC_CHSTATUS_STREAMER2 0x1d4c
#define EARCRX_DMAC_CHSTATUS_STREAMER3 0x1d50
#define EARCRX_DMAC_CHSTATUS_STREAMER4 0x1d54
#define EARCRX_DMAC_CHSTATUS_STREAMER5 0x1d58
#define EARCRX_DMAC_CHSTATUS_STREAMER6 0x1d5c
#define EARCRX_DMAC_CHSTATUS_STREAMER7 0x1d60
#define EARCRX_DMAC_CHSTATUS_STREAMER8 0x1d64
#define EARCRX_DMAC_CHSTATUS_STREAMER9 0x1d68
#define EARCRX_DMAC_CHSTATUS_STREAMER10 0x1d6c
#define EARCRX_DMAC_CHSTATUS_STREAMER11 0x1d70
#define EARCRX_DMAC_CHSTATUS_STREAMER12 0x1d74
#define EARCRX_DMAC_CHSTATUS_STREAMER13 0x1d78
#define EARCRX_DMAC_CHSTATUS_STREAMER14 0x1d7c
#define EARCRX_DMAC_USRDATA_STREAMER0 0x1d80
/* Main Unit Interrupt Registers */
#define MAIN_INTVEC_INDEX 0x3000
#define MAINUNIT_0_INT_STATUS 0x3010
#define MAINUNIT_0_INT_MASK_N 0x3014
#define MAINUNIT_0_INT_CLEAR 0x3018
#define MAINUNIT_0_INT_FORCE 0x301c
#define MAINUNIT_1_INT_STATUS 0x3020
#define FLT_EXIT_TO_LTSL_IRQ BIT(22)
#define FLT_EXIT_TO_LTS4_IRQ BIT(21)
#define FLT_EXIT_TO_LTSP_IRQ BIT(20)
#define SCDC_NACK_RCVD_IRQ BIT(12)
#define SCDC_RR_REPLY_STOP_IRQ BIT(11)
#define SCDC_UPD_FLAGS_CLR_IRQ BIT(10)
#define SCDC_UPD_FLAGS_CHG_IRQ BIT(9)
#define SCDC_UPD_FLAGS_RD_IRQ BIT(8)
#define I2CM_NACK_RCVD_IRQ BIT(2)
#define I2CM_READ_REQUEST_IRQ BIT(1)
#define I2CM_OP_DONE_IRQ BIT(0)
#define MAINUNIT_1_INT_MASK_N 0x3024
#define I2CM_NACK_RCVD_MASK_N BIT(2)
#define I2CM_READ_REQUEST_MASK_N BIT(1)
#define I2CM_OP_DONE_MASK_N BIT(0)
#define MAINUNIT_1_INT_CLEAR 0x3028
#define I2CM_NACK_RCVD_CLEAR BIT(2)
#define I2CM_READ_REQUEST_CLEAR BIT(1)
#define I2CM_OP_DONE_CLEAR BIT(0)
#define MAINUNIT_1_INT_FORCE 0x302c
/* AVPUNIT Interrupt Registers */
#define AVP_INTVEC_INDEX 0x3800
#define AVP_0_INT_STATUS 0x3810
#define AVP_0_INT_MASK_N 0x3814
#define AVP_0_INT_CLEAR 0x3818
#define AVP_0_INT_FORCE 0x381c
#define AVP_1_INT_STATUS 0x3820
#define AVP_1_INT_MASK_N 0x3824
#define HDCP14_AUTH_CHG_MASK_N BIT(6)
#define AVP_1_INT_CLEAR 0x3828
#define AVP_1_INT_FORCE 0x382c
#define AVP_2_INT_STATUS 0x3830
#define AVP_2_INT_MASK_N 0x3834
#define AVP_2_INT_CLEAR 0x3838
#define AVP_2_INT_FORCE 0x383c
#define AVP_3_INT_STATUS 0x3840
#define AVP_3_INT_MASK_N 0x3844
#define AVP_3_INT_CLEAR 0x3848
#define AVP_3_INT_FORCE 0x384c
#define AVP_4_INT_STATUS 0x3850
#define AVP_4_INT_MASK_N 0x3854
#define AVP_4_INT_CLEAR 0x3858
#define AVP_4_INT_FORCE 0x385c
#define AVP_5_INT_STATUS 0x3860
#define AVP_5_INT_MASK_N 0x3864
#define AVP_5_INT_CLEAR 0x3868
#define AVP_5_INT_FORCE 0x386c
#define AVP_6_INT_STATUS 0x3870
#define AVP_6_INT_MASK_N 0x3874
#define AVP_6_INT_CLEAR 0x3878
#define AVP_6_INT_FORCE 0x387c
/* CEC Interrupt Registers */
#define CEC_INT_STATUS 0x4000
#define CEC_INT_MASK_N 0x4004
#define CEC_INT_CLEAR 0x4008
#define CEC_INT_FORCE 0x400c
/* eARC RX Interrupt Registers */
#define EARCRX_INTVEC_INDEX 0x4800
#define EARCRX_0_INT_STATUS 0x4810
#define EARCRX_CMDC_DISCOVERY_TIMEOUT_IRQ BIT(9)
#define EARCRX_CMDC_DISCOVERY_DONE_IRQ BIT(8)
#define EARCRX_0_INT_MASK_N 0x4814
#define EARCRX_0_INT_CLEAR 0x4818
#define EARCRX_0_INT_FORCE 0x481c
#define EARCRX_1_INT_STATUS 0x4820
#define EARCRX_1_INT_MASK_N 0x4824
#define EARCRX_1_INT_CLEAR 0x4828
#define EARCRX_1_INT_FORCE 0x482c
#endif /* __DW_HDMI_QP_H__ */

View File

@ -3503,6 +3503,9 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
if (hdmi->version >= 0x200a)
hdmi->bridge.ycbcr_420_allowed = plat_data->ycbcr_420_allowed;
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;

View File

@ -1707,7 +1707,7 @@ static void tc_bridge_mode_set(struct drm_bridge *bridge,
{
struct tc_data *tc = bridge_to_tc(bridge);
drm_mode_copy(&tc->mode, mode);
drm_mode_copy(&tc->mode, adj);
}
static const struct drm_edid *tc_edid_read(struct drm_bridge *bridge,

View File

@ -443,7 +443,9 @@ static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
ret = -EINVAL;
ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
if (ep) {
ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines);
if (ret)
ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
of_node_put(ep);
}

View File

@ -94,7 +94,7 @@ static const struct regmap_access_table dlpc_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(dlpc_volatile_ranges),
};
static struct regmap_config dlpc_regmap_config = {
static const struct regmap_config dlpc_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = WR_DSI_PORT_EN,

View File

@ -90,7 +90,12 @@ CONFIG_QCOM_GPI_DMA=y
CONFIG_USB_ONBOARD_DEV=y
CONFIG_NVMEM_QCOM_QFPROM=y
CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y
CONFIG_REGULATOR_QCOM_REFGEN=y
CONFIG_TYPEC_MUX_FSA4480=y
CONFIG_QCOM_PMIC_GLINK=y
CONFIG_UCSI_PMIC_GLINK=y
CONFIG_QRTR=y
CONFIG_QRTR_SMD=y
# db410c ethernet
CONFIG_USB_RTL8152=y

View File

@ -30,6 +30,7 @@ if [[ "$KERNEL_ARCH" = "arm64" ]]; then
DEVICE_TREES+=" arch/arm64/boot/dts/mediatek/mt8192-asurada-spherion-r0.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r5.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sc7180-trogdor-kingoftown.dtb"
DEVICE_TREES+=" arch/arm64/boot/dts/qcom/sm8350-hdk.dtb"
elif [[ "$KERNEL_ARCH" = "arm" ]]; then
GCC_ARCH="arm-linux-gnueabihf"
DEBIAN_ARCH="armhf"

View File

@ -162,6 +162,22 @@ msm:sdm845:
script:
- ./install/bare-metal/cros-servo.sh
msm:sm8350-hdk:
extends:
- .lava-igt:arm64
stage: msm
parallel: 4
variables:
BOOT_METHOD: fastboot
DEVICE_TYPE: sm8350-hdk
DRIVER_NAME: msm
DTB: ${DEVICE_TYPE}
FARM: collabora
GPU_VERSION: ${DEVICE_TYPE}
KERNEL_IMAGE_NAME: "Image.gz"
KERNEL_IMAGE_TYPE: ""
RUNNER_TAG: mesa-ci-x86-64-lava-sm8350-hdk
.rockchip-device:
variables:
DTB: ${DEVICE_TYPE}
@ -286,6 +302,15 @@ i915:tgl:
GPU_VERSION: tgl
RUNNER_TAG: mesa-ci-x86-64-lava-acer-cp514-2h-1130g7-volteer
i915:jsl:
extends:
- .i915
parallel: 4
variables:
DEVICE_TYPE: acer-cb317-1h-c3z6-dedede
GPU_VERSION: jsl
RUNNER_TAG: mesa-ci-x86-64-lava-acer-cb317-1h-c3z6-dedede
.amdgpu:
extends:
- .lava-igt:x86_64

View File

@ -0,0 +1,51 @@
core_setmaster@master-drop-set-user,Fail
i915_module_load@load,Fail
i915_module_load@reload,Fail
i915_module_load@reload-no-display,Fail
i915_module_load@resize-bar,Fail
i915_pm_rpm@gem-execbuf-stress,Timeout
i915_pm_rpm@module-reload,Fail
kms_flip@plain-flip-fb-recreate,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-linear-to-64bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-xtile-to-64bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-linear-to-32bpp-linear-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-16bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-xtile-to-32bpp-xtile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-16bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-downscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilegen12rcccs-upscaling,Fail
kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs-downscaling,Fail
kms_lease@lease-uevent,Fail
kms_pm_rpm@legacy-planes,Timeout
kms_pm_rpm@legacy-planes-dpms,Timeout
kms_pm_rpm@modeset-stress-extra-wait,Timeout
kms_pm_rpm@universal-planes,Timeout
kms_pm_rpm@universal-planes-dpms,Timeout
kms_rotation_crc@multiplane-rotation,Fail
kms_rotation_crc@multiplane-rotation-cropping-bottom,Fail
kms_rotation_crc@multiplane-rotation-cropping-top,Fail
perf@i915-ref-count,Fail
perf_pmu@busy-accuracy-50,Fail
perf_pmu@module-unload,Fail
perf_pmu@most-busy-idle-check-all,Fail
perf_pmu@rc6,Crash
sysfs_heartbeat_interval@long,Timeout
sysfs_heartbeat_interval@off,Timeout
sysfs_preempt_timeout@off,Timeout
sysfs_timeslice_duration@off,Timeout
xe_module_load@force-load,Fail
xe_module_load@load,Fail
xe_module_load@many-reload,Fail
xe_module_load@reload,Fail
xe_module_load@reload-no-display,Fail

View File

@ -0,0 +1,13 @@
# Board Name: acer-cb317-1h-c3z6-dedede
# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12475
# Failure Rate: 100
# IGT Version: 1.28-ga73311079
# Linux Version: 6.12.0-rc1
kms_flip@flip-vs-panning-interruptible
# Board Name: acer-cb317-1h-c3z6-dedede
# Bug Report: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12476
# Failure Rate: 100
# IGT Version: 1.28-ga73311079
# Linux Version: 6.12.0-rc1
kms_universal_plane@cursor-fb-leak

View File

@ -0,0 +1,20 @@
# Suspend to RAM seems to be broken on this machine
.*suspend.*
# Skip driver specific tests
^amdgpu.*
^msm.*
nouveau_.*
^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
# GEM tests takes ~1000 hours, so skip it
gem_.*
# trap_err
i915_pm_rc6_residency.*
# Hangs the machine and timeout occurs
i915_pm_rpm@system-hibernate*

View File

@ -0,0 +1,15 @@
kms_3d,Fail
kms_cursor_legacy@forked-bo,Fail
kms_cursor_legacy@forked-move,Fail
kms_cursor_legacy@single-bo,Fail
kms_cursor_legacy@single-move,Fail
kms_cursor_legacy@torture-bo,Fail
kms_cursor_legacy@torture-move,Fail
kms_hdmi_inject@inject-4k,Fail
kms_lease@lease-uevent,Fail
kms_plane_alpha_blend@alpha-7efc,Fail
kms_plane_alpha_blend@alpha-basic,Fail
kms_plane_alpha_blend@alpha-opaque-fb,Fail
kms_plane_alpha_blend@alpha-transparent-fb,Fail
kms_plane_alpha_blend@constant-alpha-max,Fail
msm/msm_recovery@gpu-fault-parallel,Fail

View File

@ -0,0 +1,6 @@
# Board Name: sm8350-hdk
# Bug Report: https://gitlab.freedesktop.org/drm/msm/-/issues/65
# Failure Rate: 100
# IGT Version: 1.28-ga73311079
# Linux Version: 6.12.0-rc1
msm/msm_recovery@gpu-fault

View File

@ -0,0 +1,211 @@
# Skip driver specific tests
^amdgpu.*
nouveau_.*
^panfrost.*
^v3d.*
^vc4.*
^vmwgfx*
# Skip intel specific tests
gem_.*
i915_.*
tools_test.*
# Currently fails and causes coverage loss for other tests
# since core_getversion also fails.
core_hotunplug.*
# Kernel panic
msm/msm_mapping@ring
# DEBUG - Begin test msm/msm_mapping@ring
# [ 200.874157] [IGT] msm_mapping: executing
# [ 200.880236] [IGT] msm_mapping: starting subtest ring
# [ 200.895243] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=PERMISSION source=CP (0,0,0,1)
# [ 200.906885] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 200.917625] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 200.928353] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 200.939084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 200.949815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 200.950227] platform 3d6a000.gmu: [drm:a6xx_hfi_send_msg.constprop.0] *ERROR* Message HFI_H2F_MSG_GX_BW_PERF_VOTE id 25 timed out waiting for response
# [ 200.960467] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 200.960500] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 200.995966] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 201.006702] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 204.213387] platform 3d6a000.gmu: GMU watchdog expired
# [ 205.909103] adreno_fault_handler: 224274 callbacks suppressed
# [ 205.909108] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 205.925794] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 205.936529] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 205.947263] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 205.957997] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 205.968731] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 205.979465] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 205.990199] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 206.000932] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 206.011666] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 210.925090] adreno_fault_handler: 224511 callbacks suppressed
# [ 210.925096] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 210.941781] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 210.952517] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 210.963250] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 210.973985] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 210.984719] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 210.995452] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 211.006186] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 211.016921] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 211.027655] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 215.937100] adreno_fault_handler: 223760 callbacks suppressed
# [ 215.937106] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 215.953824] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 215.964573] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 215.975321] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 215.986067] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 215.996815] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 216.007563] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 216.018310] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 216.029057] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 216.039805] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 220.945182] adreno_fault_handler: 222822 callbacks suppressed
# [ 220.945188] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 220.961897] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 220.972645] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 220.983392] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 220.994140] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 221.004889] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 221.015636] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 221.026383] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 221.037130] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 221.047879] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 225.953179] adreno_fault_handler: 223373 callbacks suppressed
# [ 225.953184] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 225.969883] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 225.980617] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 225.991350] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 226.002084] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 226.012818] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 226.023551] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 226.034285] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 226.045019] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 226.055753] *** gpu fault: ttbr0=00000001160d6000 iova=0001000000001000 dir=WRITE type=UNKNOWN source=CP (0,0,0,1)
# [ 228.001087] rcu: INFO: rcu_preempt detected stalls on CPUs/tasks:
# [ 228.007412] rcu: 0-....: (524 ticks this GP) idle=4ffc/1/0x4000000000000000 softirq=9367/9368 fqs=29
# [ 228.017097] rcu: (detected by 1, t=6504 jiffies, g=29837, q=6 ncpus=8)
# [ 228.023959] Sending NMI from CPU 1 to CPUs 0:
# [ 228.161164] watchdog: BUG: soft lockup - CPU#0 stuck for 26s! [gpu-worker:150]
# [ 228.173169] Modules linked in:
# [ 228.176361] irq event stamp: 2809595
# [ 228.180083] hardirqs last enabled at (2809594): [<ffffd3bc52cb91ac>] exit_to_kernel_mode+0x38/0x130
# [ 228.189547] hardirqs last disabled at (2809595): [<ffffd3bc52cb92c8>] el1_interrupt+0x24/0x64
# [ 228.198377] softirqs last enabled at (1669060): [<ffffd3bc51936f98>] handle_softirqs+0x4a4/0x4bc
# [ 228.207565] softirqs last disabled at (1669063): [<ffffd3bc518905a4>] __do_softirq+0x14/0x20
# [ 228.216316] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Not tainted 6.12.0-rc1-g685d530dc83a #1
# [ 228.224966] Hardware name: Qualcomm Technologies, Inc. SM8350 HDK (DT)
# [ 228.231730] pstate: 00400005 (nzcv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--)
# [ 228.238948] pc : tcp_fastretrans_alert+0x0/0x884
# [ 228.243751] lr : tcp_ack+0x9d4/0x1238
# [ 228.247562] sp : ffff8000800036d0
# [ 228.251011] x29: ffff8000800036d0 x28: 000000000000000c x27: 0000000000000001
# [ 228.258421] x26: ffff704683cd8000 x25: 0000000000000403 x24: ffff70468b7e7c00
# [ 228.265829] x23: 0000000000000000 x22: 0000000000000004 x21: 000000000000140f
# [ 228.273237] x20: 00000000f1de79f7 x19: 00000000f1de7a5f x18: 0000000000000001
# [ 228.280644] x17: 00000000302d6762 x16: 632d6b64682d3035 x15: ffff704683c39000
# [ 228.288051] x14: 00000000000e2000 x13: ffff704683df6000 x12: 0000000000000000
# [ 228.295458] x11: 00000000000000a0 x10: 0000000000000000 x9 : ffffd3bc551a9a20
# [ 228.302865] x8 : ffff800080003640 x7 : 0000000000040faa x6 : 00000000ffff9634
# [ 228.310271] x5 : 00000000000005a8 x4 : ffff800080003788 x3 : ffff80008000377c
# [ 228.317679] x2 : 0000000000000000 x1 : 00000000f1de79f7 x0 : ffff704683cd8000
# [ 228.325087] Call trace:
# [ 228.327640] tcp_fastretrans_alert+0x0/0x884
# [ 228.332082] tcp_rcv_established+0x7c4/0x8bc
# [ 228.336523] tcp_v4_do_rcv+0x244/0x31c
# [ 228.340429] tcp_v4_rcv+0xcc4/0x1084
# [ 228.344155] ip_protocol_deliver_rcu+0x64/0x218
# [ 228.348862] ip_local_deliver_finish+0xb8/0x1ac
# [ 228.353566] ip_local_deliver+0x84/0x254
# [ 228.357651] ip_sublist_rcv_finish+0x84/0xb8
# [ 228.362092] ip_sublist_rcv+0x11c/0x2f0
# [ 228.366081] ip_list_rcv+0xfc/0x190
# [ 228.369711] __netif_receive_skb_list_core+0x174/0x208
# [ 228.375050] netif_receive_skb_list_internal+0x204/0x3ac
# [ 228.380564] napi_complete_done+0x64/0x1d0
# [ 228.384826] lan78xx_poll+0x71c/0x9cc
# [ 228.388638] __napi_poll.constprop.0+0x3c/0x254
# [ 228.393341] net_rx_action+0x164/0x2d4
# [ 228.397244] handle_softirqs+0x128/0x4bc
# [ 228.401329] __do_softirq+0x14/0x20
# [ 228.404958] ____do_softirq+0x10/0x1c
# [ 228.408769] call_on_irq_stack+0x24/0x4c
# [ 228.412854] do_softirq_own_stack+0x1c/0x28
# [ 228.417199] __irq_exit_rcu+0x124/0x164
# [ 228.421188] irq_exit_rcu+0x10/0x38
# [ 228.424819] el1_interrupt+0x38/0x64
# [ 228.428546] el1h_64_irq_handler+0x18/0x24
# [ 228.432807] el1h_64_irq+0x64/0x68
# [ 228.436354] lock_acquire+0x214/0x32c
# [ 228.440166] __mutex_lock+0x98/0x3d0
# [ 228.443893] mutex_lock_nested+0x24/0x30
# [ 228.447978] fault_worker+0x58/0x184
# [ 228.451704] kthread_worker_fn+0xf4/0x320
# [ 228.455873] kthread+0x114/0x118
# [ 228.459243] ret_from_fork+0x10/0x20
# [ 228.462970] Kernel panic - not syncing: softlockup: hung tasks
# [ 228.469018] CPU: 0 UID: 0 PID: 150 Comm: gpu-worker Tainted: G L 6.12.0-rc1-g685d530dc83a #1
# [ 228.479190] Tainted: [L]=SOFTLOCKUP
# [ 228.482815] Hardware name: Qualcomm Technologies, Inc. SM8350 HDK (DT)
# [ 228.489574] Call trace:
# [ 228.492125] dump_backtrace+0x98/0xf0
# [ 228.495931] show_stack+0x18/0x24
# [ 228.499380] dump_stack_lvl+0x38/0xd0
# [ 228.503189] dump_stack+0x18/0x24
# [ 228.506639] panic+0x3bc/0x41c
# [ 228.509826] watchdog_timer_fn+0x254/0x2e4
# [ 228.514087] __hrtimer_run_queues+0x3b0/0x40c
# [ 228.518612] hrtimer_interrupt+0xe8/0x248
# [ 228.522777] arch_timer_handler_virt+0x2c/0x44
# [ 228.527399] handle_percpu_devid_irq+0xa8/0x2c4
# [ 228.532103] generic_handle_domain_irq+0x2c/0x44
# [ 228.536902] gic_handle_irq+0x4c/0x11c
# [ 228.540802] do_interrupt_handler+0x50/0x84
# [ 228.545146] el1_interrupt+0x34/0x64
# [ 228.548870] el1h_64_irq_handler+0x18/0x24
# [ 228.553128] el1h_64_irq+0x64/0x68
# [ 228.556672] tcp_fastretrans_alert+0x0/0x884
# [ 228.561110] tcp_rcv_established+0x7c4/0x8bc
# [ 228.565548] tcp_v4_do_rcv+0x244/0x31c
# [ 228.569449] tcp_v4_rcv+0xcc4/0x1084
# [ 228.573171] ip_protocol_deliver_rcu+0x64/0x218
# [ 228.577873] ip_local_deliver_finish+0xb8/0x1ac
# [ 228.582574] ip_local_deliver+0x84/0x254
# [ 228.586655] ip_sublist_rcv_finish+0x84/0xb8
# [ 228.591092] ip_sublist_rcv+0x11c/0x2f0
# [ 228.595079] ip_list_rcv+0xfc/0x190
# [ 228.598706] __netif_receive_skb_list_core+0x174/0x208
# [ 228.604039] netif_receive_skb_list_internal+0x204/0x3ac
# [ 228.609549] napi_complete_done+0x64/0x1d0
# [ 228.613808] lan78xx_poll+0x71c/0x9cc
# [ 228.617614] __napi_poll.constprop.0+0x3c/0x254
# [ 228.622314] net_rx_action+0x164/0x2d4
# [ 228.626214] handle_softirqs+0x128/0x4bc
# [ 228.630297] __do_softirq+0x14/0x20
# [ 228.633923] ____do_softirq+0x10/0x1c
# [ 228.637729] call_on_irq_stack+0x24/0x4c
# [ 228.641811] do_softirq_own_stack+0x1c/0x28
# [ 228.646152] __irq_exit_rcu+0x124/0x164
# [ 228.650139] irq_exit_rcu+0x10/0x38
# [ 228.653768] el1_interrupt+0x38/0x64
# [ 228.657491] el1h_64_irq_handler+0x18/0x24
# [ 228.661750] el1h_64_irq+0x64/0x68
# [ 228.665293] lock_acquire+0x214/0x32c
# [ 228.669098] __mutex_lock+0x98/0x3d0
# [ 228.672821] mutex_lock_nested+0x24/0x30
# [ 228.676903] fault_worker+0x58/0x184
# [ 228.680626] kthread_worker_fn+0xf4/0x320
# [ 228.684790] kthread+0x114/0x118
# [ 228.688156] ret_from_fork+0x10/0x20
# [ 228.691882] SMP: stopping secondary CPUs
# [ 229.736843] SMP: failed to stop secondary CPUs 1,4
# [ 229.741827] Kernel Offset: 0x53bbd1880000 from 0xffff800080000000
# [ 229.748159] PHYS_OFFSET: 0xfff08fba80000000
# [ 229.752499] CPU features: 0x18,00000017,00200928,4200720b
# [ 229.758095] Memory Limit: none
# [ 229.761291] ---[ end Kernel panic - not syncing: softlockup: hung tasks ]---

View File

@ -3,7 +3,7 @@
config DRM_DISPLAY_DP_AUX_BUS
tristate
depends on DRM
depends on OF || COMPILE_TEST
depends on OF
config DRM_DISPLAY_HELPER
tristate

View File

@ -397,11 +397,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
bridge_connector->encoder = encoder;
/*
* TODO: Handle doublescan_allowed, stereo_allowed and
* ycbcr_420_allowed.
* TODO: Handle doublescan_allowed and stereo_allowed.
*/
connector = &bridge_connector->base;
connector->interlace_allowed = true;
connector->ycbcr_420_allowed = true;
/*
* Initialise connector status handling. First locate the furthest
@ -414,6 +414,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
drm_for_each_bridge_in_chain(encoder, bridge) {
if (!bridge->interlace_allowed)
connector->interlace_allowed = false;
if (!bridge->ycbcr_420_allowed)
connector->ycbcr_420_allowed = false;
if (bridge->ops & DRM_BRIDGE_OP_EDID)
bridge_connector->bridge_edid = bridge;

View File

@ -1132,6 +1132,8 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
drm_printf(p, "connector[%u]: %s\n", connector->base.id, connector->name);
drm_printf(p, "\tcrtc=%s\n", state->crtc ? state->crtc->name : "(null)");
drm_printf(p, "\tself_refresh_aware=%d\n", state->self_refresh_aware);
drm_printf(p, "\tinterlace_allowed=%d\n", connector->interlace_allowed);
drm_printf(p, "\tycbcr_420_allowed=%d\n", connector->ycbcr_420_allowed);
drm_printf(p, "\tmax_requested_bpc=%d\n", state->max_requested_bpc);
drm_printf(p, "\tcolorspace=%s\n", drm_get_colorspace_name(state->colorspace));

View File

@ -10,7 +10,6 @@
#include <linux/slab.h>
#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@ -172,99 +171,6 @@ void drm_client_release(struct drm_client_dev *client)
}
EXPORT_SYMBOL(drm_client_release);
/**
* drm_client_dev_unregister - Unregister clients
* @dev: DRM device
*
* This function releases all clients by calling each client's
* &drm_client_funcs.unregister callback. The callback function
* is responsible for releasing all resources including the client
* itself.
*
* The helper drm_dev_unregister() calls this function. Drivers
* that use it don't need to call this function themselves.
*/
void drm_client_dev_unregister(struct drm_device *dev)
{
struct drm_client_dev *client, *tmp;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
list_del(&client->list);
if (client->funcs && client->funcs->unregister) {
client->funcs->unregister(client);
} else {
drm_client_release(client);
kfree(client);
}
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_unregister);
/**
* drm_client_dev_hotplug - Send hotplug event to clients
* @dev: DRM device
*
* This function calls the &drm_client_funcs.hotplug callback on the attached clients.
*
* drm_kms_helper_hotplug_event() calls this function, so drivers that use it
* don't need to call this function themselves.
*/
void drm_client_dev_hotplug(struct drm_device *dev)
{
struct drm_client_dev *client;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
if (!dev->mode_config.num_connector) {
drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n");
return;
}
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->funcs || !client->funcs->hotplug)
continue;
if (client->hotplug_failed)
continue;
ret = client->funcs->hotplug(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (ret)
client->hotplug_failed = true;
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_hotplug);
void drm_client_dev_restore(struct drm_device *dev)
{
struct drm_client_dev *client;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->funcs || !client->funcs->restore)
continue;
ret = client->funcs->restore(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (!ret) /* The first one to return zero gets the privilege to restore */
break;
}
mutex_unlock(&dev->clientlist_mutex);
}
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
if (buffer->gem) {
@ -584,30 +490,3 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re
0, 0, NULL, 0);
}
EXPORT_SYMBOL(drm_client_framebuffer_flush);
#ifdef CONFIG_DEBUG_FS
static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list)
drm_printf(&p, "%s\n", client->name);
mutex_unlock(&dev->clientlist_mutex);
return 0;
}
static const struct drm_debugfs_info drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
void drm_client_debugfs_init(struct drm_device *dev)
{
drm_debugfs_add_files(dev, drm_client_debugfs_list,
ARRAY_SIZE(drm_client_debugfs_list));
}
#endif

View File

@ -0,0 +1,197 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright 2018 Noralf Trønnes
*/
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
/**
 * drm_client_dev_unregister - Unregister clients
 * @dev: DRM device
 *
 * This function releases all clients by calling each client's
 * &drm_client_funcs.unregister callback. The callback function
 * is responsible for releasing all resources including the client
 * itself.
 *
 * The helper drm_dev_unregister() calls this function. Drivers
 * that use it don't need to call this function themselves.
 */
void drm_client_dev_unregister(struct drm_device *dev)
{
struct drm_client_dev *client, *tmp;
/* Clients only exist on mode-setting capable devices. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
mutex_lock(&dev->clientlist_mutex);
/* _safe variant: each entry is unlinked (and possibly freed) while iterating. */
list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
list_del(&client->list);
if (client->funcs && client->funcs->unregister) {
/* The callback owns cleanup, including freeing the client itself. */
client->funcs->unregister(client);
} else {
/* No unregister callback: generic release, then free here. */
drm_client_release(client);
kfree(client);
}
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_unregister);
/**
 * drm_client_dev_hotplug - Send hotplug event to clients
 * @dev: DRM device
 *
 * This function calls the &drm_client_funcs.hotplug callback on the attached clients.
 *
 * drm_kms_helper_hotplug_event() calls this function, so drivers that use it
 * don't need to call this function themselves.
 */
void drm_client_dev_hotplug(struct drm_device *dev)
{
struct drm_client_dev *client;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
/* Nothing to report if the device has no connectors at all. */
if (!dev->mode_config.num_connector) {
drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n");
return;
}
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->funcs || !client->funcs->hotplug)
continue;
/* Don't keep poking clients whose earlier hotplug handling failed. */
if (client->hotplug_failed)
continue;
ret = client->funcs->hotplug(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
/* A non-zero return permanently disables hotplug for this client. */
if (ret)
client->hotplug_failed = true;
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_hotplug);
/*
 * Walk the client list in order and ask each client to restore the display.
 * Iteration stops at the first client whose &drm_client_funcs.restore
 * callback returns zero; remaining clients are not called.
 */
void drm_client_dev_restore(struct drm_device *dev)
{
struct drm_client_dev *client;
int ret;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->funcs || !client->funcs->restore)
continue;
ret = client->funcs->restore(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (!ret) /* The first one to return zero gets the privilege to restore */
break;
}
mutex_unlock(&dev->clientlist_mutex);
}
/*
 * Suspend a single client via its optional &drm_client_funcs.suspend callback.
 * @holds_console_lock is forwarded to the callback unchanged.
 * Warns (once) if called on an already-suspended client and bails out early.
 */
static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
{
struct drm_device *dev = client->dev;
int ret = 0;
if (drm_WARN_ON_ONCE(dev, client->suspended))
return 0;
if (client->funcs && client->funcs->suspend)
ret = client->funcs->suspend(client, holds_console_lock);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
/* Marked suspended even if the callback failed (ret != 0). */
client->suspended = true;
return ret;
}
/*
 * Suspend all not-yet-suspended clients of @dev. Per-client errors are
 * logged by drm_client_suspend() but otherwise ignored here.
 * @holds_console_lock: whether the caller already holds the console lock;
 * passed through to each client's suspend callback.
 */
void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock)
{
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->suspended)
drm_client_suspend(client, holds_console_lock);
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_suspend);
/*
 * Resume a single client via its optional &drm_client_funcs.resume callback.
 * Mirror of drm_client_suspend(): warns (once) if the client is not
 * currently suspended, and clears the suspended flag even on callback failure.
 */
static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock)
{
struct drm_device *dev = client->dev;
int ret = 0;
if (drm_WARN_ON_ONCE(dev, !client->suspended))
return 0;
if (client->funcs && client->funcs->resume)
ret = client->funcs->resume(client, holds_console_lock);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
client->suspended = false;
return ret;
}
/*
 * Resume all currently-suspended clients of @dev. Counterpart to
 * drm_client_dev_suspend(); per-client errors are logged but ignored.
 */
void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock)
{
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (client->suspended)
drm_client_resume(client, holds_console_lock);
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_resume);
#ifdef CONFIG_DEBUG_FS
static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
{
struct drm_debugfs_entry *entry = m->private;
struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list)
drm_printf(&p, "%s\n", client->name);
mutex_unlock(&dev->clientlist_mutex);
return 0;
}
static const struct drm_debugfs_info drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
void drm_client_debugfs_init(struct drm_device *dev)
{
drm_debugfs_add_files(dev, drm_client_debugfs_list,
ARRAY_SIZE(drm_client_debugfs_list));
}
#endif

View File

@ -64,3 +64,6 @@ void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color
drm_client_setup_with_fourcc(dev, fourcc);
}
EXPORT_SYMBOL(drm_client_setup_with_color_mode);
MODULE_DESCRIPTION("In-kernel DRM clients");
MODULE_LICENSE("GPL and additional rights");

View File

@ -32,7 +32,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>

View File

@ -38,7 +38,7 @@
#include <drm/drm_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>

View File

@ -697,6 +697,7 @@ void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u3
}
EXPORT_SYMBOL(drm_fb_helper_damage_area);
#ifdef CONFIG_FB_DEFERRED_IO
/**
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
* @info: fb_info struct pointer
@ -740,6 +741,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli
}
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
#endif
/**
* drm_fb_helper_set_suspend - wrapper around fb_set_suspend

View File

@ -61,11 +61,37 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
return ret;
}
/*
 * &drm_client_funcs.suspend for the fbdev client: suspend the fb_helper.
 * Picks the locked vs. unlocked fb-helper variant depending on whether the
 * caller already holds the console lock. Always succeeds.
 */
static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (holds_console_lock)
drm_fb_helper_set_suspend(fb_helper, true);
else
drm_fb_helper_set_suspend_unlocked(fb_helper, true);
return 0;
}
/*
 * &drm_client_funcs.resume for the fbdev client: mirror of
 * drm_fbdev_client_suspend(), un-suspending the fb_helper with the
 * appropriate locked/unlocked variant. Always succeeds.
 */
static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (holds_console_lock)
drm_fb_helper_set_suspend(fb_helper, false);
else
drm_fb_helper_set_suspend_unlocked(fb_helper, false);
return 0;
}
/* drm_client callbacks wiring the generic fbdev emulation into the client core. */
static const struct drm_client_funcs drm_fbdev_client_funcs = {
.owner = THIS_MODULE,
.unregister = drm_fbdev_client_unregister,
.restore = drm_fbdev_client_restore,
.hotplug = drm_fbdev_client_hotplug,
.suspend = drm_fbdev_client_suspend,
.resume = drm_fbdev_client_resume,
};
/**
@ -76,8 +102,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* This function sets up fbdev emulation. Restore, hotplug events and
* teardown are all taken care of. Drivers that do suspend/resume need
* to call drm_fb_helper_set_suspend_unlocked() themselves. Simple
* drivers might use drm_mode_config_helper_suspend().
* to call drm_client_dev_suspend() and drm_client_dev_resume() by
* themselves. Simple drivers might use drm_mode_config_helper_suspend().
*
* This function is safe to call even when there are no connectors present.
* Setup will be retried on the next hotplug event.

View File

@ -40,7 +40,7 @@
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

View File

@ -48,6 +48,14 @@ struct drm_prime_file_private;
struct drm_printer;
struct drm_vblank_crtc;
/* drm_client_event.c */
#if defined(CONFIG_DRM_CLIENT)
void drm_client_debugfs_init(struct drm_device *dev);
#else
static inline void drm_client_debugfs_init(struct drm_device *dev)
{ }
#endif
/* drm_file.c */
extern struct mutex drm_global_mutex;
bool drm_dev_needs_global_mutex(struct drm_device *dev);

View File

@ -21,7 +21,7 @@
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
@ -185,7 +185,7 @@ EXPORT_SYMBOL(drm_crtc_init);
* Zero on success, negative error code on error.
*
* See also:
* drm_kms_helper_poll_disable() and drm_fb_helper_set_suspend_unlocked().
* drm_kms_helper_poll_disable() and drm_client_dev_suspend().
*/
int drm_mode_config_helper_suspend(struct drm_device *dev)
{
@ -199,10 +199,11 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_disable(dev);
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
drm_client_dev_suspend(dev, false);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
drm_client_dev_resume(dev, false);
/*
* Don't enable polling if it was never initialized
*/
@ -230,7 +231,7 @@ EXPORT_SYMBOL(drm_mode_config_helper_suspend);
* Zero on success, negative error code on error.
*
* See also:
* drm_fb_helper_set_suspend_unlocked() and drm_kms_helper_poll_enable().
* drm_client_dev_resume() and drm_kms_helper_poll_enable().
*/
int drm_mode_config_helper_resume(struct drm_device *dev)
{
@ -247,7 +248,8 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
DRM_ERROR("Failed to resume (%d)\n", ret);
dev->mode_config.suspend_state = NULL;
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
drm_client_dev_resume(dev, false);
/*
* Don't enable polling if it is not initialized
*/

View File

@ -33,7 +33,7 @@
#include <linux/moduleparam.h>
#include <drm/drm_bridge.h>
#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>

View File

@ -9,6 +9,7 @@ config DRM_FSL_DCU
select DRM_PANEL
select REGMAP_MMIO
select VIDEOMODE_HELPERS
select MFD_SYSCON if SOC_LS1021A
help
Choose this option if you have a Freescale DCU chipset.
If M is selected the module will be called fsl-dcu-drm.

View File

@ -101,12 +101,25 @@ static void fsl_dcu_irq_uninstall(struct drm_device *dev)
static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
{
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
struct regmap *scfg;
int ret;
ret = fsl_dcu_drm_modeset_init(fsl_dev);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize mode setting\n");
return ret;
if (ret < 0)
return dev_err_probe(dev->dev, ret, "failed to initialize mode setting\n");
scfg = syscon_regmap_lookup_by_compatible("fsl,ls1021a-scfg");
if (PTR_ERR(scfg) != -ENODEV) {
/*
* For simplicity, enable the PIXCLK unconditionally,
* resulting in increased power consumption. Disabling
* the clock in PM or on unload could be implemented as
* a future improvement.
*/
ret = regmap_update_bits(scfg, SCFG_PIXCLKCR, SCFG_PIXCLKCR_PXCEN,
SCFG_PIXCLKCR_PXCEN);
if (ret < 0)
return dev_err_probe(dev->dev, ret, "failed to enable pixclk\n");
}
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
@ -274,10 +287,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
}
fsl_dev->irq = platform_get_irq(pdev, 0);
if (fsl_dev->irq < 0) {
dev_err(dev, "failed to get irq\n");
if (fsl_dev->irq < 0)
return fsl_dev->irq;
}
fsl_dev->regmap = devm_regmap_init_mmio(dev, base,
&fsl_dcu_regmap_config);

View File

@ -160,6 +160,9 @@
#define FSL_DCU_ARGB4444 12
#define FSL_DCU_YUV422 14
#define SCFG_PIXCLKCR 0x28
#define SCFG_PIXCLKCR_PXCEN BIT(31)
#define VF610_LAYER_REG_NUM 9
#define LS1021A_LAYER_REG_NUM 10

View File

@ -29,7 +29,7 @@ void fsl_tcon_bypass_enable(struct fsl_tcon *tcon)
FSL_TCON_CTRL1_TCON_BYPASS);
}
static struct regmap_config fsl_tcon_regmap_config = {
static const struct regmap_config fsl_tcon_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,

View File

@ -10,6 +10,7 @@ config DRM_I915
# the shmem_readpage() which depends upon tmpfs
select SHMEM
select TMPFS
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_DSC_HELPER
select DRM_DISPLAY_HDCP_HELPER

View File

@ -11,7 +11,7 @@
#include <acpi/video.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>

View File

@ -7,7 +7,7 @@ config DRM_IMX_DCSS
select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
depends on DRM && ((ARCH_MXC && ARM64) || COMPILE_TEST)
help
Choose this if you have a NXP i.MX8MQ based system and want to use the
Display Controller Subsystem. This option enables DCSS support.

View File

@ -136,7 +136,7 @@ static int div_q(int A, int B)
else
temp -= B / 2;
result = (int)(temp / B);
result = div_s64(temp, B);
return result;
}
@ -239,7 +239,7 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
ll_temp = coef[phase][i];
ll_temp <<= PSC_COEFF_PRECISION;
ll_temp += sum >> 1;
ll_temp /= sum;
ll_temp = div_s64(ll_temp, sum);
coef[phase][i] = (int)ll_temp;
}
}

View File

@ -15,6 +15,7 @@ config DRM_IMX_PARALLEL_DISPLAY
depends on DRM_IMX
select DRM_BRIDGE
select DRM_BRIDGE_CONNECTOR
select DRM_IMX_LEGACY_BRIDGE
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS

View File

@ -2,9 +2,9 @@
config DRM_MEDIATEK
tristate "DRM Support for Mediatek SoCs"
depends on DRM
depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST)
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
depends on COMMON_CLK
depends on HAVE_ARM_SMCCC
depends on HAVE_ARM_SMCCC || COMPILE_TEST
depends on OF
depends on MTK_MMSYS
select DRM_CLIENT_SELECTION

View File

@ -311,7 +311,7 @@ static const struct mtk_dp_efuse_fmt mt8195_dp_efuse_fmt[MTK_DP_CAL_MAX] = {
},
};
static struct regmap_config mtk_dp_regmap_config = {
static const struct regmap_config mtk_dp_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_MESON
tristate "DRM Support for Amlogic Meson Display Controller"
depends on DRM && OF && (ARM || ARM64)
depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on ARCH_MESON || COMPILE_TEST
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER

View File

@ -128,7 +128,7 @@ static bool meson_vpu_has_available_connectors(struct device *dev)
return false;
}
static struct regmap_config meson_regmap_config = {
static const struct regmap_config meson_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,

View File

@ -272,20 +272,6 @@ static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi,
writeb(data, dw_hdmi->hdmitx + addr);
}
/* Helper to change specific bits in controller registers */
static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
dw_hdmi->data->dwc_write(dw_hdmi, addr, data);
}
/* Bridge */
/* Setup PHY bandwidth modes */

View File

@ -6,6 +6,7 @@ config DRM_MSM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
depends on IOMMU_SUPPORT
depends on OF
depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n

View File

@ -1467,14 +1467,14 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
dp_priv = container_of(dp_display, struct dp_display_private, dp_display);
ret = dp_bridge_init(dp_display, dev, encoder);
ret = dp_bridge_init(dp_display, dev, encoder, yuv_supported);
if (ret) {
DRM_DEV_ERROR(dev->dev,
"failed to create dp bridge: %d\n", ret);
return ret;
}
dp_display->connector = dp_drm_connector_init(dp_display, encoder, yuv_supported);
dp_display->connector = dp_drm_connector_init(dp_display, encoder);
if (IS_ERR(dp_display->connector)) {
ret = PTR_ERR(dp_display->connector);
DRM_DEV_ERROR(dev->dev,

View File

@ -289,7 +289,7 @@ static const struct drm_bridge_funcs edp_bridge_ops = {
};
int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder)
struct drm_encoder *encoder, bool yuv_supported)
{
int rc;
struct msm_dp_bridge *dp_bridge;
@ -304,6 +304,7 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
bridge = &dp_bridge->bridge;
bridge->funcs = dp_display->is_edp ? &edp_bridge_ops : &dp_bridge_ops;
bridge->type = dp_display->connector_type;
bridge->ycbcr_420_allowed = yuv_supported;
/*
* Many ops only make sense for DP. Why?
@ -351,8 +352,8 @@ int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
}
/* connector initialization */
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder,
bool yuv_supported)
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display,
struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
@ -363,9 +364,6 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct dr
if (!dp_display->is_edp)
drm_connector_attach_dp_subconnector_property(connector);
if (yuv_supported)
connector->ycbcr_420_allowed = true;
drm_connector_attach_encoder(connector, encoder);
return connector;

View File

@ -19,10 +19,11 @@ struct msm_dp_bridge {
#define to_dp_bridge(x) container_of((x), struct msm_dp_bridge, bridge)
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display, struct drm_encoder *encoder,
bool yuv_supported);
struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display,
struct drm_encoder *encoder);
int dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
struct drm_encoder *encoder);
struct drm_encoder *encoder,
bool yuv_supported);
void dp_bridge_atomic_enable(struct drm_bridge *drm_bridge,
struct drm_bridge_state *old_bridge_state);

View File

@ -28,8 +28,8 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
@ -804,8 +804,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
/* Disable console. */
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true);
drm_client_dev_suspend(dev, false);
if (drm_drv_uses_atomic_modeset(dev)) {
if (!runtime) {
@ -836,8 +835,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
}
}
/* Enable console. */
drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false);
drm_client_dev_resume(dev, false);
}
int

View File

@ -2,7 +2,7 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_client_event.h>
#include "nouveau_drv.h"
#include "nouveau_acpi.h"

View File

@ -443,6 +443,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *fifoch,
ret = gf100_grctx_generate(gr, chan, fifoch->inst);
if (ret) {
nvkm_error(&base->engine.subdev, "failed to construct context\n");
mutex_unlock(&gr->fecs.mutex);
return ret;
}
}

View File

@ -691,11 +691,6 @@ u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
return mgr_desc[channel].sync_lost_irq;
}
u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
{
return DISPC_IRQ_FRAMEDONEWB;
}
void dispc_mgr_enable(struct dispc_device *dispc,
enum omap_channel channel, bool enable)
{
@ -726,30 +721,6 @@ void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
}
bool dispc_wb_go_busy(struct dispc_device *dispc)
{
return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
}
void dispc_wb_go(struct dispc_device *dispc)
{
enum omap_plane_id plane = OMAP_DSS_WB;
bool enable, go;
enable = REG_GET(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
if (!enable)
return;
go = REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
if (go) {
DSSERR("GO bit not down for WB\n");
return;
}
REG_FLD_MOD(dispc, DISPC_CONTROL2, 1, 6, 6);
}
static void dispc_ovl_write_firh_reg(struct dispc_device *dispc,
enum omap_plane_id plane, int reg,
u32 value)
@ -1498,17 +1469,6 @@ void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc,
min(high, 0xfffu));
}
void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable)
{
if (!dispc_has_feature(dispc, FEAT_FIFO_MERGE)) {
WARN_ON(enable);
return;
}
DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
REG_FLD_MOD(dispc, DISPC_CONFIG, enable ? 1 : 0, 14, 14);
}
void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
enum omap_plane_id plane,
u32 *fifo_low, u32 *fifo_high,
@ -2814,95 +2774,6 @@ int dispc_ovl_setup(struct dispc_device *dispc,
return r;
}
int dispc_wb_setup(struct dispc_device *dispc,
const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct videomode *vm,
enum dss_writeback_channel channel_in)
{
int r;
u32 l;
enum omap_plane_id plane = OMAP_DSS_WB;
const int pos_x = 0, pos_y = 0;
const u8 zorder = 0, global_alpha = 0;
const bool replication = true;
bool truncation;
int in_width = vm->hactive;
int in_height = vm->vactive;
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
in_height /= 2;
DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
"rot %d\n", wi->paddr, wi->p_uv_addr, in_width,
in_height, wi->width, wi->height, wi->fourcc, wi->rotation);
r = dispc_ovl_setup_common(dispc, plane, caps, wi->paddr, wi->p_uv_addr,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (r)
return r;
switch (wi->fourcc) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_RGBX4444:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XRGB4444:
truncation = true;
break;
default:
truncation = false;
break;
}
/* setup extra DISPC_WB_ATTRIBUTES */
l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */
l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */
l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */
if (mem_to_mem)
l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */
else
l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */
dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l);
if (mem_to_mem) {
/* WBDELAYCOUNT */
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
} else {
u32 wbdelay;
if (channel_in == DSS_WB_TV_MGR)
wbdelay = vm->vsync_len + vm->vback_porch;
else
wbdelay = vm->vfront_porch + vm->vsync_len +
vm->vback_porch;
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
wbdelay /= 2;
wbdelay = min(wbdelay, 255u);
/* WBDELAYCOUNT */
REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
}
return 0;
}
bool dispc_has_writeback(struct dispc_device *dispc)
{
return dispc->feat->has_writeback;
}
int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable)
{
@ -3742,23 +3613,6 @@ void dispc_mgr_set_clock_div(struct dispc_device *dispc,
cinfo->pck_div);
}
int dispc_mgr_get_clock_div(struct dispc_device *dispc,
enum omap_channel channel,
struct dispc_clock_info *cinfo)
{
unsigned long fck;
fck = dispc_fclk_rate(dispc);
cinfo->lck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16);
cinfo->pck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 7, 0);
cinfo->lck = fck / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
return 0;
}
u32 dispc_read_irqstatus(struct dispc_device *dispc)
{
return dispc_read_reg(dispc, DISPC_IRQSTATUS);

View File

@ -416,7 +416,6 @@ u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
enum omap_channel channel);
u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
enum omap_channel channel);
u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc);
u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc);
@ -458,20 +457,11 @@ int dispc_ovl_setup(struct dispc_device *dispc,
int dispc_ovl_enable(struct dispc_device *dispc,
enum omap_plane_id plane, bool enable);
bool dispc_has_writeback(struct dispc_device *dispc);
int dispc_wb_setup(struct dispc_device *dispc,
const struct omap_dss_writeback_info *wi,
bool mem_to_mem, const struct videomode *vm,
enum dss_writeback_channel channel_in);
bool dispc_wb_go_busy(struct dispc_device *dispc);
void dispc_wb_go(struct dispc_device *dispc);
void dispc_enable_sidle(struct dispc_device *dispc);
void dispc_disable_sidle(struct dispc_device *dispc);
void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable);
void dispc_pck_free_enable(struct dispc_device *dispc, bool enable);
void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable);
typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data);
@ -494,9 +484,6 @@ void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
void dispc_mgr_set_clock_div(struct dispc_device *dispc,
enum omap_channel channel,
const struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(struct dispc_device *dispc,
enum omap_channel channel,
struct dispc_clock_info *cinfo);
void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS

View File

@ -632,6 +632,16 @@ config DRM_PANEL_SAMSUNG_AMS639RQ08
Say Y or M here if you want to enable support for the
Samsung AMS639RQ08 FHD Plus (2340x1080@60Hz) CMD mode panel.
config DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24
tristate "Samsung AMS427AP24 panel with S6E88A0 controller"
depends on GPIOLIB && OF && REGULATOR
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for Samsung AMS427AP24 panel
with S6E88A0 controller (found in Samsung Galaxy S4 Mini Value Edition
GT-I9195I). To compile this driver as a module, choose M here.
config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
depends on OF

View File

@ -77,6 +77,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_SPI) += panel-samsung-s6e63m0-spi.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o

View File

@ -318,7 +318,7 @@ static int ili9322_regmap_spi_read(void *context, const void *reg,
return spi_write_then_read(spi, buf, 1, val, 1);
}
static struct regmap_bus ili9322_regmap_bus = {
static const struct regmap_bus ili9322_regmap_bus = {
.write = ili9322_regmap_spi_write,
.read = ili9322_regmap_spi_read,
.reg_format_endian_default = REGMAP_ENDIAN_BIG,

View File

@ -26,7 +26,7 @@ struct ltk050h3146w;
struct ltk050h3146w_desc {
const unsigned long mode_flags;
const struct drm_display_mode *mode;
int (*init)(struct ltk050h3146w *ctx);
void (*init)(struct mipi_dsi_multi_context *dsi_ctx);
};
struct ltk050h3146w {
@ -243,67 +243,57 @@ struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
return container_of(panel, struct ltk050h3146w, panel);
}
static int ltk050h3148w_init_sequence(struct ltk050h3146w *ctx)
static void ltk050h3148w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
/*
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
mipi_dsi_dcs_write_seq(dsi, 0xb9, 0xff, 0x83, 0x94);
mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44,
0x71, 0x31, 0x55, 0x2f);
mipi_dsi_dcs_write_seq(dsi, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x88);
mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07);
mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70,
0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74,
0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86);
mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e,
0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54,
0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05,
0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07,
0x17, 0x11, 0x40);
mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b,
0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01,
0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b,
0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06,
0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14,
0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c,
0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58,
0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00,
0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e,
0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88,
0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b,
0x5d, 0x61, 0x65, 0x7f);
mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x0b);
mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x1f, 0x31);
mipi_dsi_dcs_write_seq(dsi, 0xb6, 0xc4, 0xc4);
mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x01);
mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xbd, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xc6, 0xef);
mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x02);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0xff, 0x83, 0x94);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x50, 0x15, 0x75, 0x09, 0x32, 0x44,
0x71, 0x31, 0x55, 0x2f);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xba, 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x88);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0x80, 0x64, 0x10, 0x07);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb4, 0x05, 0x70, 0x05, 0x70, 0x01, 0x70,
0x01, 0x0c, 0x86, 0x75, 0x00, 0x3f, 0x01, 0x74,
0x01, 0x74, 0x01, 0x74, 0x01, 0x0c, 0x86);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x00, 0x00, 0x07, 0x07, 0x40, 0x1e,
0x08, 0x00, 0x32, 0x10, 0x08, 0x00, 0x08, 0x54,
0x15, 0x10, 0x05, 0x04, 0x02, 0x12, 0x10, 0x05,
0x07, 0x33, 0x34, 0x0c, 0x0c, 0x37, 0x10, 0x07,
0x17, 0x11, 0x40);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b,
0x1a, 0x1a, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01,
0x02, 0x03, 0x20, 0x21, 0x18, 0x18, 0x22, 0x23,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd6, 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b,
0x1a, 0x1a, 0x03, 0x02, 0x01, 0x00, 0x07, 0x06,
0x05, 0x04, 0x23, 0x22, 0x18, 0x18, 0x21, 0x20,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xe0, 0x00, 0x03, 0x09, 0x11, 0x11, 0x14,
0x18, 0x16, 0x2e, 0x3d, 0x4d, 0x4d, 0x58, 0x6c,
0x72, 0x78, 0x88, 0x8b, 0x86, 0xa4, 0xb2, 0x58,
0x55, 0x59, 0x5b, 0x5d, 0x60, 0x64, 0x7f, 0x00,
0x03, 0x09, 0x0f, 0x11, 0x14, 0x18, 0x16, 0x2e,
0x3d, 0x4d, 0x4d, 0x58, 0x6d, 0x73, 0x78, 0x88,
0x8b, 0x87, 0xa5, 0xb2, 0x58, 0x55, 0x58, 0x5b,
0x5d, 0x61, 0x65, 0x7f);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcc, 0x0b);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x1f, 0x31);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0xc4, 0xc4);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x01);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb1, 0x00);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbd, 0x00);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc6, 0xef);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x02);
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
dev_err(ctx->dev, "failed to set tear on: %d\n", ret);
return ret;
}
msleep(60);
return 0;
mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1);
mipi_dsi_msleep(dsi_ctx, 60);
}
static const struct drm_display_mode ltk050h3148w_mode = {
@ -327,74 +317,64 @@ static const struct ltk050h3146w_desc ltk050h3148w_data = {
MIPI_DSI_MODE_VIDEO_BURST,
};
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
static void ltk050h3146w_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
/*
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
0x01);
mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdf, 0x93, 0x65, 0xf8);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
0x01);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x00, 0xb5);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x00, 0xb5);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
0x28, 0x04, 0xcc, 0xcc, 0xcc);
mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
0x80);
mipi_dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
0x16, 0x00, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
0x21, 0x00, 0x60);
mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
mipi_dsi_dcs_write_seq(dsi, 0xde, 0x02);
mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x11);
mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
mipi_dsi_dcs_write_seq(dsi, 0xde, 0x00);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb9, 0x00, 0xc4, 0x23, 0x07);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
0x28, 0x04, 0xcc, 0xcc, 0xcc);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x0f, 0x04);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbe, 0x1e, 0xf2);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x26, 0x03);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x00, 0x12);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
0x80);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
0x16, 0x00, 0x00);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
0x21, 0x00, 0x60);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xdd, 0x2c, 0xa3, 0x00);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x02);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb2, 0x32, 0x1c);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb7, 0x3b, 0x70, 0x00, 0x04);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x11);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc2, 0x20, 0x38, 0x1e, 0x84);
mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xde, 0x00);
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
dev_err(ctx->dev, "failed to set tear on: %d\n", ret);
return ret;
}
msleep(60);
return 0;
mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 1);
mipi_dsi_msleep(dsi_ctx, 60);
}
static const struct drm_display_mode ltk050h3146w_mode = {
@ -418,79 +398,42 @@ static const struct ltk050h3146w_desc ltk050h3146w_data = {
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
};
static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page)
static void ltk050h3146w_a2_select_page(struct mipi_dsi_multi_context *dsi_ctx, int page)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
u8 d[3] = { 0x98, 0x81, page };
u8 d[4] = { 0xff, 0x98, 0x81, page };
return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d));
mipi_dsi_dcs_write_buffer_multi(dsi_ctx, d, ARRAY_SIZE(d));
}
static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
static void ltk050h3146w_a2_write_page(struct mipi_dsi_multi_context *dsi_ctx, int page,
const struct ltk050h3146w_cmd *cmds,
int num)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int i, ret;
ltk050h3146w_a2_select_page(dsi_ctx, page);
ret = ltk050h3146w_a2_select_page(ctx, page);
if (ret < 0) {
dev_err(ctx->dev, "failed to select page %d: %d\n", page, ret);
return ret;
}
for (i = 0; i < num; i++) {
ret = mipi_dsi_generic_write(dsi, &cmds[i],
for (int i = 0; i < num; i++)
mipi_dsi_generic_write_multi(dsi_ctx, &cmds[i],
sizeof(struct ltk050h3146w_cmd));
if (ret < 0) {
dev_err(ctx->dev, "failed to write page %d init cmds: %d\n", page, ret);
return ret;
}
}
return 0;
}
static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx)
static void ltk050h3146w_a2_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
/*
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds,
ltk050h3146w_a2_write_page(dsi_ctx, 3, page3_cmds,
ARRAY_SIZE(page3_cmds));
if (ret < 0)
return ret;
ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds,
ltk050h3146w_a2_write_page(dsi_ctx, 4, page4_cmds,
ARRAY_SIZE(page4_cmds));
if (ret < 0)
return ret;
ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds,
ltk050h3146w_a2_write_page(dsi_ctx, 1, page1_cmds,
ARRAY_SIZE(page1_cmds));
if (ret < 0)
return ret;
ret = ltk050h3146w_a2_select_page(ctx, 0);
if (ret < 0) {
dev_err(ctx->dev, "failed to select page 0: %d\n", ret);
return ret;
}
ltk050h3146w_a2_select_page(dsi_ctx, 0);
/* vendor code called this without param, where there should be one */
ret = mipi_dsi_dcs_set_tear_on(dsi, 0);
if (ret < 0) {
dev_err(ctx->dev, "failed to set tear on: %d\n", ret);
return ret;
}
mipi_dsi_dcs_set_tear_on_multi(dsi_ctx, 0);
msleep(60);
return 0;
mipi_dsi_msleep(dsi_ctx, 60);
}
static const struct drm_display_mode ltk050h3146w_a2_mode = {
@ -518,19 +461,12 @@ static int ltk050h3146w_unprepare(struct drm_panel *panel)
{
struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0) {
dev_err(ctx->dev, "failed to set display off: %d\n", ret);
return ret;
}
mipi_dsi_dcs_enter_sleep_mode(dsi);
if (ret < 0) {
dev_err(ctx->dev, "failed to enter sleep mode: %d\n", ret);
return ret;
}
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
if (dsi_ctx.accum_err)
return dsi_ctx.accum_err;
regulator_disable(ctx->iovcc);
regulator_disable(ctx->vci);
@ -542,17 +478,17 @@ static int ltk050h3146w_prepare(struct drm_panel *panel)
{
struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
dev_dbg(ctx->dev, "Resetting the panel\n");
ret = regulator_enable(ctx->vci);
if (ret < 0) {
dev_err(ctx->dev, "Failed to enable vci supply: %d\n", ret);
return ret;
dsi_ctx.accum_err = regulator_enable(ctx->vci);
if (dsi_ctx.accum_err) {
dev_err(ctx->dev, "Failed to enable vci supply: %d\n", dsi_ctx.accum_err);
return dsi_ctx.accum_err;
}
ret = regulator_enable(ctx->iovcc);
if (ret < 0) {
dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
dsi_ctx.accum_err = regulator_enable(ctx->iovcc);
if (dsi_ctx.accum_err) {
dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", dsi_ctx.accum_err);
goto disable_vci;
}
@ -561,28 +497,15 @@ static int ltk050h3146w_prepare(struct drm_panel *panel)
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
msleep(20);
ret = ctx->panel_desc->init(ctx);
if (ret < 0) {
dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
goto disable_iovcc;
}
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
goto disable_iovcc;
}
ctx->panel_desc->init(&dsi_ctx);
mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
/* T9: 120ms */
msleep(120);
mipi_dsi_msleep(&dsi_ctx, 120);
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
mipi_dsi_msleep(&dsi_ctx, 50);
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
dev_err(ctx->dev, "Failed to set display on: %d\n", ret);
if (dsi_ctx.accum_err)
goto disable_iovcc;
}
msleep(50);
return 0;
@ -590,7 +513,7 @@ static int ltk050h3146w_prepare(struct drm_panel *panel)
regulator_disable(ctx->iovcc);
disable_vci:
regulator_disable(ctx->vci);
return ret;
return dsi_ctx.accum_err;
}
static int ltk050h3146w_get_modes(struct drm_panel *panel,

Some files were not shown because too many files have changed in this diff Show More