This commit is contained in:
Stephen Rothwell 2024-12-20 12:13:59 +11:00
commit cecc27b364
182 changed files with 6739 additions and 1290 deletions

View File

@ -19,6 +19,7 @@ properties:
enum:
- renesas,r8a779a0-dsi-csi2-tx # for V3U
- renesas,r8a779g0-dsi-csi2-tx # for V4H
- renesas,r8a779h0-dsi-csi2-tx # for V4M
reg:
maxItems: 1

View File

@ -80,12 +80,12 @@ properties:
- const: 4
port@2:
$ref: /schemas/graph.yaml#/properties/port
description: Video port for LVDS Channel-A output (panel or bridge).
$ref: '#/$defs/lvds-port'
port@3:
$ref: /schemas/graph.yaml#/properties/port
description: Video port for LVDS Channel-B output (panel or bridge).
$ref: '#/$defs/lvds-port'
required:
- port@0
@ -96,6 +96,36 @@ required:
- reg
- ports
$defs:
lvds-port:
$ref: /schemas/graph.yaml#/$defs/port-base
unevaluatedProperties: false
properties:
endpoint:
$ref: /schemas/media/video-interfaces.yaml#
unevaluatedProperties: false
properties:
ti,lvds-termination-ohms:
description: The value of near end differential termination in ohms.
enum: [100, 200]
default: 200
ti,lvds-vod-swing-clock-microvolt:
description: LVDS differential output voltage <min max> for clock
lanes in microvolts.
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
ti,lvds-vod-swing-data-microvolt:
description: LVDS differential output voltage <min max> for data
lanes in microvolts.
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
allOf:
- if:
properties:

View File

@ -42,6 +42,8 @@ properties:
# Admatec 9904379 10.1" 1024x600 LVDS panel
- admatec,9904379
- auo,b101ew05
# AUO G084SN05 V9 8.4" 800x600 LVDS panel
- auo,g084sn05
# Chunghwa Picture Tubes Ltd. 7" WXGA (800x1280) TFT LCD LVDS panel
- chunghwa,claa070wp03xg
# EDT ETML0700Z9NDHA 7.0" WSVGA (1024x600) color TFT LCD LVDS panel

View File

@ -206,12 +206,16 @@ properties:
- mitsubishi,aa070mc01-ca1
# Mitsubishi AA084XE01 8.4" XGA TFT LCD panel
- mitsubishi,aa084xe01
# Multi-Inno Technology Co.,Ltd MI0700A2T-30 7" 800x480 TFT Resistive Touch Module
- multi-inno,mi0700a2t-30
# Multi-Inno Technology Co.,Ltd MI0700S4T-6 7" 800x480 TFT Resistive Touch Module
- multi-inno,mi0700s4t-6
# Multi-Inno Technology Co.,Ltd MI0800FT-9 8" 800x600 TFT Resistive Touch Module
- multi-inno,mi0800ft-9
# Multi-Inno Technology Co.,Ltd MI1010AIT-1CP 10.1" 1280x800 LVDS IPS Cap Touch Mod.
- multi-inno,mi1010ait-1cp
# Multi-Inno Technology Co.,Ltd MI1010Z1T-1CP11 10.1" 1024x600 TFT Resistive Touch Module
- multi-inno,mi1010z1t-1cp11
# NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
- nec,nl12880bc20-05
# NEC LCD Technologies,Ltd. WQVGA TFT LCD panel
@ -280,6 +284,8 @@ properties:
- team-source-display,tst043015cmhx
# Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
- tianma,tm070jdhg30
# Tianma Micro-electronics TM070JDHG34-00 7.0" WXGA (1280x800) LVDS TFT LCD panel
- tianma,tm070jdhg34-00
# Tianma Micro-electronics TM070JVHG33 7.0" WXGA TFT LCD panel
- tianma,tm070jvhg33
# Tianma Micro-electronics TM070RVHG71 7.0" WXGA TFT LCD panel

View File

@ -41,6 +41,7 @@ properties:
- renesas,du-r8a77995 # for R-Car D3 compatible DU
- renesas,du-r8a779a0 # for R-Car V3U compatible DU
- renesas,du-r8a779g0 # for R-Car V4H compatible DU
- renesas,du-r8a779h0 # for R-Car V4M compatible DU
reg:
maxItems: 1
@ -69,14 +70,12 @@ properties:
$ref: /schemas/graph.yaml#/properties/port
unevaluatedProperties: false
required:
- port@0
- port@1
unevaluatedProperties: false
renesas,cmms:
$ref: /schemas/types.yaml#/definitions/phandle-array
minItems: 1
maxItems: 4
items:
maxItems: 1
description:
@ -85,6 +84,8 @@ properties:
renesas,vsps:
$ref: /schemas/types.yaml#/definitions/phandle-array
minItems: 1
maxItems: 4
items:
items:
- description: phandle to VSP instance that serves the DU channel
@ -489,9 +490,11 @@ allOf:
renesas,cmms:
minItems: 4
maxItems: 4
renesas,vsps:
minItems: 4
maxItems: 4
required:
- clock-names
@ -558,9 +561,11 @@ allOf:
renesas,cmms:
minItems: 3
maxItems: 3
renesas,vsps:
minItems: 3
maxItems: 3
required:
- clock-names
@ -627,9 +632,11 @@ allOf:
renesas,cmms:
minItems: 3
maxItems: 3
renesas,vsps:
minItems: 3
maxItems: 3
required:
- clock-names
@ -683,7 +690,7 @@ allOf:
- port@1
renesas,vsps:
minItems: 1
maxItems: 1
required:
- clock-names
@ -746,9 +753,11 @@ allOf:
renesas,cmms:
minItems: 2
maxItems: 2
renesas,vsps:
minItems: 2
maxItems: 2
required:
- clock-names
@ -799,6 +808,54 @@ allOf:
renesas,vsps:
minItems: 2
maxItems: 2
required:
- clock-names
- interrupts
- resets
- reset-names
- renesas,vsps
- if:
properties:
compatible:
contains:
enum:
- renesas,du-r8a779h0
then:
properties:
clocks:
items:
- description: Functional clock
clock-names:
items:
- const: du.0
interrupts:
maxItems: 1
resets:
maxItems: 1
reset-names:
items:
- const: du.0
ports:
properties:
port@0:
description: DSI 0
port@1: false
port@2: false
port@3: false
required:
- port@0
renesas,vsps:
maxItems: 1
required:
- clock-names

View File

@ -0,0 +1,120 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/rockchip/rockchip,rk3588-mipi-dsi2.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Rockchip specific extensions to the Synopsys Designware MIPI DSI2
maintainers:
- Heiko Stuebner <heiko@sntech.de>
properties:
compatible:
enum:
- rockchip,rk3588-mipi-dsi2
reg:
maxItems: 1
interrupts:
maxItems: 1
clocks:
maxItems: 2
clock-names:
items:
- const: pclk
- const: sys
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
description:
This SoC uses GRF regs to switch between vopl/vopb.
phys:
maxItems: 1
phy-names:
const: dcphy
power-domains:
maxItems: 1
resets:
maxItems: 1
reset-names:
const: apb
ports:
$ref: /schemas/graph.yaml#/properties/ports
properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description: Input node to receive pixel data.
port@1:
$ref: /schemas/graph.yaml#/properties/port
description: DSI output node to panel.
required:
- port@0
- port@1
required:
- compatible
- clocks
- clock-names
- rockchip,grf
- phys
- phy-names
- ports
- reg
allOf:
- $ref: /schemas/display/dsi-controller.yaml#
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/rockchip,rk3588-cru.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/power/rk3588-power.h>
#include <dt-bindings/reset/rockchip,rk3588-cru.h>
soc {
#address-cells = <2>;
#size-cells = <2>;
dsi@fde20000 {
compatible = "rockchip,rk3588-mipi-dsi2";
reg = <0x0 0xfde20000 0x0 0x10000>;
interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&cru PCLK_DSIHOST0>, <&cru CLK_DSIHOST0>;
clock-names = "pclk", "sys";
resets = <&cru SRST_P_DSIHOST0>;
reset-names = "apb";
power-domains = <&power RK3588_PD_VOP>;
phys = <&mipidcphy0 PHY_TYPE_DPHY>;
phy-names = "dcphy";
rockchip,grf = <&vop_grf>;
ports {
#address-cells = <1>;
#size-cells = <0>;
dsi0_in: port@0 {
reg = <0>;
};
dsi0_out: port@1 {
reg = <1>;
};
};
};
};

View File

@ -100,12 +100,16 @@ properties:
- description: Video layer, plane 1 (U/V or U)
- description: Video layer, plane 2 (V)
- description: Graphics layer
- description: Audio channel 0
- description: Audio channel 1
dma-names:
items:
- const: vid0
- const: vid1
- const: vid2
- const: gfx0
- const: aud0
- const: aud1
phys:
description: PHYs for the DP data lanes
@ -194,11 +198,13 @@ examples:
power-domains = <&pd_dp>;
resets = <&reset ZYNQMP_RESET_DP>;
dma-names = "vid0", "vid1", "vid2", "gfx0";
dma-names = "vid0", "vid1", "vid2", "gfx0", "aud0", "aud1";
dmas = <&xlnx_dpdma 0>,
<&xlnx_dpdma 1>,
<&xlnx_dpdma 2>,
<&xlnx_dpdma 3>;
<&xlnx_dpdma 3>,
<&xlnx_dpdma 4>,
<&xlnx_dpdma 5>;
phys = <&psgtr 1 PHY_TYPE_DP 0 3>,
<&psgtr 0 PHY_TYPE_DP 1 3>;

View File

@ -145,57 +145,57 @@ both.
Memory
^^^^^^
- drm-memory-<region>: <uint> [KiB|MiB]
Each possible memory type which can be used to store buffer objects by the
GPU in question shall be given a stable and unique name to be returned as the
string here.
Each possible memory type which can be used to store buffer objects by the GPU
in question shall be given a stable and unique name to be used as the "<region>"
string.
The region name "memory" is reserved to refer to normal system memory.
Value shall reflect the amount of storage currently consumed by the buffer
The value shall reflect the amount of storage currently consumed by the buffer
objects belong to this client, in the respective memory region.
Default unit shall be bytes with optional unit specifiers of 'KiB' or 'MiB'
indicating kibi- or mebi-bytes.
This key is deprecated and is an alias for drm-resident-<region>. Only one of
the two should be present in the output.
- drm-total-<region>: <uint> [KiB|MiB]
The total size of all requested buffers, including both shared and private
memory. The backing store for the buffers does not need to be currently
instantiated to count under this category. To avoid double-counting, if a buffer
has multiple regions where it can be allocated to, the implementation should
consistently select a single region for accounting purposes.
- drm-shared-<region>: <uint> [KiB|MiB]
The total size of buffers that are shared with another file (e.g., have more
than a single handle).
- drm-total-<region>: <uint> [KiB|MiB]
The total size of all created buffers including shared and private memory. The
backing store for the buffers does not have to be currently instantiated to be
counted under this category.
The total size of buffers that are shared with another file (i.e., have more
than one handle). The same requirement to avoid double-counting that applies to
drm-total-<region> also applies here.
- drm-resident-<region>: <uint> [KiB|MiB]
The total size of buffers that are resident (have their backing store present or
instantiated) in the specified region.
The total size of buffers that are resident (i.e., have their backing store
present or instantiated) in the specified region.
This is an alias for drm-memory-<region> and only one of the two should be
present in the output.
- drm-memory-<region>: <uint> [KiB|MiB]
This key is deprecated and is only printed by amdgpu; it is an alias for
drm-resident-<region>.
- drm-purgeable-<region>: <uint> [KiB|MiB]
The total size of buffers that are purgeable.
The total size of buffers that are resident and purgeable.
For example drivers which implement a form of 'madvise' like functionality can
here count buffers which have instantiated backing store, but have been marked
with an equivalent of MADV_DONTNEED.
For example, drivers that implement functionality similar to 'madvise' can count
buffers that have instantiated backing stores but have been marked with an
equivalent of MADV_DONTNEED.
- drm-active-<region>: <uint> [KiB|MiB]
The total size of buffers that are active on one or more engines.
One practical example of this can be presence of unsignaled fences in an GEM
buffer reservation object. Therefore the active category is a subset of
resident.
One practical example of this could be the presence of unsignaled fences in a
GEM buffer reservation object. Therefore, the active category is a subset of the
resident category.
Implementation Details
======================

View File

@ -1306,11 +1306,14 @@ zynqmp_dpsub: display@fd4a0000 {
"dp_vtc_pixel_clk_in";
power-domains = <&zynqmp_firmware PD_DP>;
resets = <&zynqmp_reset ZYNQMP_RESET_DP>;
dma-names = "vid0", "vid1", "vid2", "gfx0";
dma-names = "vid0", "vid1", "vid2", "gfx0",
"aud0", "aud1";
dmas = <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO0>,
<&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO1>,
<&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO2>,
<&zynqmp_dpdma ZYNQMP_DPDMA_GRAPHICS>;
<&zynqmp_dpdma ZYNQMP_DPDMA_GRAPHICS>,
<&zynqmp_dpdma ZYNQMP_DPDMA_AUDIO0>,
<&zynqmp_dpdma ZYNQMP_DPDMA_AUDIO1>;
ports {
#address-cells = <1>;

View File

@ -5,6 +5,7 @@ amdxdna-y := \
aie2_error.o \
aie2_message.o \
aie2_pci.o \
aie2_pm.o \
aie2_psp.o \
aie2_smu.o \
aie2_solver.o \
@ -17,5 +18,6 @@ amdxdna-y := \
npu1_regs.o \
npu2_regs.o \
npu4_regs.o \
npu5_regs.o
npu5_regs.o \
npu6_regs.o
obj-$(CONFIG_DRM_ACCEL_AMDXDNA) = amdxdna.o

View File

@ -1,5 +1,3 @@
- Replace idr with xa
- Add import and export BO support
- Add debugfs support
- Add debug BO support
- Improve power management

View File

@ -11,6 +11,7 @@
#include <drm/drm_syncobj.h>
#include <linux/hmm.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include <trace/events/amdxdna.h>
#include "aie2_msg_priv.h"
@ -90,11 +91,11 @@ void aie2_restart_ctx(struct amdxdna_client *client)
{
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_hwctx *hwctx;
int next = 0;
unsigned long hwctx_id;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
mutex_lock(&client->hwctx_lock);
idr_for_each_entry_continue(&client->hwctx_idr, hwctx, next) {
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
if (hwctx->status != HWCTX_STAT_STOP)
continue;
@ -179,7 +180,7 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
up(&job->hwctx->priv->job_sem);
job->job_done = true;
dma_fence_put(fence);
mmput(job->mm);
mmput_async(job->mm);
aie2_job_put(job);
}
@ -518,6 +519,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
struct drm_gpu_scheduler *sched;
struct amdxdna_hwctx_priv *priv;
struct amdxdna_gem_obj *heap;
struct amdxdna_dev_hdl *ndev;
int i, ret;
priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
@ -612,6 +614,8 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
}
hwctx->status = HWCTX_STAT_INIT;
ndev = xdna->dev_handle;
ndev->hwctx_num++;
XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
@ -641,10 +645,13 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
{
struct amdxdna_dev_hdl *ndev;
struct amdxdna_dev *xdna;
int idx;
xdna = hwctx->client->xdna;
ndev = xdna->dev_handle;
ndev->hwctx_num--;
drm_sched_wqueue_stop(&hwctx->priv->sched);
/* Now, scheduler will not send command to device. */
@ -683,6 +690,9 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
int ret;
XDNA_DBG(xdna, "Config %d CU to %s", config->num_cus, hwctx->name);
if (XDNA_MBZ_DBG(xdna, config->pad, sizeof(config->pad)))
return -EINVAL;
if (hwctx->status != HWCTX_STAT_INIT) {
XDNA_ERR(xdna, "Not support re-config CU");
return -EINVAL;

View File

@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/xarray.h>
#include "aie2_msg_priv.h"
#include "aie2_pci.h"
@ -70,11 +71,18 @@ int aie2_resume_fw(struct amdxdna_dev_hdl *ndev)
int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value)
{
DECLARE_AIE2_MSG(set_runtime_cfg, MSG_OP_SET_RUNTIME_CONFIG);
int ret;
req.type = type;
req.value = value;
return aie2_send_mgmt_msg_wait(ndev, &msg);
ret = aie2_send_mgmt_msg_wait(ndev, &msg);
if (ret) {
XDNA_ERR(ndev->xdna, "Failed to set runtime config, ret %d", ret);
return ret;
}
return 0;
}
int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value)
@ -93,32 +101,6 @@ int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value)
return 0;
}
/*
 * Query the firmware's mailbox protocol version over the management channel
 * and validate it against the driver-supported version in ndev->priv.
 *
 * Return: 0 if compatible, a negative errno if the query fails or the
 * versions are incompatible.
 */
int aie2_check_protocol_version(struct amdxdna_dev_hdl *ndev)
{
	DECLARE_AIE2_MSG(protocol_version, MSG_OP_GET_PROTOCOL_VERSION);
	struct amdxdna_dev *xdna = ndev->xdna;
	int ret;

	ret = aie2_send_mgmt_msg_wait(ndev, &msg);
	if (ret) {
		XDNA_ERR(xdna, "Failed to get protocol version, ret %d", ret);
		return ret;
	}

	/* A major version mismatch means driver and firmware are incompatible */
	if (resp.major != ndev->priv->protocol_major) {
		XDNA_ERR(xdna, "Incompatible firmware protocol version major %d minor %d",
			 resp.major, resp.minor);
		return -EINVAL;
	}

	/* Firmware minor below the driver's minimum lacks required operations */
	if (resp.minor < ndev->priv->protocol_minor) {
		XDNA_ERR(xdna, "Firmware minor version smaller than supported");
		return -EINVAL;
	}
	return 0;
}
int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid)
{
DECLARE_AIE2_MSG(assign_mgmt_pasid, MSG_OP_ASSIGN_MGMT_PASID);
@ -315,10 +297,10 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
struct amdxdna_dev *xdna = ndev->xdna;
struct amdxdna_client *client;
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
dma_addr_t dma_addr;
u32 aie_bitmap = 0;
u8 *buff_addr;
int next = 0;
int ret, idx;
buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
@ -329,7 +311,7 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
/* Go through each hardware context and mark the AIE columns that are active */
list_for_each_entry(client, &xdna->client_list, node) {
idx = srcu_read_lock(&client->hwctx_srcu);
idr_for_each_entry_continue(&client->hwctx_idr, hwctx, next)
amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
srcu_read_unlock(&client->hwctx_srcu, idx);
}
@ -413,6 +395,9 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx)
for (i = 0; i < hwctx->cus->num_cus; i++) {
struct amdxdna_cu_config *cu = &hwctx->cus->cu_configs[i];
if (XDNA_MBZ_DBG(xdna, cu->pad, sizeof(cu->pad)))
return -EINVAL;
gobj = drm_gem_object_lookup(hwctx->client->filp, cu->cu_bo);
if (!gobj) {
XDNA_ERR(xdna, "Lookup GEM object failed");

View File

@ -15,6 +15,7 @@
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include <linux/xarray.h>
#include "aie2_msg_priv.h"
#include "aie2_pci.h"
@ -33,17 +34,51 @@ MODULE_PARM_DESC(aie2_max_col, "Maximum column could be used");
* The related register and ring buffer information is on SRAM BAR.
* This struct is the register layout.
*/
#define MGMT_MBOX_MAGIC 0x55504e5f /* _NPU */
struct mgmt_mbox_chann_info {
u32 x2i_tail;
u32 x2i_head;
u32 x2i_buf;
u32 x2i_buf_sz;
u32 i2x_tail;
u32 i2x_head;
u32 i2x_buf;
u32 i2x_buf_sz;
__u32 x2i_tail;
__u32 x2i_head;
__u32 x2i_buf;
__u32 x2i_buf_sz;
__u32 i2x_tail;
__u32 i2x_head;
__u32 i2x_buf;
__u32 i2x_buf_sz;
__u32 magic;
__u32 msi_id;
__u32 prot_major;
__u32 prot_minor;
__u32 rsvd[4];
};
/*
 * Validate the firmware-reported mailbox protocol version (fw_major,
 * fw_minor) against the version the driver supports.
 *
 * Return: 0 when compatible, -EINVAL otherwise.
 */
static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
{
	struct amdxdna_dev *xdna = ndev->xdna;

	/*
	 * The driver supported mailbox behavior is defined by
	 * ndev->priv->protocol_major and protocol_minor.
	 *
	 * When protocol_major and fw_major are different, it means driver
	 * and firmware are incompatible.
	 */
	if (ndev->priv->protocol_major != fw_major) {
		XDNA_ERR(xdna, "Incompatible firmware protocol major %d minor %d",
			 fw_major, fw_minor);
		return -EINVAL;
	}

	/*
	 * When protocol_minor is greater than fw_minor, that means driver
	 * relies on operation the installed firmware does not support.
	 */
	if (ndev->priv->protocol_minor > fw_minor) {
		XDNA_ERR(xdna, "Firmware minor version smaller than supported");
		return -EINVAL;
	}
	return 0;
}
static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
{
struct amdxdna_dev *xdna = ndev->xdna;
@ -57,6 +92,8 @@ static void aie2_dump_chann_info_debug(struct amdxdna_dev_hdl *ndev)
XDNA_DBG(xdna, "x2i ringbuf 0x%x", ndev->mgmt_x2i.rb_start_addr);
XDNA_DBG(xdna, "x2i rsize 0x%x", ndev->mgmt_x2i.rb_size);
XDNA_DBG(xdna, "x2i chann index 0x%x", ndev->mgmt_chan_idx);
XDNA_DBG(xdna, "mailbox protocol major 0x%x", ndev->mgmt_prot_major);
XDNA_DBG(xdna, "mailbox protocol minor 0x%x", ndev->mgmt_prot_minor);
}
static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
@ -87,6 +124,12 @@ static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
for (i = 0; i < sizeof(info_regs) / sizeof(u32); i++)
reg[i] = readl(ndev->sram_base + off + i * sizeof(u32));
if (info_regs.magic != MGMT_MBOX_MAGIC) {
XDNA_ERR(ndev->xdna, "Invalid mbox magic 0x%x", info_regs.magic);
ret = -EINVAL;
goto done;
}
i2x = &ndev->mgmt_i2x;
x2i = &ndev->mgmt_x2i;
@ -99,38 +142,42 @@ static int aie2_get_mgmt_chann_info(struct amdxdna_dev_hdl *ndev)
x2i->mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, info_regs.x2i_tail);
x2i->rb_start_addr = AIE2_SRAM_OFF(ndev, info_regs.x2i_buf);
x2i->rb_size = info_regs.x2i_buf_sz;
ndev->mgmt_chan_idx = CHANN_INDEX(ndev, x2i->rb_start_addr);
ndev->mgmt_chan_idx = info_regs.msi_id;
ndev->mgmt_prot_major = info_regs.prot_major;
ndev->mgmt_prot_minor = info_regs.prot_minor;
ret = aie2_check_protocol(ndev, ndev->mgmt_prot_major, ndev->mgmt_prot_minor);
done:
aie2_dump_chann_info_debug(ndev);
/* Must clear address at FW_ALIVE_OFF */
writel(0, SRAM_GET_ADDR(ndev, FW_ALIVE_OFF));
return 0;
return ret;
}
static int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev)
int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
enum rt_config_category category, u32 *val)
{
const struct rt_config *cfg = &ndev->priv->rt_config;
u64 value;
const struct rt_config *cfg;
u32 value;
int ret;
ret = aie2_set_runtime_cfg(ndev, cfg->type, cfg->value);
if (ret) {
XDNA_ERR(ndev->xdna, "Set runtime type %d value %d failed",
cfg->type, cfg->value);
return ret;
}
for (cfg = ndev->priv->rt_config; cfg->type; cfg++) {
if (cfg->category != category)
continue;
ret = aie2_get_runtime_cfg(ndev, cfg->type, &value);
if (ret) {
XDNA_ERR(ndev->xdna, "Get runtime cfg failed");
return ret;
value = val ? *val : cfg->value;
ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
if (ret) {
XDNA_ERR(ndev->xdna, "Set type %d value %d failed",
cfg->type, value);
return ret;
}
}
if (value != cfg->value)
return -EINVAL;
return 0;
}
@ -157,13 +204,7 @@ static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
{
int ret;
ret = aie2_check_protocol_version(ndev);
if (ret) {
XDNA_ERR(ndev->xdna, "Check header hash failed");
return ret;
}
ret = aie2_runtime_cfg(ndev);
ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_INIT, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Runtime config failed");
return ret;
@ -257,9 +298,25 @@ static int aie2_xrs_unload(void *cb_arg)
return ret;
}
/*
 * Resource-solver callback: record dpm_level as the new default DPM level.
 * The level is applied to hardware immediately only when the device is in
 * default power mode and the level actually changes; otherwise it is just
 * stored for later use. Caller must hold xdna->dev_lock.
 */
static int aie2_xrs_set_dft_dpm_level(struct drm_device *ddev, u32 dpm_level)
{
	struct amdxdna_dev *xdna = to_xdna_dev(ddev);
	struct amdxdna_dev_hdl *ndev = xdna->dev_handle;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	ndev->dft_dpm_level = dpm_level;

	/* Apply right away only in default power mode and when it changed */
	if (ndev->pw_mode == POWER_MODE_DEFAULT && ndev->dpm_level != dpm_level)
		return ndev->priv->hw_ops.set_dpm(ndev, dpm_level);

	return 0;
}
static struct xrs_action_ops aie2_xrs_actions = {
.load = aie2_xrs_load,
.unload = aie2_xrs_unload,
.set_dft_dpm_level = aie2_xrs_set_dft_dpm_level,
};
static void aie2_hw_stop(struct amdxdna_dev *xdna)
@ -267,12 +324,22 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna)
struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
if (ndev->dev_status <= AIE2_DEV_INIT) {
XDNA_ERR(xdna, "device is already stopped");
return;
}
aie2_mgmt_fw_fini(ndev);
xdna_mailbox_stop_channel(ndev->mgmt_chann);
xdna_mailbox_destroy_channel(ndev->mgmt_chann);
ndev->mgmt_chann = NULL;
drmm_kfree(&xdna->ddev, ndev->mbox);
ndev->mbox = NULL;
aie2_psp_stop(ndev->psp_hdl);
aie2_smu_fini(ndev);
pci_disable_device(pdev);
ndev->dev_status = AIE2_DEV_INIT;
}
static int aie2_hw_start(struct amdxdna_dev *xdna)
@ -283,6 +350,11 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
u32 xdna_mailbox_intr_reg;
int mgmt_mb_irq, ret;
if (ndev->dev_status >= AIE2_DEV_START) {
XDNA_INFO(xdna, "device is already started");
return 0;
}
ret = pci_enable_device(pdev);
if (ret) {
XDNA_ERR(xdna, "failed to enable device, ret %d", ret);
@ -339,12 +411,20 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
goto stop_psp;
}
ret = aie2_pm_init(ndev);
if (ret) {
XDNA_ERR(xdna, "failed to init pm, ret %d", ret);
goto destroy_mgmt_chann;
}
ret = aie2_mgmt_fw_init(ndev);
if (ret) {
XDNA_ERR(xdna, "initial mgmt firmware failed, ret %d", ret);
goto destroy_mgmt_chann;
}
ndev->dev_status = AIE2_DEV_START;
return 0;
destroy_mgmt_chann:
@ -463,10 +543,9 @@ static int aie2_init(struct amdxdna_dev *xdna)
}
ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
xrs_cfg.clk_list.num_levels = 3;
xrs_cfg.clk_list.cu_clk_list[0] = 0;
xrs_cfg.clk_list.cu_clk_list[1] = 800;
xrs_cfg.clk_list.cu_clk_list[2] = 1000;
xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
xrs_cfg.clk_list.cu_clk_list[i] = ndev->priv->dpm_clk_tbl[i].hclk;
xrs_cfg.sys_eff_factor = 1;
xrs_cfg.ddev = &xdna->ddev;
xrs_cfg.actions = &aie2_xrs_actions;
@ -623,6 +702,39 @@ static int aie2_get_aie_version(struct amdxdna_client *client,
return 0;
}
static int aie2_get_firmware_version(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
struct amdxdna_drm_query_firmware_version version;
struct amdxdna_dev *xdna = client->xdna;
version.major = xdna->fw_ver.major;
version.minor = xdna->fw_ver.minor;
version.patch = xdna->fw_ver.sub;
version.build = xdna->fw_ver.build;
if (copy_to_user(u64_to_user_ptr(args->buffer), &version, sizeof(version)))
return -EFAULT;
return 0;
}
static int aie2_get_power_mode(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
struct amdxdna_drm_get_power_mode mode = {};
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_dev_hdl *ndev;
ndev = xdna->dev_handle;
mode.power_mode = ndev->pw_mode;
if (copy_to_user(u64_to_user_ptr(args->buffer), &mode, sizeof(mode)))
return -EFAULT;
return 0;
}
static int aie2_get_clock_metadata(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
@ -636,11 +748,11 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client,
if (!clock)
return -ENOMEM;
memcpy(clock->mp_npu_clock.name, ndev->mp_npu_clock.name,
sizeof(clock->mp_npu_clock.name));
clock->mp_npu_clock.freq_mhz = ndev->mp_npu_clock.freq_mhz;
memcpy(clock->h_clock.name, ndev->h_clock.name, sizeof(clock->h_clock.name));
clock->h_clock.freq_mhz = ndev->h_clock.freq_mhz;
snprintf(clock->mp_npu_clock.name, sizeof(clock->mp_npu_clock.name),
"MP-NPU Clock");
clock->mp_npu_clock.freq_mhz = ndev->npuclk_freq;
snprintf(clock->h_clock.name, sizeof(clock->h_clock.name), "H Clock");
clock->h_clock.freq_mhz = ndev->hclk_freq;
if (copy_to_user(u64_to_user_ptr(args->buffer), clock, sizeof(*clock)))
ret = -EFAULT;
@ -657,11 +769,11 @@ static int aie2_get_hwctx_status(struct amdxdna_client *client,
struct amdxdna_drm_query_hwctx *tmp;
struct amdxdna_client *tmp_client;
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
bool overflow = false;
u32 req_bytes = 0;
u32 hw_i = 0;
int ret = 0;
int next;
int idx;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
@ -673,8 +785,7 @@ static int aie2_get_hwctx_status(struct amdxdna_client *client,
buf = u64_to_user_ptr(args->buffer);
list_for_each_entry(tmp_client, &xdna->client_list, node) {
idx = srcu_read_lock(&tmp_client->hwctx_srcu);
next = 0;
idr_for_each_entry_continue(&tmp_client->hwctx_idr, hwctx, next) {
amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
req_bytes += sizeof(*tmp);
if (args->buffer_size < req_bytes) {
/* Continue iterating to get the required size */
@ -736,6 +847,12 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
ret = aie2_get_hwctx_status(client, args);
break;
case DRM_AMDXDNA_QUERY_FIRMWARE_VERSION:
ret = aie2_get_firmware_version(client, args);
break;
case DRM_AMDXDNA_GET_POWER_MODE:
ret = aie2_get_power_mode(client, args);
break;
default:
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
ret = -EOPNOTSUPP;
@ -746,12 +863,61 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
return ret;
}
static int aie2_set_power_mode(struct amdxdna_client *client,
struct amdxdna_drm_set_state *args)
{
struct amdxdna_drm_set_power_mode power_state;
enum amdxdna_power_mode_type power_mode;
struct amdxdna_dev *xdna = client->xdna;
if (copy_from_user(&power_state, u64_to_user_ptr(args->buffer),
sizeof(power_state))) {
XDNA_ERR(xdna, "Failed to copy power mode request into kernel");
return -EFAULT;
}
if (XDNA_MBZ_DBG(xdna, power_state.pad, sizeof(power_state.pad)))
return -EINVAL;
power_mode = power_state.power_mode;
if (power_mode > POWER_MODE_TURBO) {
XDNA_ERR(xdna, "Invalid power mode %d", power_mode);
return -EINVAL;
}
return aie2_pm_set_mode(xdna->dev_handle, power_mode);
}
/*
 * Dispatch a SET_STATE ioctl request by args->param while holding a
 * drm_dev_enter() reference, so the device cannot be unplugged mid-call.
 *
 * Return: 0 on success, -ENODEV if the device is gone, -EOPNOTSUPP for an
 * unknown parameter, or the handler's error code.
 */
static int aie2_set_state(struct amdxdna_client *client,
			  struct amdxdna_drm_set_state *args)
{
	struct amdxdna_dev *xdna = client->xdna;
	int rc, idx;

	if (!drm_dev_enter(&xdna->ddev, &idx))
		return -ENODEV;

	if (args->param == DRM_AMDXDNA_SET_POWER_MODE) {
		rc = aie2_set_power_mode(client, args);
	} else {
		XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
		rc = -EOPNOTSUPP;
	}

	drm_dev_exit(idx);
	return rc;
}
const struct amdxdna_dev_ops aie2_ops = {
.init = aie2_init,
.fini = aie2_fini,
.resume = aie2_hw_start,
.suspend = aie2_hw_stop,
.get_aie_info = aie2_get_info,
.set_aie_state = aie2_set_state,
.hwctx_init = aie2_hwctx_init,
.hwctx_fini = aie2_hwctx_fini,
.hwctx_config = aie2_hwctx_config,

View File

@ -6,6 +6,7 @@
#ifndef _AIE2_PCI_H_
#define _AIE2_PCI_H_
#include <drm/amdxdna_accel.h>
#include <linux/semaphore.h>
#include "amdxdna_mailbox.h"
@ -38,9 +39,6 @@
})
#define CHAN_SLOT_SZ SZ_8K
#define CHANN_INDEX(ndev, rbuf_off) \
(((rbuf_off) - SRAM_REG_OFF((ndev), MBOX_CHANN_OFF)) / CHAN_SLOT_SZ)
#define MBOX_SIZE(ndev) \
({ \
typeof(ndev) _ndev = (ndev); \
@ -48,9 +46,6 @@
pci_resource_len(NDEV2PDEV(_ndev), (_ndev)->xdna->dev_info->mbox_bar); \
})
#define SMU_MPNPUCLK_FREQ_MAX(ndev) ((ndev)->priv->smu_mpnpuclk_freq_max)
#define SMU_HCLK_FREQ_MAX(ndev) ((ndev)->priv->smu_hclk_freq_max)
enum aie2_smu_reg_idx {
SMU_CMD_REG = 0,
SMU_ARG_REG,
@ -112,14 +107,20 @@ struct aie_metadata {
struct aie_tile_metadata shim;
};
struct clock_entry {
char name[16];
u32 freq_mhz;
enum rt_config_category {
AIE2_RT_CFG_INIT,
AIE2_RT_CFG_CLK_GATING,
};
struct rt_config {
u32 type;
u32 value;
u32 category;
};
struct dpm_clk_freq {
u32 npuclk;
u32 hclk;
};
/*
@ -149,6 +150,12 @@ struct amdxdna_hwctx_priv {
struct drm_syncobj *syncobj;
};
enum aie2_dev_status {
AIE2_DEV_UNINIT,
AIE2_DEV_INIT,
AIE2_DEV_START,
};
struct amdxdna_dev_hdl {
struct amdxdna_dev *xdna;
const struct amdxdna_dev_priv *priv;
@ -160,17 +167,29 @@ struct amdxdna_dev_hdl {
struct xdna_mailbox_chann_res mgmt_x2i;
struct xdna_mailbox_chann_res mgmt_i2x;
u32 mgmt_chan_idx;
u32 mgmt_prot_major;
u32 mgmt_prot_minor;
u32 total_col;
struct aie_version version;
struct aie_metadata metadata;
struct clock_entry mp_npu_clock;
struct clock_entry h_clock;
/* power management and clock*/
enum amdxdna_power_mode_type pw_mode;
u32 dpm_level;
u32 dft_dpm_level;
u32 max_dpm_level;
u32 clk_gating;
u32 npuclk_freq;
u32 hclk_freq;
/* Mailbox and the management channel */
struct mailbox *mbox;
struct mailbox_channel *mgmt_chann;
struct async_events *async_events;
enum aie2_dev_status dev_status;
u32 hwctx_num;
};
#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
@ -181,11 +200,17 @@ struct aie2_bar_off_pair {
u32 offset;
};
struct aie2_hw_ops {
int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
};
struct amdxdna_dev_priv {
const char *fw_path;
u64 protocol_major;
u64 protocol_minor;
struct rt_config rt_config;
const struct rt_config *rt_config;
const struct dpm_clk_freq *dpm_clk_tbl;
#define COL_ALIGN_NONE 0
#define COL_ALIGN_NATURE 1
u32 col_align;
@ -196,15 +221,29 @@ struct amdxdna_dev_priv {
struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX];
struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS];
struct aie2_bar_off_pair smu_regs_off[SMU_MAX_REGS];
u32 smu_mpnpuclk_freq_max;
u32 smu_hclk_freq_max;
struct aie2_hw_ops hw_ops;
};
extern const struct amdxdna_dev_ops aie2_ops;
int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
enum rt_config_category category, u32 *val);
/* aie2 npu hw config */
extern const struct dpm_clk_freq npu1_dpm_clk_table[];
extern const struct dpm_clk_freq npu4_dpm_clk_table[];
extern const struct rt_config npu1_default_rt_cfg[];
extern const struct rt_config npu4_default_rt_cfg[];
/* aie2_smu.c */
int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
void aie2_smu_fini(struct amdxdna_dev_hdl *ndev);
int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
/* aie2_pm.c */
int aie2_pm_init(struct amdxdna_dev_hdl *ndev);
int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target);
/* aie2_psp.c */
struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *conf);
@ -222,7 +261,6 @@ int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value);
int aie2_check_protocol_version(struct amdxdna_dev_hdl *ndev);
int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid);
int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version);
int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata);

View File

@ -0,0 +1,108 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024, Advanced Micro Devices, Inc.
*/
#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include "aie2_pci.h"
#include "amdxdna_pci_drv.h"
#define AIE2_CLK_GATING_ENABLE 1
#define AIE2_CLK_GATING_DISABLE 0
/*
 * aie2_pm_set_clk_gating() - program the firmware clock-gating setting.
 * @ndev: AIE2 device handle
 * @enable: AIE2_CLK_GATING_ENABLE or AIE2_CLK_GATING_DISABLE
 *
 * Sends the clock-gating runtime configuration to the device and, on
 * success, caches the value in @ndev so a later resume can restore it.
 *
 * Return: 0 on success or the aie2_runtime_cfg() error code.
 */
static int aie2_pm_set_clk_gating(struct amdxdna_dev_hdl *ndev, u32 enable)
{
	int err;

	err = aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, &enable);
	if (!err)
		ndev->clk_gating = enable;

	return err;
}
/*
 * aie2_pm_init() - set up device power management state.
 * @ndev: AIE2 device handle
 *
 * On resume (device status is no longer AIE2_DEV_UNINIT) this restores
 * the DPM level and clock-gating setting that were active before
 * suspend. On first initialization it discovers the highest DPM level
 * from the zero-terminated clock table, programs it, enables clock
 * gating, and records POWER_MODE_DEFAULT as the current mode.
 *
 * Return: 0 on success or a negative error code.
 */
int aie2_pm_init(struct amdxdna_dev_hdl *ndev)
{
	const struct dpm_clk_freq *clk_tbl = ndev->priv->dpm_clk_tbl;
	int err;

	if (ndev->dev_status != AIE2_DEV_UNINIT) {
		/* Resuming: re-apply the cached DPM and gating settings. */
		err = ndev->priv->hw_ops.set_dpm(ndev, ndev->dpm_level);
		if (err)
			return err;

		return aie2_pm_set_clk_gating(ndev, ndev->clk_gating);
	}

	/* The clock table ends with an all-zero sentinel entry. */
	while (clk_tbl[ndev->max_dpm_level].hclk)
		ndev->max_dpm_level++;
	ndev->max_dpm_level--;

	err = ndev->priv->hw_ops.set_dpm(ndev, ndev->max_dpm_level);
	if (err)
		return err;

	err = aie2_pm_set_clk_gating(ndev, AIE2_CLK_GATING_ENABLE);
	if (err)
		return err;

	ndev->pw_mode = POWER_MODE_DEFAULT;
	ndev->dft_dpm_level = ndev->max_dpm_level;

	return 0;
}
/*
 * aie2_pm_set_mode() - switch the device to a new power mode.
 * @ndev: AIE2 device handle
 * @target: requested power mode
 *
 * Maps the requested mode onto a DPM level plus a clock-gating setting
 * and programs both. Turbo is rejected while hardware contexts are
 * active. The caller must hold xdna->dev_lock.
 *
 * Return: 0 on success, -EINVAL for turbo with active contexts,
 * -EOPNOTSUPP for an unsupported mode, or a hardware error code.
 */
int aie2_pm_set_mode(struct amdxdna_dev_hdl *ndev, enum amdxdna_power_mode_type target)
{
	struct amdxdna_dev *xdna = ndev->xdna;
	u32 gating, level;
	int err;

	drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));

	/* Nothing to do if the requested mode is already active. */
	if (target == ndev->pw_mode)
		return 0;

	if (target == POWER_MODE_TURBO) {
		/* Turbo disables clock gating; forbid it under active contexts. */
		if (ndev->hwctx_num) {
			XDNA_ERR(xdna, "Can not set turbo when there is active hwctx");
			return -EINVAL;
		}
		gating = AIE2_CLK_GATING_DISABLE;
		level = ndev->max_dpm_level;
	} else if (target == POWER_MODE_HIGH) {
		gating = AIE2_CLK_GATING_ENABLE;
		level = ndev->max_dpm_level;
	} else if (target == POWER_MODE_DEFAULT) {
		gating = AIE2_CLK_GATING_ENABLE;
		level = ndev->dft_dpm_level;
	} else {
		return -EOPNOTSUPP;
	}

	err = ndev->priv->hw_ops.set_dpm(ndev, level);
	if (err)
		return err;

	err = aie2_pm_set_clk_gating(ndev, gating);
	if (err)
		return err;

	ndev->pw_mode = target;

	return 0;
}

View File

@ -19,8 +19,11 @@
#define AIE2_SMU_POWER_OFF 0x4
#define AIE2_SMU_SET_MPNPUCLK_FREQ 0x5
#define AIE2_SMU_SET_HCLK_FREQ 0x6
#define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7
#define AIE2_SMU_SET_HARD_DPMLEVEL 0x8
static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd, u32 reg_arg)
static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd,
u32 reg_arg, u32 *out)
{
u32 resp;
int ret;
@ -40,6 +43,9 @@ static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd, u32 reg_arg)
return ret;
}
if (out)
*out = readl(SMU_REG(ndev, SMU_OUT_REG));
if (resp != SMU_RESULT_OK) {
XDNA_ERR(ndev->xdna, "smu cmd %d failed, 0x%x", reg_cmd, resp);
return -EINVAL;
@ -48,64 +54,72 @@ static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd, u32 reg_arg)
return 0;
}
static int aie2_smu_set_mpnpu_clock_freq(struct amdxdna_dev_hdl *ndev, u32 freq_mhz)
int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
{
u32 freq;
int ret;
if (!freq_mhz || freq_mhz > SMU_MPNPUCLK_FREQ_MAX(ndev)) {
XDNA_ERR(ndev->xdna, "invalid mpnpu clock freq %d", freq_mhz);
return -EINVAL;
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
if (ret) {
XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
}
ndev->npuclk_freq = freq;
ndev->mp_npu_clock.freq_mhz = freq_mhz;
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ, freq_mhz);
if (!ret)
XDNA_INFO_ONCE(ndev->xdna, "set mpnpu_clock = %d mhz", freq_mhz);
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HCLK_FREQ,
ndev->priv->dpm_clk_tbl[dpm_level].hclk, &freq);
if (ret) {
XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
}
ndev->hclk_freq = freq;
ndev->dpm_level = dpm_level;
return ret;
XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
ndev->npuclk_freq, ndev->hclk_freq);
return 0;
}
static int aie2_smu_set_hclock_freq(struct amdxdna_dev_hdl *ndev, u32 freq_mhz)
int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
{
int ret;
if (!freq_mhz || freq_mhz > SMU_HCLK_FREQ_MAX(ndev)) {
XDNA_ERR(ndev->xdna, "invalid hclock freq %d", freq_mhz);
return -EINVAL;
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
dpm_level, ret);
return ret;
}
ndev->h_clock.freq_mhz = freq_mhz;
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HCLK_FREQ, freq_mhz);
if (!ret)
XDNA_INFO_ONCE(ndev->xdna, "set npu_hclock = %d mhz", freq_mhz);
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
dpm_level, ret);
return ret;
}
return ret;
ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
ndev->dpm_level = dpm_level;
XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
ndev->npuclk_freq, ndev->hclk_freq);
return 0;
}
int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
{
int ret;
ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0);
ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret);
return ret;
}
ret = aie2_smu_set_mpnpu_clock_freq(ndev, SMU_MPNPUCLK_FREQ_MAX(ndev));
if (ret) {
XDNA_ERR(ndev->xdna, "Set mpnpu clk freq failed, ret %d", ret);
return ret;
}
snprintf(ndev->mp_npu_clock.name, sizeof(ndev->mp_npu_clock.name), "MP-NPU Clock");
ret = aie2_smu_set_hclock_freq(ndev, SMU_HCLK_FREQ_MAX(ndev));
if (ret) {
XDNA_ERR(ndev->xdna, "Set hclk freq failed, ret %d", ret);
return ret;
}
snprintf(ndev->h_clock.name, sizeof(ndev->h_clock.name), "H Clock");
return 0;
}
@ -113,7 +127,8 @@ void aie2_smu_fini(struct amdxdna_dev_hdl *ndev)
{
int ret;
ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0);
ndev->priv->hw_ops.set_dpm(ndev, 0);
ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
if (ret)
XDNA_ERR(ndev->xdna, "Power off failed, ret %d", ret);
}

View File

@ -8,6 +8,7 @@
#include <drm/drm_print.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
#include "aie2_solver.h"
@ -25,6 +26,7 @@ struct solver_node {
struct partition_node *pt_node;
void *cb_arg;
u32 dpm_level;
u32 cols_len;
u32 start_cols[] __counted_by(cols_len);
};
@ -95,6 +97,51 @@ static int sanity_check(struct solver_state *xrs, struct alloc_requests *req)
return 0;
}
static bool is_valid_qos_dpm_params(struct aie_qos *rqos)
{
/*
* gops is retrieved from the xmodel, so it's always set
* fps and latency are the configurable params from the application
*/
if (rqos->gops > 0 && (rqos->fps > 0 || rqos->latency > 0))
return true;
return false;
}
static int set_dpm_level(struct solver_state *xrs, struct alloc_requests *req, u32 *dpm_level)
{
struct solver_rgroup *rgp = &xrs->rgp;
struct cdo_parts *cdop = &req->cdo;
struct aie_qos *rqos = &req->rqos;
u32 freq, max_dpm_level, level;
struct solver_node *node;
max_dpm_level = xrs->cfg.clk_list.num_levels - 1;
/* If no QoS parameters are passed, set it to the max DPM level */
if (!is_valid_qos_dpm_params(rqos)) {
level = max_dpm_level;
goto set_dpm;
}
/* Find one CDO group that meet the GOPs requirement. */
for (level = 0; level < max_dpm_level; level++) {
freq = xrs->cfg.clk_list.cu_clk_list[level];
if (!qos_meet(xrs, rqos, cdop->qos_cap.opc * freq / 1000))
break;
}
/* set the dpm level which fits all the sessions */
list_for_each_entry(node, &rgp->node_list, list) {
if (node->dpm_level > level)
level = node->dpm_level;
}
set_dpm:
*dpm_level = level;
return xrs->cfg.actions->set_dft_dpm_level(xrs->cfg.ddev, level);
}
static struct solver_node *rg_search_node(struct solver_rgroup *rgp, u64 rid)
{
struct solver_node *node;
@ -159,12 +206,9 @@ static int get_free_partition(struct solver_state *xrs,
pt_node->ncols = ncols;
/*
* Before fully support latency in QoS, if a request
* specifies a non-zero latency value, it will not share
* the partition with other requests.
* Always set exclusive to false for now.
*/
if (req->rqos.latency)
pt_node->exclusive = true;
pt_node->exclusive = false;
list_add_tail(&pt_node->list, &xrs->rgp.pt_node_list);
xrs->rgp.npartition_node++;
@ -257,6 +301,7 @@ int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg)
struct xrs_action_load load_act;
struct solver_node *snode;
struct solver_state *xrs;
u32 dpm_level;
int ret;
xrs = (struct solver_state *)hdl;
@ -281,6 +326,11 @@ int xrs_allocate_resource(void *hdl, struct alloc_requests *req, void *cb_arg)
if (ret)
goto free_node;
ret = set_dpm_level(xrs, req, &dpm_level);
if (ret)
goto free_node;
snode->dpm_level = dpm_level;
snode->cb_arg = cb_arg;
drm_dbg(xrs->cfg.ddev, "start col %d ncols %d\n",

View File

@ -99,6 +99,7 @@ struct clk_list_info {
struct xrs_action_ops {
int (*load)(void *cb_arg, struct xrs_action_load *action);
int (*unload)(void *cb_arg);
int (*set_dft_dpm_level)(struct drm_device *ddev, u32 level);
};
/*

View File

@ -11,6 +11,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/xarray.h>
#include <trace/events/amdxdna.h>
#include "amdxdna_ctx.h"
@ -63,11 +64,11 @@ void amdxdna_hwctx_suspend(struct amdxdna_client *client)
{
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_hwctx *hwctx;
int next = 0;
unsigned long hwctx_id;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
mutex_lock(&client->hwctx_lock);
idr_for_each_entry_continue(&client->hwctx_idr, hwctx, next)
amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
xdna->dev_info->ops->hwctx_suspend(hwctx);
mutex_unlock(&client->hwctx_lock);
}
@ -76,11 +77,11 @@ void amdxdna_hwctx_resume(struct amdxdna_client *client)
{
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_hwctx *hwctx;
int next = 0;
unsigned long hwctx_id;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
mutex_lock(&client->hwctx_lock);
idr_for_each_entry_continue(&client->hwctx_idr, hwctx, next)
amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
xdna->dev_info->ops->hwctx_resume(hwctx);
mutex_unlock(&client->hwctx_lock);
}
@ -149,13 +150,13 @@ int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
{
struct amdxdna_hwctx *hwctx;
int next = 0;
unsigned long hwctx_id;
mutex_lock(&client->hwctx_lock);
idr_for_each_entry_continue(&client->hwctx_idr, hwctx, next) {
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
XDNA_DBG(client->xdna, "PID %d close HW context %d",
client->pid, hwctx->id);
idr_remove(&client->hwctx_idr, hwctx->id);
xa_erase(&client->hwctx_xa, hwctx->id);
mutex_unlock(&client->hwctx_lock);
amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
mutex_lock(&client->hwctx_lock);
@ -194,15 +195,13 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
hwctx->num_tiles = args->num_tiles;
hwctx->mem_size = args->mem_size;
hwctx->max_opc = args->max_opc;
mutex_lock(&client->hwctx_lock);
ret = idr_alloc_cyclic(&client->hwctx_idr, hwctx, 0, MAX_HWCTX_ID, GFP_KERNEL);
ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
&client->next_hwctxid, GFP_KERNEL);
if (ret < 0) {
mutex_unlock(&client->hwctx_lock);
XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
goto free_hwctx;
}
hwctx->id = ret;
mutex_unlock(&client->hwctx_lock);
hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
if (!hwctx->name) {
@ -228,9 +227,7 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
free_name:
kfree(hwctx->name);
rm_id:
mutex_lock(&client->hwctx_lock);
idr_remove(&client->hwctx_idr, hwctx->id);
mutex_unlock(&client->hwctx_lock);
xa_erase(&client->hwctx_xa, hwctx->id);
free_hwctx:
kfree(hwctx);
exit:
@ -246,27 +243,24 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
struct amdxdna_hwctx *hwctx;
int ret = 0, idx;
if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
return -EINVAL;
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
/*
* Use hwctx_lock to achieve exclusion with other hwctx writers,
* SRCU to synchronize with exec/wait command ioctls.
*
* The pushed jobs are handled by DRM scheduler during destroy.
*/
mutex_lock(&client->hwctx_lock);
hwctx = idr_find(&client->hwctx_idr, args->handle);
hwctx = xa_erase(&client->hwctx_xa, args->handle);
if (!hwctx) {
mutex_unlock(&client->hwctx_lock);
ret = -EINVAL;
XDNA_DBG(xdna, "PID %d HW context %d not exist",
client->pid, args->handle);
goto out;
}
idr_remove(&client->hwctx_idr, hwctx->id);
mutex_unlock(&client->hwctx_lock);
/*
* The pushed jobs are handled by DRM scheduler during destroy.
* SRCU to synchronize with exec command ioctls.
*/
amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
@ -286,6 +280,9 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
void *buf;
u64 val;
if (XDNA_MBZ_DBG(xdna, &args->pad, sizeof(args->pad)))
return -EINVAL;
if (!xdna->dev_info->ops->hwctx_config)
return -EOPNOTSUPP;
@ -324,7 +321,7 @@ int amdxdna_drm_config_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
mutex_lock(&xdna->dev_lock);
idx = srcu_read_lock(&client->hwctx_srcu);
hwctx = idr_find(&client->hwctx_idr, args->handle);
hwctx = xa_load(&client->hwctx_xa, args->handle);
if (!hwctx) {
XDNA_DBG(xdna, "PID %d failed to get hwctx %d", client->pid, args->handle);
ret = -EINVAL;
@ -436,7 +433,7 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
}
idx = srcu_read_lock(&client->hwctx_srcu);
hwctx = idr_find(&client->hwctx_idr, hwctx_hdl);
hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
if (!hwctx) {
XDNA_DBG(xdna, "PID %d failed to get hwctx %d",
client->pid, hwctx_hdl);

View File

@ -552,7 +552,7 @@ int amdxdna_drm_get_bo_info_ioctl(struct drm_device *dev, void *data, struct drm
struct drm_gem_object *gobj;
int ret = 0;
if (args->ext || args->ext_flags)
if (args->ext || args->ext_flags || args->pad)
return -EINVAL;
gobj = drm_gem_object_lookup(filp, args->handle);

View File

@ -6,7 +6,9 @@
#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/xarray.h>
#define CREATE_TRACE_POINTS
#include <trace/events/amdxdna.h>
@ -54,8 +56,8 @@ struct mailbox_channel {
struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
int msix_irq;
u32 iohub_int_addr;
struct idr chan_idr;
spinlock_t chan_idr_lock; /* protect chan_idr */
struct xarray chan_xa;
u32 next_msgid;
u32 x2i_tail;
/* Received msg related fields */
@ -164,19 +166,17 @@ static inline int mailbox_validate_msgid(int msg_id)
static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
unsigned long flags;
int msg_id;
u32 msg_id;
int ret;
spin_lock_irqsave(&mb_chann->chan_idr_lock, flags);
msg_id = idr_alloc_cyclic(&mb_chann->chan_idr, mb_msg, 0,
MAX_MSG_ID_ENTRIES, GFP_NOWAIT);
spin_unlock_irqrestore(&mb_chann->chan_idr_lock, flags);
if (msg_id < 0)
return msg_id;
ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
&mb_chann->next_msgid, GFP_NOWAIT);
if (ret < 0)
return ret;
/*
* The IDR becomes less efficient when dealing with larger IDs.
* Thus, add MAGIC_VAL to the higher bits.
* Add MAGIC_VAL to the higher bits.
*/
msg_id |= MAGIC_VAL;
return msg_id;
@ -184,25 +184,17 @@ static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbo
static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
{
unsigned long flags;
msg_id &= ~MAGIC_VAL_MASK;
spin_lock_irqsave(&mb_chann->chan_idr_lock, flags);
idr_remove(&mb_chann->chan_idr, msg_id);
spin_unlock_irqrestore(&mb_chann->chan_idr_lock, flags);
xa_erase_irq(&mb_chann->chan_xa, msg_id);
}
static int mailbox_release_msg(int id, void *p, void *data)
static void mailbox_release_msg(struct mailbox_channel *mb_chann,
struct mailbox_msg *mb_msg)
{
struct mailbox_channel *mb_chann = data;
struct mailbox_msg *mb_msg = p;
MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
mb_msg->notify_cb(mb_msg->handle, NULL, 0);
kfree(mb_msg);
return 0;
}
static int
@ -254,7 +246,6 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
void *data)
{
struct mailbox_msg *mb_msg;
unsigned long flags;
int msg_id;
int ret;
@ -265,15 +256,11 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
}
msg_id &= ~MAGIC_VAL_MASK;
spin_lock_irqsave(&mb_chann->chan_idr_lock, flags);
mb_msg = idr_find(&mb_chann->chan_idr, msg_id);
mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
if (!mb_msg) {
MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
spin_unlock_irqrestore(&mb_chann->chan_idr_lock, flags);
return -EINVAL;
}
idr_remove(&mb_chann->chan_idr, msg_id);
spin_unlock_irqrestore(&mb_chann->chan_idr_lock, flags);
MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
header->opcode, header->total_size, header->id);
@ -497,8 +484,7 @@ xdna_mailbox_create_channel(struct mailbox *mb,
memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));
spin_lock_init(&mb_chann->chan_idr_lock);
idr_init(&mb_chann->chan_idr);
xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);
@ -530,16 +516,18 @@ xdna_mailbox_create_channel(struct mailbox *mb,
int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
if (!mb_chann)
return 0;
struct mailbox_msg *mb_msg;
unsigned long msg_id;
MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
free_irq(mb_chann->msix_irq, mb_chann);
destroy_workqueue(mb_chann->work_q);
/* We can clean up and release resources */
idr_for_each(&mb_chann->chan_idr, mailbox_release_msg, mb_chann);
idr_destroy(&mb_chann->chan_idr);
xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
mailbox_release_msg(mb_chann, mb_msg);
xa_destroy(&mb_chann->chan_xa);
MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
kfree(mb_chann);
@ -548,9 +536,6 @@ int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
if (!mb_chann)
return;
/* Disable an irq and wait. This might sleep. */
disable_irq(mb_chann->msix_irq);

View File

@ -39,6 +39,7 @@ static const struct amdxdna_device_id amdxdna_ids[] = {
{ 0x17f0, 0x0, &dev_npu2_info },
{ 0x17f0, 0x10, &dev_npu4_info },
{ 0x17f0, 0x11, &dev_npu5_info },
{ 0x17f0, 0x20, &dev_npu6_info },
{0}
};
@ -77,7 +78,7 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
}
mutex_init(&client->hwctx_lock);
init_srcu_struct(&client->hwctx_srcu);
idr_init_base(&client->hwctx_idr, AMDXDNA_INVALID_CTX_HANDLE + 1);
xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
mutex_init(&client->mm_lock);
mutex_lock(&xdna->dev_lock);
@ -108,7 +109,7 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
XDNA_DBG(xdna, "closing pid %d", client->pid);
idr_destroy(&client->hwctx_idr);
xa_destroy(&client->hwctx_xa);
cleanup_srcu_struct(&client->hwctx_srcu);
mutex_destroy(&client->hwctx_lock);
mutex_destroy(&client->mm_lock);
@ -160,6 +161,24 @@ static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct
return ret;
}
static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct amdxdna_drm_set_state *args = data;
int ret;
if (!xdna->dev_info->ops->set_aie_state)
return -EOPNOTSUPP;
XDNA_DBG(xdna, "Request parameter %u", args->param);
mutex_lock(&xdna->dev_lock);
ret = xdna->dev_info->ops->set_aie_state(client, args);
mutex_unlock(&xdna->dev_lock);
return ret;
}
static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
/* Context */
DRM_IOCTL_DEF_DRV(AMDXDNA_CREATE_HWCTX, amdxdna_drm_create_hwctx_ioctl, 0),
@ -173,6 +192,7 @@ static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
/* AIE hardware */
DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
static const struct file_operations amdxdna_fops = {
@ -390,8 +410,8 @@ static int amdxdna_rpmops_resume(struct device *dev)
}
static const struct dev_pm_ops amdxdna_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
SET_RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
};
static struct pci_driver amdxdna_pci_driver = {

View File

@ -6,12 +6,29 @@
#ifndef _AMDXDNA_PCI_DRV_H_
#define _AMDXDNA_PCI_DRV_H_
#include <linux/xarray.h>
#define XDNA_INFO(xdna, fmt, args...) drm_info(&(xdna)->ddev, fmt, ##args)
#define XDNA_WARN(xdna, fmt, args...) drm_warn(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
#define XDNA_ERR(xdna, fmt, args...) drm_err(&(xdna)->ddev, "%s: "fmt, __func__, ##args)
#define XDNA_DBG(xdna, fmt, args...) drm_dbg(&(xdna)->ddev, fmt, ##args)
#define XDNA_INFO_ONCE(xdna, fmt, args...) drm_info_once(&(xdna)->ddev, fmt, ##args)
/*
 * XDNA_MBZ_DBG() - verify a "must be zero" region is all zero bytes.
 * @xdna: device used for the debug message on failure
 * @ptr: start of the region to check
 * @sz: number of bytes that must be zero
 *
 * Used to validate reserved/padding fields of ioctl argument structs.
 * Evaluates to 0 when every byte is zero, -EINVAL otherwise (and logs
 * a debug message on the first nonzero byte).
 */
#define XDNA_MBZ_DBG(xdna, ptr, sz) \
({ \
int __i; \
int __ret = 0; \
u8 *__ptr = (u8 *)(ptr); \
for (__i = 0; __i < (sz); __i++) { \
if (__ptr[__i]) { \
XDNA_DBG(xdna, "MBZ check failed"); \
__ret = -EINVAL; \
break; \
} \
} \
__ret; \
})
#define to_xdna_dev(drm_dev) \
((struct amdxdna_dev *)container_of(drm_dev, struct amdxdna_dev, ddev))
@ -20,6 +37,7 @@ extern const struct drm_driver amdxdna_drm_drv;
struct amdxdna_client;
struct amdxdna_dev;
struct amdxdna_drm_get_info;
struct amdxdna_drm_set_state;
struct amdxdna_gem_obj;
struct amdxdna_hwctx;
struct amdxdna_sched_job;
@ -40,6 +58,7 @@ struct amdxdna_dev_ops {
void (*hwctx_resume)(struct amdxdna_hwctx *hwctx);
int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
};
/*
@ -100,7 +119,8 @@ struct amdxdna_client {
struct mutex hwctx_lock; /* protect hwctx */
/* do NOT wait this srcu when hwctx_lock is held */
struct srcu_struct hwctx_srcu;
struct idr hwctx_idr;
struct xarray hwctx_xa;
u32 next_hwctxid;
struct amdxdna_dev *xdna;
struct drm_file *filp;
@ -111,11 +131,15 @@ struct amdxdna_client {
int pasid;
};
#define amdxdna_for_each_hwctx(client, hwctx_id, entry) \
xa_for_each(&(client)->hwctx_xa, hwctx_id, entry)
/* Add device info below */
extern const struct amdxdna_dev_info dev_npu1_info;
extern const struct amdxdna_dev_info dev_npu2_info;
extern const struct amdxdna_dev_info dev_npu4_info;
extern const struct amdxdna_dev_info dev_npu5_info;
extern const struct amdxdna_dev_info dev_npu6_info;
int amdxdna_sysfs_init(struct amdxdna_dev *xdna);
void amdxdna_sysfs_fini(struct amdxdna_dev *xdna);

View File

@ -44,18 +44,30 @@
#define NPU1_SMU_BAR_BASE MPNPU_APERTURE0_BASE
#define NPU1_SRAM_BAR_BASE MPNPU_APERTURE1_BASE
#define NPU1_RT_CFG_TYPE_PDI_LOAD 2
#define NPU1_RT_CFG_VAL_PDI_LOAD_MGMT 0
#define NPU1_RT_CFG_VAL_PDI_LOAD_APP 1
/*
 * Default runtime configurations applied at NPU1 initialization.
 * The list is zero-terminated. Type 2 is the PDI load mode (formerly
 * NPU1_RT_CFG_TYPE_PDI_LOAD) with value 1 selecting application PDI
 * load (formerly NPU1_RT_CFG_VAL_PDI_LOAD_APP); type 1 turns clock
 * gating on.
 */
const struct rt_config npu1_default_rt_cfg[] = {
{ 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
{ 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 0 },
};
#define NPU1_MPNPUCLK_FREQ_MAX 600
#define NPU1_HCLK_FREQ_MAX 1024
/*
 * NPU1 DPM clock table, indexed by DPM level (dpm_clk_tbl[level]).
 * Each entry is { npuclk, hclk }; values appear to be MHz, matching
 * the former freq_mhz SMU interface -- TODO confirm. Zero-terminated,
 * so the highest valid level here is 7.
 */
const struct dpm_clk_freq npu1_dpm_clk_table[] = {
{400, 800},
{600, 1024},
{600, 1024},
{600, 1024},
{600, 1024},
{720, 1309},
{720, 1309},
{847, 1600},
{ 0 }
};
const struct amdxdna_dev_priv npu1_dev_priv = {
.fw_path = "amdnpu/1502_00/npu.sbin",
.protocol_major = 0x5,
.protocol_minor = 0x1,
.rt_config = {NPU1_RT_CFG_TYPE_PDI_LOAD, NPU1_RT_CFG_VAL_PDI_LOAD_APP},
.protocol_minor = 0x7,
.rt_config = npu1_default_rt_cfg,
.dpm_clk_tbl = npu1_dpm_clk_table,
.col_align = COL_ALIGN_NONE,
.mbox_dev_addr = NPU1_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
@ -80,8 +92,9 @@ const struct amdxdna_dev_priv npu1_dev_priv = {
DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU1_SMU, MPNPU_PUB_SCRATCH6),
DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU1_SMU, MPNPU_PUB_SCRATCH7),
},
.smu_mpnpuclk_freq_max = NPU1_MPNPUCLK_FREQ_MAX,
.smu_hclk_freq_max = NPU1_HCLK_FREQ_MAX,
.hw_ops = {
.set_dpm = npu1_set_dpm,
},
};
const struct amdxdna_dev_info dev_npu1_info = {

View File

@ -61,18 +61,12 @@
#define NPU2_SMU_BAR_BASE MMNPU_APERTURE4_BASE
#define NPU2_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
#define NPU2_RT_CFG_TYPE_PDI_LOAD 5
#define NPU2_RT_CFG_VAL_PDI_LOAD_MGMT 0
#define NPU2_RT_CFG_VAL_PDI_LOAD_APP 1
#define NPU2_MPNPUCLK_FREQ_MAX 1267
#define NPU2_HCLK_FREQ_MAX 1800
const struct amdxdna_dev_priv npu2_dev_priv = {
.fw_path = "amdnpu/17f0_00/npu.sbin",
.protocol_major = 0x6,
.protocol_minor = 0x1,
.rt_config = {NPU2_RT_CFG_TYPE_PDI_LOAD, NPU2_RT_CFG_VAL_PDI_LOAD_APP},
.protocol_minor = 0x6,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU2_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
@ -97,8 +91,9 @@ const struct amdxdna_dev_priv npu2_dev_priv = {
DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU2_SMU, MP1_C2PMSG_61),
DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU2_SMU, MP1_C2PMSG_60),
},
.smu_mpnpuclk_freq_max = NPU2_MPNPUCLK_FREQ_MAX,
.smu_hclk_freq_max = NPU2_HCLK_FREQ_MAX,
.hw_ops = {
.set_dpm = npu4_set_dpm,
},
};
const struct amdxdna_dev_info dev_npu2_info = {

View File

@ -61,18 +61,33 @@
#define NPU4_SMU_BAR_BASE MMNPU_APERTURE4_BASE
#define NPU4_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
#define NPU4_RT_CFG_TYPE_PDI_LOAD 5
#define NPU4_RT_CFG_VAL_PDI_LOAD_MGMT 0
#define NPU4_RT_CFG_VAL_PDI_LOAD_APP 1
/*
 * Default runtime configurations applied at initialization for
 * NPU4-class devices (also reused by NPU2, NPU5 and NPU6). The list is
 * zero-terminated. Type 5 is the PDI load mode (formerly
 * NPU4_RT_CFG_TYPE_PDI_LOAD) with value 1 selecting application PDI
 * load. Types 1-4 each enable clock gating -- presumably one per
 * gating domain; confirm against the firmware runtime-config spec.
 */
const struct rt_config npu4_default_rt_cfg[] = {
{ 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
{ 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 0 },
};
#define NPU4_MPNPUCLK_FREQ_MAX 1267
#define NPU4_HCLK_FREQ_MAX 1800
/*
 * NPU4 DPM clock table (shared by NPU2, NPU5 and NPU6), indexed by DPM
 * level. Each entry is { npuclk, hclk }; values appear to be MHz,
 * matching the former freq_mhz SMU interface -- TODO confirm.
 * Zero-terminated, so the highest valid level here is 7.
 */
const struct dpm_clk_freq npu4_dpm_clk_table[] = {
{396, 792},
{600, 1056},
{792, 1152},
{975, 1267},
{975, 1267},
{1056, 1408},
{1152, 1584},
{1267, 1800},
{ 0 }
};
const struct amdxdna_dev_priv npu4_dev_priv = {
.fw_path = "amdnpu/17f0_10/npu.sbin",
.protocol_major = 0x6,
.protocol_minor = 0x1,
.rt_config = {NPU4_RT_CFG_TYPE_PDI_LOAD, NPU4_RT_CFG_VAL_PDI_LOAD_APP},
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU4_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
@ -97,8 +112,9 @@ const struct amdxdna_dev_priv npu4_dev_priv = {
DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU4_SMU, MP1_C2PMSG_61),
DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU4_SMU, MP1_C2PMSG_60),
},
.smu_mpnpuclk_freq_max = NPU4_MPNPUCLK_FREQ_MAX,
.smu_hclk_freq_max = NPU4_HCLK_FREQ_MAX,
.hw_ops = {
.set_dpm = npu4_set_dpm,
},
};
const struct amdxdna_dev_info dev_npu4_info = {

View File

@ -61,18 +61,12 @@
#define NPU5_SMU_BAR_BASE MMNPU_APERTURE4_BASE
#define NPU5_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
#define NPU5_RT_CFG_TYPE_PDI_LOAD 5
#define NPU5_RT_CFG_VAL_PDI_LOAD_MGMT 0
#define NPU5_RT_CFG_VAL_PDI_LOAD_APP 1
#define NPU5_MPNPUCLK_FREQ_MAX 1267
#define NPU5_HCLK_FREQ_MAX 1800
const struct amdxdna_dev_priv npu5_dev_priv = {
.fw_path = "amdnpu/17f0_11/npu.sbin",
.protocol_major = 0x6,
.protocol_minor = 0x1,
.rt_config = {NPU5_RT_CFG_TYPE_PDI_LOAD, NPU5_RT_CFG_VAL_PDI_LOAD_APP},
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU5_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
@ -97,8 +91,9 @@ const struct amdxdna_dev_priv npu5_dev_priv = {
DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU5_SMU, MP1_C2PMSG_61),
DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU5_SMU, MP1_C2PMSG_60),
},
.smu_mpnpuclk_freq_max = NPU5_MPNPUCLK_FREQ_MAX,
.smu_hclk_freq_max = NPU5_HCLK_FREQ_MAX,
.hw_ops = {
.set_dpm = npu4_set_dpm,
},
};
const struct amdxdna_dev_info dev_npu5_info = {

View File

@ -0,0 +1,114 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2024, Advanced Micro Devices, Inc.
*/
#include <drm/amdxdna_accel.h>
#include <drm/drm_device.h>
#include <drm/gpu_scheduler.h>
#include <linux/sizes.h>
#include "aie2_pci.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
/* NPU Public Registers on MpNPUAxiXbar (refer to Diag npu_registers.h) */
#define MPNPU_PUB_SEC_INTR 0x3010060
#define MPNPU_PUB_PWRMGMT_INTR 0x3010064
#define MPNPU_PUB_SCRATCH0 0x301006C
#define MPNPU_PUB_SCRATCH1 0x3010070
#define MPNPU_PUB_SCRATCH2 0x3010074
#define MPNPU_PUB_SCRATCH3 0x3010078
#define MPNPU_PUB_SCRATCH4 0x301007C
#define MPNPU_PUB_SCRATCH5 0x3010080
#define MPNPU_PUB_SCRATCH6 0x3010084
#define MPNPU_PUB_SCRATCH7 0x3010088
#define MPNPU_PUB_SCRATCH8 0x301008C
#define MPNPU_PUB_SCRATCH9 0x3010090
#define MPNPU_PUB_SCRATCH10 0x3010094
#define MPNPU_PUB_SCRATCH11 0x3010098
#define MPNPU_PUB_SCRATCH12 0x301009C
#define MPNPU_PUB_SCRATCH13 0x30100A0
#define MPNPU_PUB_SCRATCH14 0x30100A4
#define MPNPU_PUB_SCRATCH15 0x30100A8
#define MP0_C2PMSG_73 0x3810A24
#define MP0_C2PMSG_123 0x3810AEC
#define MP1_C2PMSG_0 0x3B10900
#define MP1_C2PMSG_60 0x3B109F0
#define MP1_C2PMSG_61 0x3B109F4
#define MPNPU_SRAM_X2I_MAILBOX_0 0x3600000
#define MPNPU_SRAM_X2I_MAILBOX_15 0x361E000
#define MPNPU_SRAM_X2I_MAILBOX_31 0x363E000
#define MPNPU_SRAM_I2X_MAILBOX_31 0x363F000
#define MMNPU_APERTURE0_BASE 0x3000000
#define MMNPU_APERTURE1_BASE 0x3600000
#define MMNPU_APERTURE3_BASE 0x3810000
#define MMNPU_APERTURE4_BASE 0x3B10000
/* PCIe BAR Index for NPU6 */
#define NPU6_REG_BAR_INDEX 0
#define NPU6_MBOX_BAR_INDEX 0
#define NPU6_PSP_BAR_INDEX 4
#define NPU6_SMU_BAR_INDEX 5
#define NPU6_SRAM_BAR_INDEX 2
/* Associated BARs and Apertures */
#define NPU6_REG_BAR_BASE MMNPU_APERTURE0_BASE
#define NPU6_MBOX_BAR_BASE MMNPU_APERTURE0_BASE
#define NPU6_PSP_BAR_BASE MMNPU_APERTURE3_BASE
#define NPU6_SMU_BAR_BASE MMNPU_APERTURE4_BASE
#define NPU6_SRAM_BAR_BASE MMNPU_APERTURE1_BASE
/*
 * Device-private configuration for NPU6 (PCI 0x17f0, revision 0x20).
 * Reuses the NPU4 runtime-config list, DPM clock table and set_dpm
 * hardware op; register offsets below map SMU/PSP/mailbox registers
 * onto this device's BAR apertures.
 *
 * NOTE(review): fw_path points at the NPU4 firmware blob (17f0_10)
 * while this device enumerates as 17f0 rev 0x20 -- confirm this is
 * intentional firmware sharing.
 */
const struct amdxdna_dev_priv npu6_dev_priv = {
.fw_path = "amdnpu/17f0_10/npu.sbin",
.protocol_major = 0x6,
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU6_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU6_SRAM_BAR_BASE,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
},
.psp_regs_off = {
DEFINE_BAR_OFFSET(PSP_CMD_REG, NPU6_PSP, MP0_C2PMSG_123),
DEFINE_BAR_OFFSET(PSP_ARG0_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
DEFINE_BAR_OFFSET(PSP_ARG1_REG, NPU6_REG, MPNPU_PUB_SCRATCH4),
DEFINE_BAR_OFFSET(PSP_ARG2_REG, NPU6_REG, MPNPU_PUB_SCRATCH9),
DEFINE_BAR_OFFSET(PSP_INTR_REG, NPU6_PSP, MP0_C2PMSG_73),
DEFINE_BAR_OFFSET(PSP_STATUS_REG, NPU6_PSP, MP0_C2PMSG_123),
DEFINE_BAR_OFFSET(PSP_RESP_REG, NPU6_REG, MPNPU_PUB_SCRATCH3),
},
.smu_regs_off = {
DEFINE_BAR_OFFSET(SMU_CMD_REG, NPU6_SMU, MP1_C2PMSG_0),
DEFINE_BAR_OFFSET(SMU_ARG_REG, NPU6_SMU, MP1_C2PMSG_60),
DEFINE_BAR_OFFSET(SMU_INTR_REG, NPU6_SMU, MMNPU_APERTURE4_BASE),
DEFINE_BAR_OFFSET(SMU_RESP_REG, NPU6_SMU, MP1_C2PMSG_61),
DEFINE_BAR_OFFSET(SMU_OUT_REG, NPU6_SMU, MP1_C2PMSG_60),
},
.hw_ops = {
.set_dpm = npu4_set_dpm,
},
};
/*
 * Top-level NPU6 device description: ties the BAR indices defined above and
 * the npu6_dev_priv configuration to the shared aie2 driver ops.
 */
const struct amdxdna_dev_info dev_npu6_info = {
	.reg_bar           = NPU6_REG_BAR_INDEX,
	.mbox_bar          = NPU6_MBOX_BAR_INDEX,
	.sram_bar          = NPU6_SRAM_BAR_INDEX,
	.psp_bar           = NPU6_PSP_BAR_INDEX,
	.smu_bar           = NPU6_SMU_BAR_INDEX,
	.first_col        = 0,
	.dev_mem_buf_shift = 15, /* 32 KiB aligned */
	.dev_mem_base      = AIE2_DEVM_BASE,
	.dev_mem_size      = AIE2_DEVM_SIZE,
	.vbnv              = "RyzenAI-npu6",
	.device_type       = AMDXDNA_DEV_TYPE_KMQ,
	.dev_priv          = &npu6_dev_priv,
	.ops               = &aie2_ops,
};

View File

@ -103,10 +103,15 @@ config DRM_KMS_HELPER
help
CRTC helpers for KMS drivers.
config DRM_DRAW
bool
depends on DRM
config DRM_PANIC
bool "Display a user-friendly message when a kernel panic occurs"
depends on DRM
select FONT_SUPPORT
select DRM_DRAW
help
Enable a drm panic handler, which will display a user-friendly message
when a kernel panic occurs. It's useful when using a user-space

View File

@ -91,6 +91,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
drm-$(CONFIG_DRM_PANIC) += drm_panic.o
drm-$(CONFIG_DRM_DRAW) += drm_draw.o
drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
obj-$(CONFIG_DRM) += drm.o

View File

@ -1105,7 +1105,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
* We can't use gang submit on with reserved VMIDs when the VM changes
* can't be invalidated by more than one engine at the same time.
*/
if (p->gang_size > 1 && !p->adev->vm_manager.concurrent_flush) {
if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
for (i = 0; i < p->gang_size; ++i) {
struct drm_sched_entity *entity = p->entities[i];
struct drm_gpu_scheduler *sched = entity->rq->sched;
@ -1189,7 +1189,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (!bo)
continue;
amdgpu_vm_bo_invalidate(adev, bo, false);
amdgpu_vm_bo_invalidate(bo, false);
}
}

View File

@ -36,6 +36,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
@ -60,6 +61,8 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
amdgpu_vm_bo_update_shared(bo);
return 0;
}
@ -345,7 +348,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
/* FIXME: This should be after the "if", but needs a fix to make sure
* DMABuf imports are initialized in the right VM list.
*/
amdgpu_vm_bo_invalidate(adev, bo, false);
amdgpu_vm_bo_invalidate(bo, false);
if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
return;

View File

@ -60,7 +60,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
struct amdgpu_fpriv *fpriv = file->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_mem_stats stats[__AMDGPU_PL_LAST + 1] = { };
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
ktime_t usage[AMDGPU_HW_IP_NUM];
const char *pl_name[] = {
[TTM_PL_VRAM] = "vram",
@ -72,15 +72,8 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_DOORBELL] = "doorbell",
};
unsigned int hw_ip, i;
int ret;
ret = amdgpu_bo_reserve(vm->root.bo, false);
if (ret)
return;
amdgpu_vm_get_memory(vm, stats, ARRAY_SIZE(stats));
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_vm_get_memory(vm, stats);
amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
/*
@ -114,9 +107,11 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
stats[TTM_PL_VRAM].evicted/1024UL);
drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
stats[TTM_PL_VRAM].requested/1024UL);
(stats[TTM_PL_VRAM].drm.shared +
stats[TTM_PL_VRAM].drm.private) / 1024UL);
drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
stats[TTM_PL_TT].requested/1024UL);
(stats[TTM_PL_TT].drm.shared +
stats[TTM_PL_TT].drm.private) / 1024UL);
for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
if (!usage[hw_ip])

View File

@ -42,6 +42,7 @@
#include "amdgpu_dma_buf.h"
#include "amdgpu_hmm.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
{
@ -179,6 +180,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
if (r)
return r;
amdgpu_vm_bo_update_shared(abo);
bo_va = amdgpu_vm_bo_find(vm, abo);
if (!bo_va)
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
@ -252,6 +254,7 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
goto out_unlock;
amdgpu_vm_bo_del(adev, bo_va);
amdgpu_vm_bo_update_shared(bo);
if (!amdgpu_vm_ready(vm))
goto out_unlock;
@ -839,7 +842,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
@ -899,7 +901,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(adev, robj, true);
amdgpu_vm_bo_invalidate(robj, true);
amdgpu_bo_unreserve(robj);
break;

View File

@ -1150,7 +1150,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
@ -1158,7 +1157,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = ttm_to_amdgpu_bo(bo);
amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_vm_bo_move(abo, new_mem, evict);
amdgpu_bo_kunmap(abo);
@ -1171,75 +1170,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
old_mem ? old_mem->mem_type : -1);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats,
unsigned int sz)
{
const unsigned int domain_to_pl[] = {
[ilog2(AMDGPU_GEM_DOMAIN_CPU)] = TTM_PL_SYSTEM,
[ilog2(AMDGPU_GEM_DOMAIN_GTT)] = TTM_PL_TT,
[ilog2(AMDGPU_GEM_DOMAIN_VRAM)] = TTM_PL_VRAM,
[ilog2(AMDGPU_GEM_DOMAIN_GDS)] = AMDGPU_PL_GDS,
[ilog2(AMDGPU_GEM_DOMAIN_GWS)] = AMDGPU_PL_GWS,
[ilog2(AMDGPU_GEM_DOMAIN_OA)] = AMDGPU_PL_OA,
[ilog2(AMDGPU_GEM_DOMAIN_DOORBELL)] = AMDGPU_PL_DOORBELL,
};
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_resource *res = bo->tbo.resource;
struct drm_gem_object *obj = &bo->tbo.base;
uint64_t size = amdgpu_bo_size(bo);
unsigned int type;
if (!res) {
/*
* If no backing store use one of the preferred domain for basic
* stats. We take the MSB since that should give a reasonable
* view.
*/
BUILD_BUG_ON(TTM_PL_VRAM < TTM_PL_TT ||
TTM_PL_VRAM < TTM_PL_SYSTEM);
type = fls(bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK);
if (!type)
return;
type--;
if (drm_WARN_ON_ONCE(&adev->ddev,
type >= ARRAY_SIZE(domain_to_pl)))
return;
type = domain_to_pl[type];
} else {
type = res->mem_type;
}
if (drm_WARN_ON_ONCE(&adev->ddev, type >= sz))
return;
/* DRM stats common fields: */
if (drm_gem_object_is_shared_for_memory_stats(obj))
stats[type].drm.shared += size;
else
stats[type].drm.private += size;
if (res) {
stats[type].drm.resident += size;
if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_BOOKKEEP))
stats[type].drm.active += size;
else if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
stats[type].drm.purgeable += size;
}
/* amdgpu specific stats: */
if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) {
stats[TTM_PL_VRAM].requested += size;
if (type != TTM_PL_VRAM)
stats[TTM_PL_VRAM].evicted += size;
} else if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_GTT) {
stats[TTM_PL_TT].requested += size;
}
}
/**
* amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
@ -1454,6 +1384,45 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
return amdgpu_gmc_sign_extend(offset);
}
/**
* amdgpu_bo_mem_stats_placement - bo placement for memory accounting
* @bo: the buffer object we should look at
*
* BO can have multiple preferred placements, to avoid double counting we want
* to file it under a single placement for memory stats.
* Luckily, if we take the highest set bit in preferred_domains the result is
* quite sensible.
*
* Returns:
* Which of the placements should the BO be accounted under.
*/
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
{
	uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
	uint32_t top;

	if (!domain)
		return TTM_PL_SYSTEM;

	/* Account the BO under the highest set preferred-domain bit only. */
	top = rounddown_pow_of_two(domain);

	if (top == AMDGPU_GEM_DOMAIN_GTT)
		return TTM_PL_TT;
	if (top == AMDGPU_GEM_DOMAIN_VRAM)
		return TTM_PL_VRAM;
	if (top == AMDGPU_GEM_DOMAIN_GDS)
		return AMDGPU_PL_GDS;
	if (top == AMDGPU_GEM_DOMAIN_GWS)
		return AMDGPU_PL_GWS;
	if (top == AMDGPU_GEM_DOMAIN_OA)
		return AMDGPU_PL_OA;
	if (top == AMDGPU_GEM_DOMAIN_DOORBELL)
		return AMDGPU_PL_DOORBELL;

	/* AMDGPU_GEM_DOMAIN_CPU and anything unrecognized count as system. */
	return TTM_PL_SYSTEM;
}
/**
* amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object

View File

@ -300,9 +300,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
struct amdgpu_mem_stats *stats,
unsigned int size);
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);

View File

@ -26,15 +26,15 @@
#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
#define __AMDGPU_PL_LAST (TTM_PL_PRIV + 4)
#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2

View File

@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
@ -310,6 +311,111 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_update_shared - helper to update shared memory stat
* @base: base structure for tracking BO usage in a VM
*
* Takes the vm status_lock and updates the shared memory stat. If the basic
* stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called
* as well.
*/
static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
{
	struct amdgpu_vm *vm = base->vm;
	struct amdgpu_bo *bo = base->bo;
	uint32_t placement = amdgpu_bo_mem_stats_placement(bo);
	int64_t delta = amdgpu_bo_size(bo);
	bool now_shared;

	spin_lock(&vm->status_lock);
	now_shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
	if (now_shared != base->shared) {
		/* Move the BO's size between the shared and private buckets. */
		if (!now_shared)
			delta = -delta;
		vm->stats[placement].drm.shared += delta;
		vm->stats[placement].drm.private -= delta;
		base->shared = now_shared;
	}
	spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
* @bo: amdgpu buffer object
*
* Update the per VM stats for all the vm if needed from private to shared or
* vice versa.
*/
void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *cursor = bo->vm_bo;

	/* Refresh the shared/private accounting in every VM linked to the BO. */
	while (cursor) {
		amdgpu_vm_update_shared(cursor);
		cursor = cursor->next;
	}
}
/**
* amdgpu_vm_update_stats_locked - helper to update normal memory stat
* @base: base structure for tracking BO usage in a VM
* @res: the ttm_resource to use for the purpose of accounting, may or may not
* be bo->tbo.resource
* @sign: if we should add (+1) or subtract (-1) from the stat
*
* Caller need to have the vm status_lock held. Useful for when multiple update
* need to happen at the same time.
*/
static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
					  struct ttm_resource *res, int sign)
{
	struct amdgpu_vm *vm = base->vm;
	struct amdgpu_bo *bo = base->bo;
	/* size carries the sign: negative when the BO is being removed, so
	 * every "+=" below handles both addition and removal.
	 */
	int64_t size = sign * amdgpu_bo_size(bo);
	uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);

	/* For drm-total- and drm-shared-, BO are accounted by their preferred
	 * placement, see also amdgpu_bo_mem_stats_placement.
	 */
	if (base->shared)
		vm->stats[bo_memtype].drm.shared += size;
	else
		vm->stats[bo_memtype].drm.private += size;

	/* Resident/purgeable/evicted are tracked against the actual backing
	 * resource, which may differ from the preferred placement above.
	 */
	if (res && res->mem_type < __AMDGPU_PL_NUM) {
		uint32_t res_memtype = res->mem_type;

		vm->stats[res_memtype].drm.resident += size;
		/* BO only count as purgeable if it is resident,
		 * since otherwise there's nothing to purge.
		 */
		if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
			vm->stats[res_memtype].drm.purgeable += size;
		/* Evicted: the BO does not currently sit in any of its
		 * preferred domains.
		 */
		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
			vm->stats[bo_memtype].evicted += size;
	}
}
/**
* amdgpu_vm_update_stats - helper to update normal memory stat
* @base: base structure for tracking BO usage in a VM
* @res: the ttm_resource to use for the purpose of accounting, may or may not
* be bo->tbo.resource
* @sign: if we should add (+1) or subtract (-1) from the stat
*
* Updates the basic memory stat when bo is added/deleted/moved.
*/
void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
struct ttm_resource *res, int sign)
{
struct amdgpu_vm *vm = base->vm;
spin_lock(&vm->status_lock);
amdgpu_vm_update_stats_locked(base, res, sign);
spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
*
@ -333,6 +439,11 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
spin_lock(&vm->status_lock);
base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
spin_unlock(&vm->status_lock);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@ -1083,53 +1194,11 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
struct amdgpu_mem_stats *stats,
unsigned int size)
{
struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo *bo = bo_va->base.bo;
if (!bo)
return;
/*
* For now ignore BOs which are currently locked and potentially
* changing their location.
*/
if (!amdgpu_vm_is_bo_always_valid(vm, bo) &&
!dma_resv_trylock(bo->tbo.base.resv))
return;
amdgpu_bo_get_memory(bo, stats, size);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
dma_resv_unlock(bo->tbo.base.resv);
}
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats *stats,
unsigned int size)
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
struct amdgpu_bo_va *bo_va, *tmp;
spin_lock(&vm->status_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
amdgpu_vm_bo_get_memory(bo_va, stats, size);
list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
amdgpu_vm_bo_get_memory(bo_va, stats, size);
memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
spin_unlock(&vm->status_lock);
}
@ -2076,6 +2145,7 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
if (*base != &bo_va->base)
continue;
amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
*base = bo_va->base.next;
break;
}
@ -2144,14 +2214,12 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
/**
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
* @adev: amdgpu_device pointer
* @bo: amdgpu buffer object
* @evicted: is the BO evicted
*
* Mark @bo as invalid.
*/
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted)
void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
{
struct amdgpu_vm_bo_base *bo_base;
@ -2176,6 +2244,32 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
}
/**
* amdgpu_vm_bo_move - handle BO move
*
* @bo: amdgpu buffer object
* @new_mem: the new placement of the BO move
* @evicted: is the BO evicted
*
* Update the memory stats for the new placement and mark @bo as invalid.
*/
void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
		       bool evicted)
{
	struct amdgpu_vm_bo_base *cursor = bo->vm_bo;

	/* Re-account the BO in every VM: subtract the old backing resource,
	 * add the new one, under each VM's status lock.
	 */
	while (cursor) {
		struct amdgpu_vm *vm = cursor->vm;

		spin_lock(&vm->status_lock);
		amdgpu_vm_update_stats_locked(cursor, bo->tbo.resource, -1);
		amdgpu_vm_update_stats_locked(cursor, new_mem, +1);
		spin_unlock(&vm->status_lock);

		cursor = cursor->next;
	}

	amdgpu_vm_bo_invalidate(bo, evicted);
}
/**
* amdgpu_vm_get_block_size - calculate VM page table size as power of two
*
@ -2595,6 +2689,16 @@ void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->is_compute_context = false;
}
static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
{
	int i;

	/* True only when every placement has no remaining accounted memory. */
	for (i = 0; i < __AMDGPU_PL_NUM; ++i) {
		if (vm->stats[i].evicted ||
		    !drm_memory_stats_is_zero(&vm->stats[i].drm))
			return false;
	}

	return true;
}
/**
* amdgpu_vm_fini - tear down a vm instance
*
@ -2618,7 +2722,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
amdgpu_vm_put_task_info(vm->task_info);
amdgpu_vm_set_pasid(adev, vm, 0);
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
@ -2667,6 +2770,16 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
if (!amdgpu_vm_stats_is_zero(vm)) {
struct amdgpu_task_info *ti = vm->task_info;
dev_warn(adev->dev,
"VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
ti->process_name, ti->pid, ti->task_name, ti->tgid);
}
amdgpu_vm_put_task_info(vm->task_info);
}
/**

View File

@ -35,6 +35,7 @@
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"
#include "amdgpu_ttm.h"
struct drm_exec;
@ -202,9 +203,13 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
/* protected by spinlock */
/* protected by vm status_lock */
struct list_head vm_status;
/* if the bo is counted as shared in mem stats
* protected by vm status_lock */
bool shared;
/* protected by the BO being reserved */
bool moved;
};
@ -324,10 +329,7 @@ struct amdgpu_vm_fault_info {
struct amdgpu_mem_stats {
struct drm_memory_stats drm;
/* buffers that requested this placement */
uint64_t requested;
/* buffers that requested this placement
* but are currently evicted */
/* buffers that requested this placement but are currently evicted */
uint64_t evicted;
};
@ -345,6 +347,9 @@ struct amdgpu_vm {
/* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
/* Memory statistics for this vm, protected by status_lock */
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
@ -524,8 +529,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted);
void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
struct ttm_resource *new_res, int sign);
void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo);
void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
@ -576,8 +585,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats *stats,
unsigned int size);
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]);
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_vm *vmbo, bool immediate);

View File

@ -537,6 +537,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
if (!entry->bo)
return;
amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);

View File

@ -1037,8 +1037,10 @@ static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
continue;
*enabled = true;
mutex_lock(&connector->eld_mutex);
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
mutex_unlock(&connector->eld_mutex);
break;
}

View File

@ -590,11 +590,12 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_PROBE, true);
if (drm_connector_init(
if (drm_connector_dynamic_init(
dev,
connector,
&dm_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort)) {
DRM_MODE_CONNECTOR_DisplayPort,
NULL)) {
kfree(aconnector);
return NULL;
}

View File

@ -793,7 +793,7 @@ static void anx6345_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id anx6345_id[] = {
{ "anx6345", 0 },
{ "anx6345" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, anx6345_id);

View File

@ -2002,8 +2002,10 @@ static int anx7625_audio_get_eld(struct device *dev, void *data,
memset(buf, 0, len);
} else {
dev_dbg(dev, "audio copy eld\n");
mutex_lock(&ctx->connector->eld_mutex);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
mutex_unlock(&ctx->connector->eld_mutex);
}
return 0;
@ -2795,7 +2797,7 @@ static void anx7625_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id anx7625_id[] = {
{"anx7625", 0},
{ "anx7625" },
{}
};

View File

@ -597,7 +597,7 @@ static const struct of_device_id ch7033_dt_ids[] = {
MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
static const struct i2c_device_id ch7033_ids[] = {
{ "ch7033", 0 },
{ "ch7033" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ch7033_ids);

View File

@ -48,6 +48,7 @@
#define REG_COL_DEP GENMASK(1, 0)
#define BIT8 FIELD_PREP(REG_COL_DEP, 1)
#define OUT_MAP BIT(4)
#define VESA BIT(4)
#define JEIDA 0
#define REG_DESSC_ENB BIT(6)
#define DMODE BIT(7)
@ -428,12 +429,30 @@ static inline void it6263_lvds_reset(struct it6263 *it)
fsleep(10000);
}
/* Only the JEIDA and SPWG RGB888 LVDS mappings are supported as input. */
static inline bool it6263_is_input_bus_fmt_valid(int input_fmt)
{
	return input_fmt == MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA ||
	       input_fmt == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
}
static inline void it6263_lvds_set_interface(struct it6263 *it)
{
u8 fmt;
/* color depth */
regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, REG_COL_DEP, BIT8);
if (it->lvds_data_mapping == MEDIA_BUS_FMT_RGB888_1X7X4_SPWG)
fmt = VESA;
else
fmt = JEIDA;
/* output mapping */
regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, JEIDA);
regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, OUT_MAP, fmt);
if (it->lvds_dual_link) {
regmap_write_bits(it->lvds_regmap, LVDS_REG_2C, DMODE, DISO);
@ -714,14 +733,14 @@ it6263_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
*num_input_fmts = 0;
if (it->lvds_data_mapping != MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA)
if (!it6263_is_input_bus_fmt_valid(it->lvds_data_mapping))
return NULL;
input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
if (!input_fmts)
return NULL;
input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
input_fmts[0] = it->lvds_data_mapping;
*num_input_fmts = 1;
return input_fmts;
@ -878,7 +897,7 @@ static const struct of_device_id it6263_of_match[] = {
MODULE_DEVICE_TABLE(of, it6263_of_match);
static const struct i2c_device_id it6263_i2c_ids[] = {
{ "it6263", 0 },
{ "it6263" },
{ }
};
MODULE_DEVICE_TABLE(i2c, it6263_i2c_ids);

View File

@ -3497,7 +3497,7 @@ static void it6505_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id it6505_id[] = {
{ "it6505", 0 },
{ "it6505" },
{ }
};

View File

@ -1450,8 +1450,10 @@ static int it66121_audio_get_eld(struct device *dev, void *data,
dev_dbg(dev, "No connector present, passing empty EDID data");
memset(buf, 0, len);
} else {
mutex_lock(&ctx->connector->eld_mutex);
memcpy(buf, ctx->connector->eld,
min(sizeof(ctx->connector->eld), len));
mutex_unlock(&ctx->connector->eld_mutex);
}
mutex_unlock(&ctx->lock);

View File

@ -815,8 +815,8 @@ static const struct of_device_id lt8912_dt_match[] = {
MODULE_DEVICE_TABLE(of, lt8912_dt_match);
static const struct i2c_device_id lt8912_id[] = {
{"lt8912", 0},
{},
{ "lt8912" },
{}
};
MODULE_DEVICE_TABLE(i2c, lt8912_id);

View File

@ -757,7 +757,6 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
unsigned long long rate;
if (mode->hdisplay > 3840)
return MODE_BAD_HVALUE;
@ -765,8 +764,7 @@ static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->hdisplay > 2000 && !lt9611->dsi1_node)
return MODE_PANEL;
rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
return bridge->funcs->hdmi_tmds_char_rate_valid(bridge, mode, rate);
return MODE_OK;
}
static int lt9611_bridge_atomic_check(struct drm_bridge *bridge,
@ -1236,7 +1234,7 @@ static void lt9611_remove(struct i2c_client *client)
}
static const struct i2c_device_id lt9611_id[] = {
{ "lontium,lt9611", 0 },
{ "lontium,lt9611" },
{}
};
MODULE_DEVICE_TABLE(i2c, lt9611_id);

View File

@ -914,7 +914,7 @@ static void lt9611uxc_remove(struct i2c_client *client)
}
static const struct i2c_device_id lt9611uxc_id[] = {
{ "lontium,lt9611uxc", 0 },
{ "lontium,lt9611uxc" },
{ /* sentinel */ }
};

View File

@ -318,8 +318,8 @@ static void stdp4028_ge_b850v3_fw_remove(struct i2c_client *stdp4028_i2c)
}
static const struct i2c_device_id stdp4028_ge_b850v3_fw_i2c_table[] = {
{"stdp4028_ge_fw", 0},
{},
{ "stdp4028_ge_fw" },
{}
};
MODULE_DEVICE_TABLE(i2c, stdp4028_ge_b850v3_fw_i2c_table);
@ -365,8 +365,8 @@ static void stdp2690_ge_b850v3_fw_remove(struct i2c_client *stdp2690_i2c)
}
static const struct i2c_device_id stdp2690_ge_b850v3_fw_i2c_table[] = {
{"stdp2690_ge_fw", 0},
{},
{ "stdp2690_ge_fw" },
{}
};
MODULE_DEVICE_TABLE(i2c, stdp2690_ge_b850v3_fw_i2c_table);

View File

@ -319,8 +319,8 @@ static void ptn3460_remove(struct i2c_client *client)
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
{"ptn3460", 0},
{},
{ "ptn3460" },
{}
};
MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table);

View File

@ -1239,8 +1239,8 @@ static const struct of_device_id sii902x_dt_ids[] = {
MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
static const struct i2c_device_id sii902x_i2c_ids[] = {
{ "sii9022", 0 },
{ },
{ "sii9022" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);

View File

@ -945,8 +945,8 @@ static const struct of_device_id sii9234_dt_match[] = {
MODULE_DEVICE_TABLE(of, sii9234_dt_match);
static const struct i2c_device_id sii9234_id[] = {
{ "SII9234", 0 },
{ },
{ "SII9234" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sii9234_id);

View File

@ -2368,8 +2368,8 @@ static const struct of_device_id sii8620_dt_match[] = {
MODULE_DEVICE_TABLE(of, sii8620_dt_match);
static const struct i2c_device_id sii8620_id[] = {
{ "sii8620", 0 },
{ },
{ "sii8620" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sii8620_id);

View File

@ -59,3 +59,9 @@ config DRM_DW_MIPI_DSI
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE
config DRM_DW_MIPI_DSI2
tristate
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL_BRIDGE

View File

@ -8,3 +8,4 @@ obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
obj-$(CONFIG_DRM_DW_HDMI_QP) += dw-hdmi-qp.o
obj-$(CONFIG_DRM_DW_MIPI_DSI) += dw-mipi-dsi.o
obj-$(CONFIG_DRM_DW_MIPI_DSI2) += dw-mipi-dsi2.o

View File

@ -442,16 +442,14 @@ dw_hdmi_qp_bridge_edid_read(struct drm_bridge *bridge,
}
static enum drm_mode_status
dw_hdmi_qp_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
dw_hdmi_qp_bridge_tmds_char_rate_valid(const struct drm_bridge *bridge,
const struct drm_display_mode *mode,
unsigned long long rate)
{
struct dw_hdmi_qp *hdmi = bridge->driver_private;
unsigned long long rate;
rate = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
if (rate > HDMI14_MAX_TMDSCLK) {
dev_dbg(hdmi->dev, "Unsupported mode clock: %d\n", mode->clock);
dev_dbg(hdmi->dev, "Unsupported TMDS char rate: %lld\n", rate);
return MODE_CLOCK_HIGH;
}
@ -510,7 +508,7 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.atomic_disable = dw_hdmi_qp_bridge_atomic_disable,
.detect = dw_hdmi_qp_bridge_detect,
.edid_read = dw_hdmi_qp_bridge_edid_read,
.mode_valid = dw_hdmi_qp_bridge_mode_valid,
.hdmi_tmds_char_rate_valid = dw_hdmi_qp_bridge_tmds_char_rate_valid,
.hdmi_clear_infoframe = dw_hdmi_qp_bridge_clear_infoframe,
.hdmi_write_infoframe = dw_hdmi_qp_bridge_write_infoframe,
};

View File

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Rockchip Electronics Co.Ltd
* Copyright (C) Rockchip Electronics Co., Ltd.
* Author:
* Algea Cao <algea.cao@rock-chips.com>
*/

File diff suppressed because it is too large Load Diff

View File

@ -2587,7 +2587,7 @@ static void tc_remove(struct i2c_client *client)
}
static const struct i2c_device_id tc358767_i2c_ids[] = {
{ "tc358767", 0 },
{ "tc358767" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);

View File

@ -1244,8 +1244,8 @@ static const struct regmap_config tc358768_regmap_config = {
};
static const struct i2c_device_id tc358768_i2c_ids[] = {
{ "tc358768", 0 },
{ "tc358778", 0 },
{ "tc358768" },
{ "tc358778" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);

View File

@ -389,7 +389,7 @@ static void dlpc3433_remove(struct i2c_client *client)
}
static const struct i2c_device_id dlpc3433_id[] = {
{ "ti,dlpc3433", 0 },
{ "ti,dlpc3433" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, dlpc3433_id);

View File

@ -132,6 +132,16 @@
#define REG_IRQ_STAT_CHA_SOT_BIT_ERR BIT(2)
#define REG_IRQ_STAT_CHA_PLL_UNLOCK BIT(0)
/* LVDS output channels; used to index the per-channel config arrays. */
enum sn65dsi83_channel {
	CHANNEL_A,
	CHANNEL_B
};
/* Near-end LVDS termination; values index the VOD-swing lookup tables. */
enum sn65dsi83_lvds_term {
	OHM_100,
	OHM_200
};
enum sn65dsi83_model {
MODEL_SN65DSI83,
MODEL_SN65DSI84,
@ -147,6 +157,8 @@ struct sn65dsi83 {
struct regulator *vcc;
bool lvds_dual_link;
bool lvds_dual_link_even_odd_swap;
int lvds_vod_swing_conf[2];
int lvds_term_conf[2];
};
static const struct regmap_range sn65dsi83_readable_ranges[] = {
@ -237,6 +249,36 @@ static const struct regmap_config sn65dsi83_regmap_config = {
.max_register = REG_IRQ_STAT,
};
/*
 * Achievable LVDS data-lane differential output voltage (VOD swing), in
 * microvolts, as {min, max} pairs. First index: near-end termination
 * (OHM_100/OHM_200); second index: the VOD-swing setting later programmed
 * via REG_LVDS_VCOM.
 */
static const int lvds_vod_swing_data_table[2][4][2] = {
	{ /* 100 Ohm */
		{ 180000, 313000 },
		{ 215000, 372000 },
		{ 250000, 430000 },
		{ 290000, 488000 },
	},
	{ /* 200 Ohm */
		{ 150000, 261000 },
		{ 200000, 346000 },
		{ 250000, 428000 },
		{ 300000, 511000 },
	},
};
/*
 * Achievable LVDS clock-lane differential output voltage (VOD swing), in
 * microvolts, as {min, max} pairs; indexed the same way as
 * lvds_vod_swing_data_table (termination, then VOD-swing setting).
 */
static const int lvds_vod_swing_clock_table[2][4][2] = {
	{ /* 100 Ohm */
		{ 140000, 244000 },
		{ 168000, 290000 },
		{ 195000, 335000 },
		{ 226000, 381000 },
	},
	{ /* 200 Ohm */
		{ 117000, 204000 },
		{ 156000, 270000 },
		{ 195000, 334000 },
		{ 234000, 399000 },
	},
};
static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge)
{
return container_of(bridge, struct sn65dsi83, bridge);
@ -435,12 +477,16 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
val |= REG_LVDS_FMT_LVDS_LINK_CFG;
regmap_write(ctx->regmap, REG_LVDS_FMT, val);
regmap_write(ctx->regmap, REG_LVDS_VCOM, 0x05);
regmap_write(ctx->regmap, REG_LVDS_VCOM,
REG_LVDS_VCOM_CHA_LVDS_VOD_SWING(ctx->lvds_vod_swing_conf[CHANNEL_A]) |
REG_LVDS_VCOM_CHB_LVDS_VOD_SWING(ctx->lvds_vod_swing_conf[CHANNEL_B]));
regmap_write(ctx->regmap, REG_LVDS_LANE,
(ctx->lvds_dual_link_even_odd_swap ?
REG_LVDS_LANE_EVEN_ODD_SWAP : 0) |
REG_LVDS_LANE_CHA_LVDS_TERM |
REG_LVDS_LANE_CHB_LVDS_TERM);
(ctx->lvds_term_conf[CHANNEL_A] ?
REG_LVDS_LANE_CHA_LVDS_TERM : 0) |
(ctx->lvds_term_conf[CHANNEL_B] ?
REG_LVDS_LANE_CHB_LVDS_TERM : 0));
regmap_write(ctx->regmap, REG_LVDS_CM, 0x00);
le16val = cpu_to_le16(mode->hdisplay);
@ -576,10 +622,103 @@ static const struct drm_bridge_funcs sn65dsi83_funcs = {
.atomic_get_input_bus_fmts = sn65dsi83_atomic_get_input_bus_fmts,
};
/*
 * Pick the LVDS_VOD_SWING register setting (0..3) whose guaranteed output
 * swing range fits entirely inside the requested data and clock windows.
 * Returns the setting index, or -EINVAL if no setting fits.
 */
static int sn65dsi83_select_lvds_vod_swing(struct device *dev,
	u32 lvds_vod_swing_data[2], u32 lvds_vod_swing_clk[2], u8 lvds_term)
{
	int idx;

	for (idx = 0; idx < 4; idx++) {
		const int *data = lvds_vod_swing_data_table[lvds_term][idx];
		const int *clk = lvds_vod_swing_clock_table[lvds_term][idx];

		/* Reject settings whose range leaks outside the request. */
		if (data[0] < lvds_vod_swing_data[0] || data[1] > lvds_vod_swing_data[1])
			continue;
		if (clk[0] < lvds_vod_swing_clk[0] || clk[1] > lvds_vod_swing_clk[1])
			continue;

		return idx;
	}

	dev_err(dev, "failed to find appropriate LVDS_VOD_SWING configuration\n");
	return -EINVAL;
}
/*
 * sn65dsi83_parse_lvds_endpoint() - parse one LVDS output endpoint from DT
 * @ctx: bridge context
 * @channel: CHANNEL_A or CHANNEL_B, mapped to port@2 or port@3 respectively
 *
 * Reads the optional "ti,lvds-termination-ohms" and
 * "ti,lvds-vod-swing-{data,clock}-microvolt" endpoint properties and stores
 * the resulting register configuration in @ctx->lvds_term_conf[] and
 * @ctx->lvds_vod_swing_conf[].
 *
 * Return: 0 on success, negative errno on invalid or unreadable properties.
 */
static int sn65dsi83_parse_lvds_endpoint(struct sn65dsi83 *ctx, int channel)
{
	struct device *dev = ctx->dev;
	struct device_node *endpoint;
	int endpoint_reg;
	/* Set so the property can be freely selected if not defined */
	u32 lvds_vod_swing_data[2] = { 0, 1000000 };
	u32 lvds_vod_swing_clk[2] = { 0, 1000000 };
	/* Set default near end termination to 200 Ohm */
	u32 lvds_term = 200;
	int lvds_vod_swing_conf;
	int ret = 0;
	int ret_data;
	int ret_clock;

	if (channel == CHANNEL_A)
		endpoint_reg = 2;
	else
		endpoint_reg = 3;

	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, endpoint_reg, -1);

	of_property_read_u32(endpoint, "ti,lvds-termination-ohms", &lvds_term);
	if (lvds_term == 100) {
		ctx->lvds_term_conf[channel] = OHM_100;
	} else if (lvds_term == 200) {
		ctx->lvds_term_conf[channel] = OHM_200;
	} else {
		/* Report the bad value instead of failing silently. */
		dev_err(dev, "invalid ti,lvds-termination-ohms value %u (must be 100 or 200)\n",
			lvds_term);
		ret = -EINVAL;
		goto exit;
	}

	ret_data = of_property_read_u32_array(endpoint, "ti,lvds-vod-swing-data-microvolt",
					      lvds_vod_swing_data, ARRAY_SIZE(lvds_vod_swing_data));
	if (ret_data != 0 && ret_data != -EINVAL) {
		ret = ret_data;
		goto exit;
	}

	ret_clock = of_property_read_u32_array(endpoint, "ti,lvds-vod-swing-clock-microvolt",
					       lvds_vod_swing_clk, ARRAY_SIZE(lvds_vod_swing_clk));
	if (ret_clock != 0 && ret_clock != -EINVAL) {
		ret = ret_clock;
		goto exit;
	}

	/* Use default value if both properties are NOT defined. */
	if (ret_data == -EINVAL && ret_clock == -EINVAL)
		lvds_vod_swing_conf = 0x1;

	/* Use lookup table if any of the two properties is defined. */
	if (!ret_data || !ret_clock) {
		lvds_vod_swing_conf = sn65dsi83_select_lvds_vod_swing(dev, lvds_vod_swing_data,
						lvds_vod_swing_clk, ctx->lvds_term_conf[channel]);
		if (lvds_vod_swing_conf < 0) {
			ret = lvds_vod_swing_conf;
			goto exit;
		}
	}

	ctx->lvds_vod_swing_conf[channel] = lvds_vod_swing_conf;
	ret = 0;
exit:
	of_node_put(endpoint);
	return ret;
}
static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
{
struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
int ret;
ret = sn65dsi83_parse_lvds_endpoint(ctx, CHANNEL_A);
if (ret < 0)
return ret;
ret = sn65dsi83_parse_lvds_endpoint(ctx, CHANNEL_B);
if (ret < 0)
return ret;
ctx->lvds_dual_link = false;
ctx->lvds_dual_link_even_odd_swap = false;
@ -606,7 +745,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model)
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 2, 0);
if (IS_ERR(panel_bridge))
return PTR_ERR(panel_bridge);
return dev_err_probe(dev, PTR_ERR(panel_bridge), "Failed to get panel bridge\n");
ctx->panel_bridge = panel_bridge;

View File

@ -1971,8 +1971,8 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
}
static const struct i2c_device_id ti_sn65dsi86_id[] = {
{ "ti,sn65dsi86", 0},
{},
{ "ti,sn65dsi86" },
{}
};
MODULE_DEVICE_TABLE(i2c, ti_sn65dsi86_id);

View File

@ -435,7 +435,7 @@ static void tfp410_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tfp410_i2c_ids[] = {
{ "tfp410", 0 },
{ "tfp410" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tfp410_i2c_ids);

View File

@ -12,6 +12,7 @@ config DRM_CLIENT_LIB
config DRM_CLIENT_SELECTION
tristate
depends on DRM
select DRM_CLIENT_LIB if DRM_CLIENT_LOG
select DRM_CLIENT_LIB if DRM_FBDEV_EMULATION
help
Drivers that support in-kernel DRM clients have to select this
@ -70,4 +71,53 @@ config DRM_FBDEV_LEAK_PHYS_SMEM
If in doubt, say "N" or spread the word to your closed source
library vendor.
config DRM_CLIENT_LOG
bool "Print the kernel boot message on the screen"
depends on DRM_CLIENT_SELECTION
select DRM_CLIENT
select DRM_CLIENT_SETUP
select DRM_DRAW
select FONT_SUPPORT
help
This enable a drm logger, that will print the kernel messages to the
screen until the userspace is ready to take over.
If you only need logs, but no terminal, or if you prefer userspace
terminal, say "Y".
choice
prompt "Default DRM Client"
depends on DRM_CLIENT_SELECTION
depends on DRM_FBDEV_EMULATION || DRM_CLIENT_LOG
default DRM_CLIENT_DEFAULT_FBDEV
help
Selects the default drm client.
The selection made here can be overridden by using the kernel
command line 'drm_client_lib.active=fbdev' option.
config DRM_CLIENT_DEFAULT_FBDEV
bool "fbdev"
depends on DRM_FBDEV_EMULATION
help
Use fbdev emulation as default drm client. This is needed to have
fbcon on top of a drm driver.
config DRM_CLIENT_DEFAULT_LOG
bool "log"
depends on DRM_CLIENT_LOG
help
Use drm log as default drm client. This will display boot logs on the
screen, but doesn't implement a full terminal. For that you will need
a userspace terminal using drm/kms.
endchoice
config DRM_CLIENT_DEFAULT
string
depends on DRM_CLIENT
default "fbdev" if DRM_CLIENT_DEFAULT_FBDEV
default "log" if DRM_CLIENT_DEFAULT_LOG
default ""
endmenu

View File

@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
subdir-ccflags-y += -I$(src)/..
drm_client_lib-y := drm_client_setup.o
drm_client_lib-$(CONFIG_DRM_CLIENT_LOG) += drm_log.o
drm_client_lib-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_client.o
obj-$(CONFIG_DRM_CLIENT_LIB) += drm_client_lib.o

View File

@ -16,4 +16,10 @@ static inline int drm_fbdev_client_setup(struct drm_device *dev,
}
#endif
#ifdef CONFIG_DRM_CLIENT_LOG
void drm_log_register(struct drm_device *dev);
#else
static inline void drm_log_register(struct drm_device *dev) {}
#endif
#endif

View File

@ -7,6 +7,12 @@
#include "drm_client_internal.h"
/*
 * Default in-kernel DRM client, overridable on the kernel command line
 * with drm_client_lib.active=<fbdev|log>.
 */
static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
/*
 * Fix: the help text was missing the opening '[' and a space, rendering as
 * e.g. "default isfbdev]".
 */
MODULE_PARM_DESC(active,
		 "Choose which drm client to start, default is ["
		 CONFIG_DRM_CLIENT_DEFAULT "]");
/**
* drm_client_setup() - Setup in-kernel DRM clients
* @dev: DRM device
@ -25,11 +31,26 @@
*/
void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format)
{
int ret;
ret = drm_fbdev_client_setup(dev, format);
if (ret)
drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (!strcmp(drm_client_default, "fbdev")) {
int ret;
ret = drm_fbdev_client_setup(dev, format);
if (ret)
drm_warn(dev, "Failed to set up DRM client; error %d\n", ret);
return;
}
#endif
#ifdef CONFIG_DRM_CLIENT_LOG
if (!strcmp(drm_client_default, "log")) {
drm_log_register(dev);
return;
}
#endif
if (strcmp(drm_client_default, ""))
drm_warn(dev, "Unknown DRM client %s\n", drm_client_default);
}
EXPORT_SYMBOL(drm_client_setup);

View File

@ -0,0 +1,420 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright (c) 2024 Red Hat.
* Author: Jocelyn Falempe <jfalempe@redhat.com>
*/
#include <linux/console.h>
#include <linux/font.h>
#include <linux/init.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/types.h>
#include <drm/drm_client.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
#include "drm_client_internal.h"
#include "drm_draw_internal.h"
#include "drm_internal.h"
MODULE_AUTHOR("Jocelyn Falempe");
MODULE_DESCRIPTION("DRM boot logger");
MODULE_LICENSE("GPL");
static unsigned int scale = 1;
module_param(scale, uint, 0444);
MODULE_PARM_DESC(scale, "Integer scaling factor for drm_log, default is 1");
/**
* DOC: overview
*
* This is a simple graphic logger, to print the kernel message on screen, until
* a userspace application is able to take over.
* It is only for debugging purpose.
*/
/*
 * Per-display drawing state: one client framebuffer plus the font metrics
 * used to lay text out on it.
 */
struct drm_log_scanout {
	struct drm_client_buffer *buffer;	/* framebuffer being drawn to */
	const struct font_desc *font;		/* console font (unscaled) */
	u32 rows;		/* text rows fitting on screen (height / scaled_font_h) */
	u32 columns;		/* text columns fitting on screen */
	u32 scaled_font_h;	/* font height * scale, in pixels */
	u32 scaled_font_w;	/* font width * scale, in pixels */
	u32 line;		/* next text row to draw; wraps back to 0 at rows */
	u32 format;		/* NOTE(review): not assigned in visible code — verify */
	u32 px_width;		/* NOTE(review): not assigned in visible code — verify */
	u32 front_color;	/* message color, pre-converted to the fb format */
	u32 prefix_color;	/* timestamp-prefix color, pre-converted */
};

/* One drm_log instance: a DRM client plus an nbcon console. */
struct drm_log {
	struct mutex lock;	/* protects scanout setup/teardown vs console writes */
	struct drm_client_dev client;
	struct console con;
	bool probed;		/* modesets probed; reset on hotplug */
	u32 n_scanout;		/* number of valid entries in scanout[] */
	struct drm_log_scanout *scanout;
};
/* Recover the drm_log instance from its embedded DRM client. */
static struct drm_log *client_to_drm_log(struct drm_client_dev *client)
{
	return container_of(client, struct drm_log, client);
}

/* Recover the drm_log instance from its embedded console. */
static struct drm_log *console_to_drm_log(struct console *con)
{
	return container_of(con, struct drm_log, con);
}
/*
 * drm_log_blit() - blit one glyph bitmap into the framebuffer
 * @dst: destination mapping, already offset to the glyph position
 * @dst_pitch: destination pitch in bytes
 * @src: 1bpp glyph bitmap
 * @src_pitch: source pitch in bytes
 * @height/@width: scaled glyph size in pixels
 * @px_width: destination bytes per pixel (2, 3 or 4)
 * @color: foreground color, pre-converted to the destination format
 *
 * Dispatches to the drm_draw helper matching the pixel width.
 */
static void drm_log_blit(struct iosys_map *dst, unsigned int dst_pitch,
			 const u8 *src, unsigned int src_pitch,
			 u32 height, u32 width, u32 px_width, u32 color)
{
	switch (px_width) {
	case 2:
		drm_draw_blit16(dst, dst_pitch, src, src_pitch, height, width, scale, color);
		break;
	case 3:
		drm_draw_blit24(dst, dst_pitch, src, src_pitch, height, width, scale, color);
		break;
	case 4:
		drm_draw_blit32(dst, dst_pitch, src, src_pitch, height, width, scale, color);
		break;
	default:
		/* %u: px_width is u32; %d mismatched the type. */
		WARN_ONCE(1, "Can't blit with pixel width %u\n", px_width);
	}
}
/*
 * Clear one text line by zero-filling its pixel rows, then flush the
 * damaged rectangle so the change hits the screen.
 */
static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line)
{
	struct drm_framebuffer *fb = scanout->buffer->fb;
	unsigned long height = scanout->scaled_font_h;
	struct iosys_map map;
	struct drm_rect r = DRM_RECT_INIT(0, line * height, fb->width, height);

	/* Best effort: silently skip the line if the buffer can't be mapped. */
	if (drm_client_buffer_vmap_local(scanout->buffer, &map))
		return;
	iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]);
	drm_client_buffer_vunmap_local(scanout->buffer);
	drm_client_framebuffer_flush(scanout->buffer, &r);
}
/*
 * Draw one line of text at the current cursor row and advance the cursor.
 * The first @prefix_len characters are drawn in the prefix (timestamp)
 * color, the rest in the normal foreground color. Text beyond the screen
 * width is silently truncated; the cursor wraps back to row 0 at the bottom.
 */
static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s,
			      unsigned int len, unsigned int prefix_len)
{
	struct drm_framebuffer *fb = scanout->buffer->fb;
	struct iosys_map map;
	const struct font_desc *font = scanout->font;
	size_t font_pitch = DIV_ROUND_UP(font->width, 8); /* 1bpp glyph rows */
	const u8 *src;
	u32 px_width = fb->format->cpp[0];
	struct drm_rect r = DRM_RECT_INIT(0, scanout->line * scanout->scaled_font_h,
					  fb->width, (scanout->line + 1) * scanout->scaled_font_h);
	u32 i;

	if (drm_client_buffer_vmap_local(scanout->buffer, &map))
		return;

	/* Position the mapping at the first pixel row of the cursor line. */
	iosys_map_incr(&map, r.y1 * fb->pitches[0]);
	for (i = 0; i < len && i < scanout->columns; i++) {
		u32 color = (i < prefix_len) ? scanout->prefix_color : scanout->front_color;
		src = drm_draw_get_char_bitmap(font, s[i], font_pitch);
		drm_log_blit(&map, fb->pitches[0], src, font_pitch,
			     scanout->scaled_font_h, scanout->scaled_font_w,
			     px_width, color);
		/* Advance one character cell to the right. */
		iosys_map_incr(&map, scanout->scaled_font_w * px_width);
	}
	scanout->line++;
	if (scanout->line >= scanout->rows)
		scanout->line = 0;
	drm_client_buffer_vunmap_local(scanout->buffer);
	drm_client_framebuffer_flush(scanout->buffer, &r);
}
/*
 * Draw one line, clearing a couple of lines ahead of the wrap-around
 * cursor first so the newest text is visually separated from stale lines
 * below it.
 */
static void drm_log_draw_new_line(struct drm_log_scanout *scanout,
				  const char *s, unsigned int len, unsigned int prefix_len)
{
	if (scanout->line == 0) {
		drm_log_clear_line(scanout, 0);
		drm_log_clear_line(scanout, 1);
		drm_log_clear_line(scanout, 2);
	} else if (scanout->line + 2 < scanout->rows) {
		/* Kernel style: brace all branches when one branch is braced. */
		drm_log_clear_line(scanout, scanout->line + 2);
	}

	drm_log_draw_line(scanout, s, len, prefix_len);
}
/*
* Depends on print_time() in printk.c
* Timestamp is written with "[%5lu.%06lu]"
*/
#define TS_PREFIX_LEN 13
/*
 * Draw one kmsg record, wrapping it over multiple screen lines if needed.
 * A leading "[%5lu.%06lu]" timestamp (see TS_PREFIX_LEN) is detected and
 * drawn in the prefix color on the first line only.
 */
static void drm_log_draw_kmsg_record(struct drm_log_scanout *scanout,
				     const char *s, unsigned int len)
{
	u32 prefix_len = 0;

	/* Guard: s[len - 1] below would underflow on an empty record. */
	if (!len)
		return;

	if (len > TS_PREFIX_LEN && s[0] == '[' && s[6] == '.' && s[TS_PREFIX_LEN] == ']')
		prefix_len = TS_PREFIX_LEN + 1;

	/* do not print the ending \n character */
	if (s[len - 1] == '\n')
		len--;

	while (len > scanout->columns) {
		drm_log_draw_new_line(scanout, s, scanout->columns, prefix_len);
		s += scanout->columns;
		len -= scanout->columns;
		prefix_len = 0;
	}
	if (len)
		drm_log_draw_new_line(scanout, s, len, prefix_len);
}
/*
 * Return the first plane format we can convert colors to (i.e. for which
 * drm_draw_color_from_xrgb8888() produces a non-zero white), or
 * DRM_FORMAT_INVALID if none is supported.
 */
static u32 drm_log_find_usable_format(struct drm_plane *plane)
{
	unsigned int i;	/* format_count is unsigned; avoid signed/unsigned compare */

	for (i = 0; i < plane->format_count; i++)
		if (drm_draw_color_from_xrgb8888(0xffffff, plane->format_types[i]) != 0)
			return plane->format_types[i];
	return DRM_FORMAT_INVALID;
}
static int drm_log_setup_modeset(struct drm_client_dev *client,
struct drm_mode_set *mode_set,
struct drm_log_scanout *scanout)
{
struct drm_crtc *crtc = mode_set->crtc;
u32 width = mode_set->mode->hdisplay;
u32 height = mode_set->mode->vdisplay;
u32 format;
scanout->font = get_default_font(width, height, NULL, NULL);
if (!scanout->font)
return -ENOENT;
format = drm_log_find_usable_format(crtc->primary);
if (format == DRM_FORMAT_INVALID)
return -EINVAL;
scanout->buffer = drm_client_framebuffer_create(client, width, height, format);
if (IS_ERR(scanout->buffer)) {
drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n",
width, height, &format);
return -ENOMEM;
}
mode_set->fb = scanout->buffer->fb;
scanout->scaled_font_h = scanout->font->height * scale;
scanout->scaled_font_w = scanout->font->width * scale;
scanout->rows = height / scanout->scaled_font_h;
scanout->columns = width / scanout->scaled_font_w;
scanout->front_color = drm_draw_color_from_xrgb8888(0xffffff, format);
scanout->prefix_color = drm_draw_color_from_xrgb8888(0x4e9a06, format);
return 0;
}
/* Count the client's modesets, under the modeset mutex. */
static int drm_log_count_modeset(struct drm_client_dev *client)
{
	struct drm_mode_set *mode_set;
	int count = 0;

	mutex_lock(&client->modeset_mutex);
	drm_client_for_each_modeset(mode_set, client)
		count++;
	mutex_unlock(&client->modeset_mutex);
	return count;
}
/*
 * Probe the displays and set up one scanout per usable modeset, then
 * commit. Failures are absorbed silently (dlog->scanout stays NULL and
 * n_scanout stays 0); dlog->probed is set regardless so the console write
 * path does not retry on every record until the next hotplug.
 */
static void drm_log_init_client(struct drm_log *dlog)
{
	struct drm_client_dev *client = &dlog->client;
	struct drm_mode_set *mode_set;
	int i, max_modeset;
	int n_modeset = 0;

	dlog->probed = true;

	if (drm_client_modeset_probe(client, 0, 0))
		return;

	max_modeset = drm_log_count_modeset(client);
	if (!max_modeset)
		return;

	dlog->scanout = kcalloc(max_modeset, sizeof(*dlog->scanout), GFP_KERNEL);
	if (!dlog->scanout)
		return;

	mutex_lock(&client->modeset_mutex);
	drm_client_for_each_modeset(mode_set, client) {
		/* Skip disabled outputs and outputs we fail to set up. */
		if (!mode_set->mode)
			continue;
		if (drm_log_setup_modeset(client, mode_set, &dlog->scanout[n_modeset]))
			continue;
		n_modeset++;
	}
	mutex_unlock(&client->modeset_mutex);
	if (n_modeset == 0)
		goto err_nomodeset;

	if (drm_client_modeset_commit(client))
		goto err_failed_commit;

	dlog->n_scanout = n_modeset;
	return;

err_failed_commit:
	/* Unwind only the scanouts that were successfully set up. */
	for (i = 0; i < n_modeset; i++)
		drm_client_framebuffer_delete(dlog->scanout[i].buffer);

err_nomodeset:
	kfree(dlog->scanout);
	dlog->scanout = NULL;
}
/* Release every scanout framebuffer and reset the scanout array. */
static void drm_log_free_scanout(struct drm_client_dev *client)
{
	struct drm_log *dlog = client_to_drm_log(client);
	int i;

	/* Nothing was ever set up. */
	if (!dlog->n_scanout)
		return;

	for (i = 0; i < dlog->n_scanout; i++)
		drm_client_framebuffer_delete(dlog->scanout[i].buffer);
	dlog->n_scanout = 0;
	kfree(dlog->scanout);
	dlog->scanout = NULL;
}
/*
 * Client unregister callback: tear down the console first so no further
 * writes race the teardown, then free the scanouts and the drm_log
 * instance under the lock.
 */
static void drm_log_client_unregister(struct drm_client_dev *client)
{
	struct drm_log *dlog = client_to_drm_log(client);
	struct drm_device *dev = client->dev;

	unregister_console(&dlog->con);

	mutex_lock(&dlog->lock);
	drm_log_free_scanout(client);
	drm_client_release(client);
	mutex_unlock(&dlog->lock);
	kfree(dlog);
	drm_dbg(dev, "Unregistered with drm log\n");
}
/*
 * Hotplug callback: drop the current scanouts and clear "probed" so the
 * next console write re-probes the new display configuration.
 */
static int drm_log_client_hotplug(struct drm_client_dev *client)
{
	struct drm_log *dlog = client_to_drm_log(client);

	mutex_lock(&dlog->lock);
	drm_log_free_scanout(client);
	dlog->probed = false;
	mutex_unlock(&dlog->lock);
	return 0;
}

/* Suspend: stop console output. @_console_lock is part of the client API. */
static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock)
{
	struct drm_log *dlog = client_to_drm_log(client);

	console_stop(&dlog->con);

	return 0;
}

/* Resume: restart console output. */
static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock)
{
	struct drm_log *dlog = client_to_drm_log(client);

	console_start(&dlog->con);

	return 0;
}
/* DRM client callbacks through which drm_log tracks device lifecycle. */
static const struct drm_client_funcs drm_log_client_funcs = {
	.owner = THIS_MODULE,
	.unregister = drm_log_client_unregister,
	.hotplug = drm_log_client_hotplug,
	.suspend = drm_log_client_suspend,
	.resume = drm_log_client_resume,
};
/*
 * nbcon write_thread callback: lazily (re)probe the displays on the first
 * write after init or hotplug, then draw the record on every scanout.
 */
static void drm_log_write_thread(struct console *con, struct nbcon_write_context *wctxt)
{
	struct drm_log *dlog = console_to_drm_log(con);
	int i;

	if (!dlog->probed)
		drm_log_init_client(dlog);

	/* Check that we are still the master before drawing */
	if (drm_master_internal_acquire(dlog->client.dev)) {
		drm_master_internal_release(dlog->client.dev);

		for (i = 0; i < dlog->n_scanout; i++)
			drm_log_draw_kmsg_record(&dlog->scanout[i], wctxt->outbuf, wctxt->len);
	}
}
/*
 * nbcon device_lock callback: serialize console writes against
 * hotplug/unregister, which take the same mutex.
 * NOTE(review): migrate_disable() presumably pins the writer to one CPU
 * for the duration of the write — confirm against the nbcon API contract.
 */
static void drm_log_lock(struct console *con, unsigned long *flags)
{
	struct drm_log *dlog = console_to_drm_log(con);

	mutex_lock(&dlog->lock);
	migrate_disable();
}

/* nbcon device_unlock callback: mirror of drm_log_lock(). */
static void drm_log_unlock(struct console *con, unsigned long flags)
{
	struct drm_log *dlog = console_to_drm_log(con);

	migrate_enable();
	mutex_unlock(&dlog->lock);
}
/*
 * Fill in and register the nbcon console. CON_PRINTBUFFER replays the
 * existing kernel log buffer on registration; index -1 lets the console
 * core assign the index.
 */
static void drm_log_register_console(struct console *con)
{
	strscpy(con->name, "drm_log");
	con->write_thread = drm_log_write_thread;
	con->device_lock = drm_log_lock;
	con->device_unlock = drm_log_unlock;
	con->flags = CON_PRINTBUFFER | CON_NBCON;
	con->index = -1;

	register_console(con);
}
/**
 * drm_log_register() - Register a drm device to drm_log
 * @dev: the drm device to register.
 *
 * Allocates a drm_log instance, registers it as a DRM client of @dev and
 * as an nbcon console. On failure only a warning is emitted; the device
 * keeps working without a boot logger.
 */
void drm_log_register(struct drm_device *dev)
{
	struct drm_log *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto err_warn;

	mutex_init(&new->lock);
	if (drm_client_init(dev, &new->client, "drm_log", &drm_log_client_funcs))
		goto err_free;

	drm_client_register(&new->client);

	drm_log_register_console(&new->con);

	drm_dbg(dev, "Registered with drm log as %s\n", new->con.name);
	return;

err_free:
	kfree(new);
err_warn:
	drm_warn(dev, "Failed to register with drm log\n");
}

View File

@ -18,6 +18,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
/**
@ -299,9 +300,22 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
return 0;
}
/*
 * .mode_valid for bridge connectors: HDMI bridges get the generic HDMI
 * clock validation; everything else is accepted here since the bridges
 * themselves are validated by the core.
 */
static enum drm_mode_status
drm_bridge_connector_mode_valid(struct drm_connector *connector,
				struct drm_display_mode *mode)
{
	struct drm_bridge_connector *bridge_connector =
		to_drm_bridge_connector(connector);

	if (bridge_connector->bridge_hdmi)
		return drm_hdmi_connector_mode_valid(connector, mode);

	return MODE_OK;
}
static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
.get_modes = drm_bridge_connector_get_modes,
/* No need for .mode_valid(), the bridges are checked by the core. */
.mode_valid = drm_bridge_connector_mode_valid,
.enable_hpd = drm_bridge_connector_enable_hpd,
.disable_hpd = drm_bridge_connector_disable_hpd,
};

View File

@ -2281,7 +2281,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
drm_connector_register(port->connector);
drm_connector_dynamic_register(port->connector);
return;
error:

View File

@ -347,6 +347,8 @@ static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
is_limited_range ? HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL;
int ret;
infoframe->set = false;
ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector, mode);
if (ret)
return ret;
@ -376,6 +378,8 @@ static int hdmi_generate_spd_infoframe(const struct drm_connector *connector,
&infoframe->data.spd;
int ret;
infoframe->set = false;
ret = hdmi_spd_infoframe_init(frame,
connector->hdmi.vendor,
connector->hdmi.product);
@ -398,6 +402,8 @@ static int hdmi_generate_hdr_infoframe(const struct drm_connector *connector,
&infoframe->data.drm;
int ret;
infoframe->set = false;
if (connector->max_bpc < 10)
return 0;
@ -425,6 +431,8 @@ static int hdmi_generate_hdmi_vendor_infoframe(const struct drm_connector *conne
&infoframe->data.vendor.hdmi;
int ret;
infoframe->set = false;
if (!info->has_hdmi_infoframe)
return 0;
@ -521,6 +529,27 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_atomic_helper_connector_hdmi_check);
/**
 * drm_hdmi_connector_mode_valid() - Check if mode is valid for HDMI connector
 * @connector: DRM connector to validate the mode
 * @mode: Display mode to validate
 *
 * Generic .mode_valid implementation for HDMI connectors.
 *
 * Returns:
 * MODE_ERROR if the TMDS character rate for @mode (computed for 8 bpc RGB)
 * cannot be determined, otherwise the status reported by hdmi_clock_valid().
 */
enum drm_mode_status
drm_hdmi_connector_mode_valid(struct drm_connector *connector,
			      struct drm_display_mode *mode)
{
	unsigned long long clock;

	/* Compute the TMDS clock assuming 8 bpc RGB output. */
	clock = drm_hdmi_compute_mode_clock(mode, 8, HDMI_COLORSPACE_RGB);
	if (!clock)
		return MODE_ERROR;

	return hdmi_clock_valid(connector, mode, clock);
}
EXPORT_SYMBOL(drm_hdmi_connector_mode_valid);
static int clear_device_infoframe(struct drm_connector *connector,
enum hdmi_infoframe_type type)
{

View File

@ -218,11 +218,11 @@ void drm_connector_free_work_fn(struct work_struct *work)
}
}
static int __drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc)
static int drm_connector_init_only(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type,
struct i2c_adapter *ddc)
{
struct drm_mode_config *config = &dev->mode_config;
int ret;
@ -273,10 +273,12 @@ static int __drm_connector_init(struct drm_device *dev,
/* provide ddc symlink in sysfs */
connector->ddc = ddc;
INIT_LIST_HEAD(&connector->head);
INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
connector->edid_blob_ptr = NULL;
@ -288,14 +290,6 @@ static int __drm_connector_init(struct drm_device *dev,
drm_connector_get_cmdline_mode(connector);
/* We should add connectors at the end to avoid upsetting the connector
* index too much.
*/
spin_lock_irq(&config->connector_list_lock);
list_add_tail(&connector->head, &config->connector_list);
config->num_connector++;
spin_unlock_irq(&config->connector_list_lock);
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
drm_connector_attach_edid_property(connector);
@ -332,6 +326,54 @@ static int __drm_connector_init(struct drm_device *dev,
return ret;
}
/*
 * Add an initialized connector to the device's connector list.
 * WARNs and bails if the connector is already on a list.
 */
static void drm_connector_add(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_mode_config *config = &dev->mode_config;

	if (drm_WARN_ON(dev, !list_empty(&connector->head)))
		return;

	spin_lock_irq(&config->connector_list_lock);
	list_add_tail(&connector->head, &config->connector_list);
	config->num_connector++;
	spin_unlock_irq(&config->connector_list_lock);
}

/* Remove a connector from the device's connector list, if it is on it. */
static void drm_connector_remove(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;

	/*
	 * For dynamic connectors drm_connector_cleanup() can call this function
	 * before the connector is registered and added to the list.
	 */
	if (list_empty(&connector->head))
		return;

	spin_lock_irq(&dev->mode_config.connector_list_lock);
	list_del_init(&connector->head);
	dev->mode_config.num_connector--;
	spin_unlock_irq(&dev->mode_config.connector_list_lock);
}
/*
 * Initialize a connector and immediately add it to the connector list —
 * the path taken by static connectors; dynamic (MST) connectors are added
 * later, at registration time.
 */
static int drm_connector_init_and_add(struct drm_device *dev,
				      struct drm_connector *connector,
				      const struct drm_connector_funcs *funcs,
				      int connector_type,
				      struct i2c_adapter *ddc)
{
	int ret;

	ret = drm_connector_init_only(dev, connector, funcs, connector_type, ddc);
	if (ret)
		return ret;

	drm_connector_add(connector);

	return 0;
}
/**
* drm_connector_init - Init a preallocated connector
* @dev: DRM device
@ -361,10 +403,51 @@ int drm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
return __drm_connector_init(dev, connector, funcs, connector_type, NULL);
return drm_connector_init_and_add(dev, connector, funcs, connector_type, NULL);
}
EXPORT_SYMBOL(drm_connector_init);
/**
* drm_connector_dynamic_init - Init a preallocated dynamic connector
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
* @connector_type: user visible type of the connector
* @ddc: pointer to the associated ddc adapter
*
* Initialises a preallocated dynamic connector. Connectors should be
* subclassed as part of driver connector objects. The connector
* structure should not be allocated with devm_kzalloc().
*
* Drivers should call this for dynamic connectors which can be hotplugged
* after drm_dev_register() has been called already, e.g. DP MST connectors.
* For all other - static - connectors, drivers should call one of the
* drm_connector_init*()/drmm_connector_init*() functions.
*
* After calling this function the drivers must call
* drm_connector_dynamic_register().
*
* To remove the connector the driver must call drm_connector_unregister()
* followed by drm_connector_put(). Putting the last reference will call the
* driver's &drm_connector_funcs.destroy hook, which in turn must call
* drm_connector_cleanup() and free the connector structure.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_connector_dynamic_init(struct drm_device *dev,
			       struct drm_connector *connector,
			       const struct drm_connector_funcs *funcs,
			       int connector_type,
			       struct i2c_adapter *ddc)
{
	/* Dynamic connectors are refcounted; a destroy hook is mandatory. */
	if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
		return -EINVAL;

	/*
	 * Init only — the connector is added to the connector list later,
	 * from drm_connector_dynamic_register().
	 */
	return drm_connector_init_only(dev, connector, funcs, connector_type, ddc);
}
EXPORT_SYMBOL(drm_connector_dynamic_init);
/**
* drm_connector_init_with_ddc - Init a preallocated connector
* @dev: DRM device
@ -398,7 +481,7 @@ int drm_connector_init_with_ddc(struct drm_device *dev,
if (drm_WARN_ON(dev, !(funcs && funcs->destroy)))
return -EINVAL;
return __drm_connector_init(dev, connector, funcs, connector_type, ddc);
return drm_connector_init_and_add(dev, connector, funcs, connector_type, ddc);
}
EXPORT_SYMBOL(drm_connector_init_with_ddc);
@ -442,7 +525,7 @@ int drmm_connector_init(struct drm_device *dev,
if (drm_WARN_ON(dev, funcs && funcs->destroy))
return -EINVAL;
ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc);
ret = drm_connector_init_and_add(dev, connector, funcs, connector_type, ddc);
if (ret)
return ret;
@ -659,10 +742,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->name = NULL;
fwnode_handle_put(connector->fwnode);
connector->fwnode = NULL;
spin_lock_irq(&dev->mode_config.connector_list_lock);
list_del(&connector->head);
dev->mode_config.num_connector--;
spin_unlock_irq(&dev->mode_config.connector_list_lock);
drm_connector_remove(connector);
WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
if (connector->state && connector->funcs->atomic_destroy_state)
@ -683,14 +764,17 @@ EXPORT_SYMBOL(drm_connector_cleanup);
* drm_connector_register - register a connector
* @connector: the connector to register
*
* Register userspace interfaces for a connector. Only call this for connectors
* which can be hotplugged after drm_dev_register() has been called already,
* e.g. DP MST connectors. All other connectors will be registered automatically
* when calling drm_dev_register().
* Register userspace interfaces for a connector. Drivers shouldn't call this
* function. Static connectors will be registered automatically by DRM core
* from drm_dev_register(), dynamic connectors (MST) should be registered by
* drivers calling drm_connector_dynamic_register().
*
* When the connector is no longer available, callers must call
* drm_connector_unregister().
*
* Note: Existing uses of this function in drivers should be a nop already and
* are scheduled to be removed.
*
* Returns:
* Zero on success, error code on failure.
*/
@ -749,13 +833,44 @@ int drm_connector_register(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_connector_register);
/**
* drm_connector_dynamic_register - register a dynamic connector
* @connector: the connector to register
*
* Register userspace interfaces for a connector. Only call this for connectors
* initialized by calling drm_connector_dynamic_init(). All other connectors
* will be registered automatically when calling drm_dev_register().
*
* When the connector is no longer available the driver must call
* drm_connector_unregister().
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_connector_dynamic_register(struct drm_connector *connector)
{
	/* Was the connector inited already? */
	if (WARN_ON(!(connector->funcs && connector->funcs->destroy)))
		return -EINVAL;

	/* Deferred from drm_connector_dynamic_init(): add to the list now. */
	drm_connector_add(connector);

	return drm_connector_register(connector);
}
EXPORT_SYMBOL(drm_connector_dynamic_register);
/**
* drm_connector_unregister - unregister a connector
* @connector: the connector to unregister
*
* Unregister userspace interfaces for a connector. Only call this for
* connectors which have been registered explicitly by calling
* drm_connector_register().
* Unregister userspace interfaces for a connector. Drivers should call this
* for dynamic connectors (MST) only, which were registered explicitly by
* calling drm_connector_dynamic_register(). All other - static - connectors
* will be unregistered automatically by DRM core and drivers shouldn't call
* this function for those.
*
* Note: Existing uses of this function in drivers for static connectors
* should be a nop already and are scheduled to be removed.
*/
void drm_connector_unregister(struct drm_connector *connector)
{

233
drivers/gpu/drm/drm_draw.c Normal file
View File

@ -0,0 +1,233 @@
// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright (c) 2023 Red Hat.
* Author: Jocelyn Falempe <jfalempe@redhat.com>
*/
#include <linux/bits.h>
#include <linux/iosys-map.h>
#include <linux/types.h>
#include <drm/drm_fourcc.h>
#include "drm_draw_internal.h"
/*
 * Conversions from xrgb8888
 *
 * Each helper repacks the 8-bit R/G/B fields of an xrgb8888 pixel into the
 * target format's bit layout, truncating to the target bit depth and
 * forcing alpha bits fully opaque where the format has them.
 */

/* 5-6-5: keep the top 5/6/5 bits of R/G/B. */
static u16 convert_xrgb8888_to_rgb565(u32 pix)
{
	return ((pix & 0x00F80000) >> 8) |
	       ((pix & 0x0000FC00) >> 5) |
	       ((pix & 0x000000F8) >> 3);
}

/* 5-5-5-1 with alpha in bit 0, forced opaque. */
static u16 convert_xrgb8888_to_rgba5551(u32 pix)
{
	return ((pix & 0x00f80000) >> 8) |
	       ((pix & 0x0000f800) >> 5) |
	       ((pix & 0x000000f8) >> 2) |
	       BIT(0); /* set alpha bit */
}

/* x-5-5-5: padding bit 15 left clear. */
static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
{
	return ((pix & 0x00f80000) >> 9) |
	       ((pix & 0x0000f800) >> 6) |
	       ((pix & 0x000000f8) >> 3);
}

/* 1-5-5-5 with alpha in bit 15, forced opaque. */
static u16 convert_xrgb8888_to_argb1555(u32 pix)
{
	return BIT(15) | /* set alpha bit */
	       ((pix & 0x00f80000) >> 9) |
	       ((pix & 0x0000f800) >> 6) |
	       ((pix & 0x000000f8) >> 3);
}

/* Same layout as xrgb8888; just force the alpha byte opaque. */
static u32 convert_xrgb8888_to_argb8888(u32 pix)
{
	return pix | GENMASK(31, 24); /* fill alpha bits */
}

/* Swap the R and B channels; keep the padding byte as-is. */
static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
{
	return ((pix & 0x00ff0000) >> 16) <<  0 |
	       ((pix & 0x0000ff00) >>  8) <<  8 |
	       ((pix & 0x000000ff) >>  0) << 16 |
	       ((pix & 0xff000000) >> 24) << 24;
}

/* Swap R and B and force the alpha byte opaque. */
static u32 convert_xrgb8888_to_abgr8888(u32 pix)
{
	return ((pix & 0x00ff0000) >> 16) <<  0 |
	       ((pix & 0x0000ff00) >>  8) <<  8 |
	       ((pix & 0x000000ff) >>  0) << 16 |
	       GENMASK(31, 24); /* fill alpha bits */
}

/*
 * 10-bit channels: shift each 8-bit channel into place, then replicate its
 * top 2 bits into the low 2 bits so full-scale 0xff maps to full-scale 0x3ff.
 */
static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
{
	pix = ((pix & 0x000000FF) << 2) |
	      ((pix & 0x0000FF00) << 4) |
	      ((pix & 0x00FF0000) << 6);
	return pix | ((pix >> 8) & 0x00300C03);
}

/* As xrgb2101010, with the 2 alpha bits forced opaque. */
static u32 convert_xrgb8888_to_argb2101010(u32 pix)
{
	pix = ((pix & 0x000000FF) << 2) |
	      ((pix & 0x0000FF00) << 4) |
	      ((pix & 0x00FF0000) << 6);
	return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
}

/* As argb2101010, with R and B swapped. */
static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
{
	pix = ((pix & 0x00FF0000) >> 14) |
	      ((pix & 0x0000FF00) << 4) |
	      ((pix & 0x000000FF) << 22);
	return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
}
/**
 * drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
 * @color: input color, in xrgb8888 format
 * @format: output format
 *
 * Returns:
 * Color in the format specified, cast to u32.
 * Or 0 if the format is not supported.
 */
u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
{
	switch (format) {
	case DRM_FORMAT_RGB565:
		return convert_xrgb8888_to_rgb565(color);
	case DRM_FORMAT_RGBA5551:
		return convert_xrgb8888_to_rgba5551(color);
	case DRM_FORMAT_XRGB1555:
		return convert_xrgb8888_to_xrgb1555(color);
	case DRM_FORMAT_ARGB1555:
		return convert_xrgb8888_to_argb1555(color);
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_XRGB8888:
		/* RGB888 shares the xrgb layout; blit24 emits the low 3 bytes. */
		return color;
	case DRM_FORMAT_ARGB8888:
		return convert_xrgb8888_to_argb8888(color);
	case DRM_FORMAT_XBGR8888:
		return convert_xrgb8888_to_xbgr8888(color);
	case DRM_FORMAT_ABGR8888:
		return convert_xrgb8888_to_abgr8888(color);
	case DRM_FORMAT_XRGB2101010:
		return convert_xrgb8888_to_xrgb2101010(color);
	case DRM_FORMAT_ARGB2101010:
		return convert_xrgb8888_to_argb2101010(color);
	case DRM_FORMAT_ABGR2101010:
		return convert_xrgb8888_to_abgr2101010(color);
	default:
		WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
		return 0;
	}
}
EXPORT_SYMBOL(drm_draw_color_from_xrgb8888);
/*
* Blit functions
*/
/**
 * drm_draw_blit16 - Draw a monochrome bitmap to a 16 bits-per-pixel buffer
 * @dmap: destination iosys_map
 * @dpitch: destination pitch in bytes
 * @sbuf8: source monochrome bitmap, 8 pixels per byte, MSB first
 * @spitch: source pitch in bytes
 * @height: height of the destination rectangle, in pixels
 * @width: width of the destination rectangle, in pixels
 * @scale: upscaling factor applied to the source bitmap
 * @fg16: foreground color, already converted to the 16-bit pixel format
 *
 * Only foreground (set) source bits are written; background pixels are
 * left untouched.
 */
void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
		     const u8 *sbuf8, unsigned int spitch,
		     unsigned int height, unsigned int width,
		     unsigned int scale, u16 fg16)
{
	unsigned int y, x;

	for (y = 0; y < height; y++)
		for (x = 0; x < width; x++)
			if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
				iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
}
EXPORT_SYMBOL(drm_draw_blit16);
/**
 * drm_draw_blit24 - Draw a monochrome bitmap to a 24 bits-per-pixel buffer
 * @dmap: destination iosys_map
 * @dpitch: destination pitch in bytes
 * @sbuf8: source monochrome bitmap, 8 pixels per byte, MSB first
 * @spitch: source pitch in bytes
 * @height: height of the destination rectangle, in pixels
 * @width: width of the destination rectangle, in pixels
 * @scale: upscaling factor applied to the source bitmap
 * @fg32: foreground color; only the low 3 bytes are written
 *
 * Only foreground (set) source bits are written; background pixels are
 * left untouched.
 */
void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
		     const u8 *sbuf8, unsigned int spitch,
		     unsigned int height, unsigned int width,
		     unsigned int scale, u32 fg32)
{
	unsigned int y, x;

	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			u32 off = y * dpitch + x * 3;

			if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
				/* write blue-green-red to output in little endianness */
				iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
				iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
				iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
			}
		}
	}
}
EXPORT_SYMBOL(drm_draw_blit24);
/*
 * drm_draw_blit32 - Draw a monochrome bitmap to a 32 bits-per-pixel buffer.
 * Writes @fg32 for every set source bit (scaled up by @scale); clear bits
 * leave the destination untouched.
 */
void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
		     const u8 *sbuf8, unsigned int spitch,
		     unsigned int height, unsigned int width,
		     unsigned int scale, u32 fg32)
{
	unsigned int row, col;

	for (row = 0; row < height; row++) {
		for (col = 0; col < width; col++) {
			if (!drm_draw_is_pixel_fg(sbuf8, spitch, col / scale, row / scale))
				continue;
			iosys_map_wr(dmap, row * dpitch + col * sizeof(u32), u32, fg32);
		}
	}
}
EXPORT_SYMBOL(drm_draw_blit32);
/*
* Fill functions
*/
/*
 * drm_draw_fill16 - Fill a width x height rectangle with a 16-bit color.
 */
void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
		     unsigned int height, unsigned int width,
		     u16 color)
{
	unsigned int row, col;

	for (row = 0; row < height; row++) {
		unsigned int line = row * dpitch;

		for (col = 0; col < width; col++)
			iosys_map_wr(dmap, line + col * sizeof(u16), u16, color);
	}
}
EXPORT_SYMBOL(drm_draw_fill16);
/**
 * drm_draw_fill24 - Fill a rectangle in a 24 bits-per-pixel buffer
 * @dmap: destination iosys_map
 * @dpitch: destination pitch in bytes
 * @height: height of the rectangle, in pixels
 * @width: width of the rectangle, in pixels
 * @color: fill color, written as 3 bytes in little-endian order
 *
 * NOTE(review): @color is declared u16, so after integer promotion
 * (color & 0x00FF0000) is always 0 and the third (red) byte written below
 * is always zero. The drm_panic helper this mirrors takes u32 - confirm
 * whether this parameter (and the prototype in drm_draw_internal.h)
 * should be widened to u32.
 */
void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
		     unsigned int height, unsigned int width,
		     u16 color)
{
	unsigned int y, x;

	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			unsigned int off = y * dpitch + x * 3;

			/* write blue-green-red to output in little endianness */
			iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
			iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
			iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
		}
	}
}
EXPORT_SYMBOL(drm_draw_fill24);
/*
 * drm_draw_fill32 - Fill a width x height rectangle with a 32-bit color.
 */
void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
		     unsigned int height, unsigned int width,
		     u32 color)
{
	unsigned int row, col;

	for (row = 0; row < height; row++) {
		unsigned int line = row * dpitch;

		for (col = 0; col < width; col++)
			iosys_map_wr(dmap, line + col * sizeof(u32), u32, color);
	}
}
EXPORT_SYMBOL(drm_draw_fill32);

View File

@ -0,0 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0 or MIT */
/*
 * Copyright (c) 2023 Red Hat.
 * Author: Jocelyn Falempe <jfalempe@redhat.com>
 *
 * Internal drawing helpers shared by drm_panic and other in-kernel
 * consumers: color conversion from XRGB8888, monochrome-bitmap blitting,
 * and rectangle fills at 16/24/32 bits per pixel.
 */

#ifndef __DRM_DRAW_INTERNAL_H__
#define __DRM_DRAW_INTERNAL_H__

#include <linux/font.h>
#include <linux/types.h>

struct iosys_map;

/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
static inline bool drm_draw_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
{
	return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
}

/*
 * Return the start of glyph @c's bitmap in @font.
 * NOTE(review): @c is plain char, so glyphs >= 128 depend on the
 * platform's char signedness - presumably only ASCII is drawn; confirm.
 */
static inline const u8 *drm_draw_get_char_bitmap(const struct font_desc *font,
						 char c, size_t font_pitch)
{
	return font->data + (c * font->height) * font_pitch;
}

/* Convert an XRGB8888 color to @format; returns 0 if @format is unsupported. */
u32 drm_draw_color_from_xrgb8888(u32 color, u32 format);

/*
 * Blit helpers: draw a monochrome bitmap (8 pixels per byte, MSB first),
 * upscaled by @scale, writing the foreground color only for set bits.
 */
void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
		     const u8 *sbuf8, unsigned int spitch,
		     unsigned int height, unsigned int width,
		     unsigned int scale, u16 fg16);

void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
		     const u8 *sbuf8, unsigned int spitch,
		     unsigned int height, unsigned int width,
		     unsigned int scale, u32 fg32);

void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
		     const u8 *sbuf8, unsigned int spitch,
		     unsigned int height, unsigned int width,
		     unsigned int scale, u32 fg32);

/* Fill helpers: write @color to every pixel of a width x height rectangle. */
void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
		     unsigned int height, unsigned int width,
		     u16 color);

/*
 * NOTE(review): drm_draw_fill24() takes its color as u16, but a 24bpp
 * color needs bits 16-23 for its third byte; with u16 those bits are
 * always zero. Should this be u32, like drm_draw_blit24()'s fg32?
 */
void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
		     unsigned int height, unsigned int width,
		     u16 color);

void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
		     unsigned int height, unsigned int width,
		     u32 color);

#endif /* __DRM_DRAW_INTERNAL_H__ */

View File

@ -5605,7 +5605,9 @@ EXPORT_SYMBOL(drm_edid_get_monitor_name);
static void clear_eld(struct drm_connector *connector)
{
mutex_lock(&connector->eld_mutex);
memset(connector->eld, 0, sizeof(connector->eld));
mutex_unlock(&connector->eld_mutex);
connector->latency_present[0] = false;
connector->latency_present[1] = false;
@ -5657,6 +5659,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
if (!drm_edid)
return;
mutex_lock(&connector->eld_mutex);
mnl = get_monitor_name(drm_edid, &eld[DRM_ELD_MONITOR_NAME_STRING]);
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD monitor %s\n",
connector->base.id, connector->name,
@ -5717,6 +5721,8 @@ static void drm_edid_to_eld(struct drm_connector *connector,
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] ELD size %d, SAD count %d\n",
connector->base.id, connector->name,
drm_eld_size(eld), total_sad_count);
mutex_unlock(&connector->eld_mutex);
}
static int _drm_edid_to_sad(const struct drm_edid *drm_edid,

View File

@ -845,6 +845,16 @@ static void print_size(struct drm_printer *p, const char *stat,
drm_printf(p, "drm-%s-%s:\t%llu%s\n", stat, region, sz, units[u]);
}
/* Return non-zero iff every counter in @stats is zero. */
int drm_memory_stats_is_zero(const struct drm_memory_stats *stats)
{
	return !(stats->shared || stats->private || stats->resident ||
		 stats->purgeable || stats->active);
}
EXPORT_SYMBOL(drm_memory_stats_is_zero);
/**
* drm_print_memory_stats - A helper to print memory stats
* @p: The printer to print output to
@ -860,7 +870,9 @@ void drm_print_memory_stats(struct drm_printer *p,
{
print_size(p, "total", region, stats->private + stats->shared);
print_size(p, "shared", region, stats->shared);
print_size(p, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_ACTIVE)
print_size(p, "active", region, stats->active);
if (supported_status & DRM_GEM_OBJECT_RESIDENT)
print_size(p, "resident", region, stats->resident);
@ -893,15 +905,13 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
if (obj->funcs && obj->funcs->status) {
s = obj->funcs->status(obj);
supported_status = DRM_GEM_OBJECT_RESIDENT |
DRM_GEM_OBJECT_PURGEABLE;
supported_status |= s;
}
if (drm_gem_object_is_shared_for_memory_stats(obj)) {
if (drm_gem_object_is_shared_for_memory_stats(obj))
status.shared += obj->size;
} else {
else
status.private += obj->size;
}
if (s & DRM_GEM_OBJECT_RESIDENT) {
status.resident += add_size;
@ -914,6 +924,7 @@ void drm_show_memory_stats(struct drm_printer *p, struct drm_file *file)
if (!dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true))) {
status.active += add_size;
supported_status |= DRM_GEM_OBJECT_ACTIVE;
/* If still active, don't count as purgeable: */
s &= ~DRM_GEM_OBJECT_PURGEABLE;

View File

@ -150,6 +150,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
drm_connector_list_iter_begin(dev, &conn_iter);
count = 0;
connector_id = u64_to_user_ptr(card_res->connector_id_ptr);
/*
* FIXME: the connectors on the list may not be fully initialized yet,
* if the ioctl is called before the connectors are registered. (See
* drm_dev_register()->drm_modeset_register_all() for static and
* drm_connector_dynamic_register() for dynamic connectors.)
* The driver should only get registered after static connectors are
* fully initialized and dynamic connectors should be added to the
* connector list only after fully initializing them.
*/
drm_for_each_connector_iter(connector, &conn_iter) {
/* only expose writeback connectors if userspace understands them */
if (!file_priv->writeback_connectors &&

View File

@ -1282,8 +1282,7 @@ EXPORT_SYMBOL(drm_mode_set_name);
* @mode: mode
*
* Returns:
* @modes's vrefresh rate in Hz, rounded to the nearest integer. Calculates the
* value first if it is not yet set.
* @modes's vrefresh rate in Hz, rounded to the nearest integer.
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{

View File

@ -24,6 +24,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>

View File

@ -31,6 +31,7 @@
#include <drm/drm_rect.h>
#include "drm_crtc_internal.h"
#include "drm_draw_internal.h"
MODULE_AUTHOR("Jocelyn Falempe");
MODULE_DESCRIPTION("DRM panic handler");
@ -139,181 +140,8 @@ device_initcall(drm_panic_setup_logo);
#endif
/*
* Color conversion
* Blit & Fill functions
*/
/* Convert an XRGB8888 pixel to RGB565 by keeping each channel's MSBs. */
static u16 convert_xrgb8888_to_rgb565(u32 pix)
{
	u16 r = (pix & 0x00F80000) >> 8;
	u16 g = (pix & 0x0000FC00) >> 5;
	u16 b = (pix & 0x000000F8) >> 3;

	return r | g | b;
}
/* Convert an XRGB8888 pixel to RGBA5551 with the alpha bit set. */
static u16 convert_xrgb8888_to_rgba5551(u32 pix)
{
	u16 r = (pix & 0x00f80000) >> 8;
	u16 g = (pix & 0x0000f800) >> 5;
	u16 b = (pix & 0x000000f8) >> 2;

	/* Bit 0 is the single alpha bit, forced opaque. */
	return r | g | b | BIT(0);
}
/* Convert an XRGB8888 pixel to XRGB1555 (top bit left clear). */
static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
{
	u16 r = (pix & 0x00f80000) >> 9;
	u16 g = (pix & 0x0000f800) >> 6;
	u16 b = (pix & 0x000000f8) >> 3;

	return r | g | b;
}
/* Convert an XRGB8888 pixel to ARGB1555 with the alpha bit set. */
static u16 convert_xrgb8888_to_argb1555(u32 pix)
{
	u16 r = (pix & 0x00f80000) >> 9;
	u16 g = (pix & 0x0000f800) >> 6;
	u16 b = (pix & 0x000000f8) >> 3;

	/* Bit 15 is the single alpha bit, forced opaque. */
	return BIT(15) | r | g | b;
}
/* Convert XRGB8888 to ARGB8888: only the alpha byte changes, forced opaque. */
static u32 convert_xrgb8888_to_argb8888(u32 pix)
{
	return pix | GENMASK(31, 24); /* fill alpha bits */
}
/* Convert XRGB8888 to XBGR8888: swap red and blue, keep X and green. */
static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
{
	u32 r = (pix & 0x00ff0000) >> 16;
	u32 g = pix & 0x0000ff00;
	u32 b = (pix & 0x000000ff) << 16;
	u32 x = pix & 0xff000000;

	return x | b | g | r;
}
/* Convert XRGB8888 to ABGR8888: swap red/blue and force alpha opaque. */
static u32 convert_xrgb8888_to_abgr8888(u32 pix)
{
	u32 r = (pix & 0x00ff0000) >> 16;
	u32 g = pix & 0x0000ff00;
	u32 b = (pix & 0x000000ff) << 16;

	return GENMASK(31, 24) | b | g | r;
}
/* Convert an XRGB8888 pixel to XRGB2101010. */
static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
{
	/*
	 * Widen each 8-bit channel to 10 bits, then replicate the two MSBs
	 * of every channel into its two LSBs so 0xff expands to 0x3ff.
	 */
	u32 b = (pix & 0x000000FF) << 2;
	u32 g = (pix & 0x0000FF00) << 4;
	u32 r = (pix & 0x00FF0000) << 6;
	u32 chans = r | g | b;

	return chans | ((chans >> 8) & 0x00300C03);
}
/* Convert an XRGB8888 pixel to ARGB2101010 (alpha forced opaque). */
static u32 convert_xrgb8888_to_argb2101010(u32 pix)
{
	u32 b = (pix & 0x000000FF) << 2;
	u32 g = (pix & 0x0000FF00) << 4;
	u32 r = (pix & 0x00FF0000) << 6;
	u32 chans = r | g | b;

	/* Replicate channel MSBs into the LSBs and set both alpha bits. */
	return GENMASK(31, 30) | chans | ((chans >> 8) & 0x00300C03);
}
/* Convert an XRGB8888 pixel to ABGR2101010 (alpha forced opaque). */
static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
{
	/* Red and blue swap positions while each channel widens to 10 bits. */
	u32 r = (pix & 0x00FF0000) >> 14;
	u32 g = (pix & 0x0000FF00) << 4;
	u32 b = (pix & 0x000000FF) << 22;
	u32 chans = b | g | r;

	return GENMASK(31, 30) | chans | ((chans >> 8) & 0x00300C03);
}
/*
 * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
 * @color: input color, in xrgb8888 format
 * @format: output format
 *
 * Returns:
 * Color in the format specified, casted to u32.
 * Or 0 if the format is not supported.
 */
static u32 convert_from_xrgb8888(u32 color, u32 format)
{
	switch (format) {
	case DRM_FORMAT_RGB565:
		return convert_xrgb8888_to_rgb565(color);
	case DRM_FORMAT_RGBA5551:
		return convert_xrgb8888_to_rgba5551(color);
	case DRM_FORMAT_XRGB1555:
		return convert_xrgb8888_to_xrgb1555(color);
	case DRM_FORMAT_ARGB1555:
		return convert_xrgb8888_to_argb1555(color);
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_XRGB8888:
		/* Channel layout already matches; RGB888 uses the low 3 bytes. */
		return color;
	case DRM_FORMAT_ARGB8888:
		return convert_xrgb8888_to_argb8888(color);
	case DRM_FORMAT_XBGR8888:
		return convert_xrgb8888_to_xbgr8888(color);
	case DRM_FORMAT_ABGR8888:
		return convert_xrgb8888_to_abgr8888(color);
	case DRM_FORMAT_XRGB2101010:
		return convert_xrgb8888_to_xrgb2101010(color);
	case DRM_FORMAT_ARGB2101010:
		return convert_xrgb8888_to_argb2101010(color);
	case DRM_FORMAT_ABGR2101010:
		return convert_xrgb8888_to_abgr2101010(color);
	default:
		/*
		 * 0 doubles as the "unsupported format" sentinel used by
		 * drm_panic_is_format_supported().
		 */
		WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
		return 0;
	}
}
/*
* Blit & Fill
*/
/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
{
	u8 packed = sbuf8[y * spitch + x / 8];

	return packed & (0x80 >> (x % 8));
}
/*
 * Blit the monochrome source bitmap (8 pixels/byte, MSB first), upscaled
 * by @scale, into a 16 bits-per-pixel destination; only foreground bits
 * are written.
 */
static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
			     const u8 *sbuf8, unsigned int spitch,
			     unsigned int height, unsigned int width,
			     unsigned int scale, u16 fg16)
{
	unsigned int y, x;

	for (y = 0; y < height; y++)
		for (x = 0; x < width; x++)
			if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
				iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
}
/*
 * Blit the monochrome source bitmap, upscaled by @scale, into a
 * 24 bits-per-pixel destination; only the low 3 bytes of @fg32 are used.
 */
static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
			     const u8 *sbuf8, unsigned int spitch,
			     unsigned int height, unsigned int width,
			     unsigned int scale, u32 fg32)
{
	unsigned int y, x;

	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			u32 off = y * dpitch + x * 3;

			if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
				/* write blue-green-red to output in little endianness */
				iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
				iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
				iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
			}
		}
	}
}
/*
 * Blit the monochrome source bitmap, upscaled by @scale, into a
 * 32 bits-per-pixel destination; only foreground bits are written.
 */
static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
			     const u8 *sbuf8, unsigned int spitch,
			     unsigned int height, unsigned int width,
			     unsigned int scale, u32 fg32)
{
	unsigned int y, x;

	for (y = 0; y < height; y++)
		for (x = 0; x < width; x++)
			if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
				iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
}
static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip,
const u8 *sbuf8, unsigned int spitch, unsigned int scale,
u32 fg_color)
@ -322,7 +150,7 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
for (y = 0; y < drm_rect_height(clip); y++)
for (x = 0; x < drm_rect_width(clip); x++)
if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
}
@ -354,62 +182,22 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch,
drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
drm_draw_blit16(&map, sb->pitch[0], sbuf8, spitch,
drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 3:
drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch,
drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
drm_draw_blit24(&map, sb->pitch[0], sbuf8, spitch,
drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
case 4:
drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch,
drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
drm_draw_blit32(&map, sb->pitch[0], sbuf8, spitch,
drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
break;
default:
WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]);
}
}
/* Fill a width x height rectangle of a 16bpp buffer with @color. */
static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch,
			     unsigned int height, unsigned int width,
			     u16 color)
{
	unsigned int y, x;

	for (y = 0; y < height; y++)
		for (x = 0; x < width; x++)
			iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
}
/*
 * Fill a width x height rectangle of a 24bpp buffer with the low 3 bytes
 * of @color, written blue-green-red (little endian).
 */
static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch,
			     unsigned int height, unsigned int width,
			     u32 color)
{
	unsigned int y, x;

	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			unsigned int off = y * dpitch + x * 3;

			/* write blue-green-red to output in little endianness */
			iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
			iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
			iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
		}
	}
}
/* Fill a width x height rectangle of a 32bpp buffer with @color. */
static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch,
			     unsigned int height, unsigned int width,
			     u32 color)
{
	unsigned int y, x;

	for (y = 0; y < height; y++)
		for (x = 0; x < width; x++)
			iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
}
static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
struct drm_rect *clip,
u32 color)
@ -442,27 +230,22 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
switch (sb->format->cpp[0]) {
case 2:
drm_panic_fill16(&map, sb->pitch[0], drm_rect_height(clip),
drm_rect_width(clip), color);
drm_draw_fill16(&map, sb->pitch[0], drm_rect_height(clip),
drm_rect_width(clip), color);
break;
case 3:
drm_panic_fill24(&map, sb->pitch[0], drm_rect_height(clip),
drm_rect_width(clip), color);
drm_draw_fill24(&map, sb->pitch[0], drm_rect_height(clip),
drm_rect_width(clip), color);
break;
case 4:
drm_panic_fill32(&map, sb->pitch[0], drm_rect_height(clip),
drm_rect_width(clip), color);
drm_draw_fill32(&map, sb->pitch[0], drm_rect_height(clip),
drm_rect_width(clip), color);
break;
default:
WARN_ONCE(1, "Can't fill with pixel width %d\n", sb->format->cpp[0]);
}
}
/*
 * Return the start of glyph @c's bitmap in @font.
 * NOTE(review): @c is plain char, so glyphs >= 128 depend on char
 * signedness - presumably only ASCII panic text is drawn; confirm.
 */
static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch)
{
	return font->data + (c * font->height) * font_pitch;
}
static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len)
{
int i;
@ -501,7 +284,7 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
rec.x1 += (drm_rect_width(clip) - (line_len * font->width)) / 2;
for (j = 0; j < line_len; j++) {
src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
src = drm_draw_get_char_bitmap(font, msg[i].txt[j], font_pitch);
rec.x2 = rec.x1 + font->width;
drm_panic_blit(sb, &rec, src, font_pitch, 1, color);
rec.x1 += font->width;
@ -533,8 +316,10 @@ static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *
static void draw_panic_static_user(struct drm_scanout_buffer *sb)
{
u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
sb->format->format);
u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg;
unsigned int msg_width, msg_height;
@ -600,8 +385,10 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
*/
static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
{
u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
sb->format->format);
u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
struct kmsg_dump_iter iter;
@ -791,8 +578,10 @@ static int drm_panic_get_qr_code(u8 **qr_image)
*/
static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
{
u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
sb->format->format);
u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
sb->format->format);
const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas;
unsigned int max_qr_size, scale;
@ -878,7 +667,7 @@ static bool drm_panic_is_format_supported(const struct drm_format_info *format)
{
if (format->num_planes != 1)
return false;
return convert_from_xrgb8888(0xffffff, format->format) != 0;
return drm_draw_color_from_xrgb8888(0xffffff, format->format) != 0;
}
static void draw_panic_dispatch(struct drm_scanout_buffer *sb)

View File

@ -1648,7 +1648,9 @@ static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
struct hdmi_context *hdata = dev_get_drvdata(dev);
struct drm_connector *connector = &hdata->connector;
mutex_lock(&connector->eld_mutex);
memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
mutex_unlock(&connector->eld_mutex);
return 0;
}

View File

@ -6,6 +6,7 @@
*/
#include <linux/backlight.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <drm/drm_atomic_helper.h>

View File

@ -486,7 +486,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
}
static const struct i2c_device_id ch7006_ids[] = {
{ "ch7006", 0 },
{ "ch7006" },
{ }
};
MODULE_DEVICE_TABLE(i2c, ch7006_ids);

View File

@ -413,7 +413,7 @@ sil164_encoder_init(struct i2c_client *client,
}
static const struct i2c_device_id sil164_ids[] = {
{ "sil164", 0 },
{ "sil164" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sil164_ids);

View File

@ -486,8 +486,8 @@ static void tda9950_remove(struct i2c_client *client)
}
static struct i2c_device_id tda9950_ids[] = {
{ "tda9950", 0 },
{ },
{ "tda9950" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda9950_ids);

View File

@ -2094,7 +2094,7 @@ MODULE_DEVICE_TABLE(of, tda998x_dt_ids);
#endif
static const struct i2c_device_id tda998x_ids[] = {
{ "tda998x", 0 },
{ "tda998x" },
{ }
};
MODULE_DEVICE_TABLE(i2c, tda998x_ids);

View File

@ -699,10 +699,12 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
mutex_lock(&connector->eld_mutex);
if (!connector->eld[0]) {
drm_dbg_kms(&i915->drm,
"Bogus ELD on [CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
mutex_unlock(&connector->eld_mutex);
return false;
}
@ -710,6 +712,7 @@ bool intel_audio_compute_config(struct intel_encoder *encoder,
memcpy(crtc_state->eld, connector->eld, sizeof(crtc_state->eld));
crtc_state->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
mutex_unlock(&connector->eld_mutex);
return true;
}

View File

@ -1715,6 +1715,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (!intel_connector)
return NULL;
connector = &intel_connector->base;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
@ -1723,20 +1725,19 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_init_modeset_retry_work(intel_connector);
intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
intel_connector->dp.dsc_hblank_expansion_quirk =
detect_dsc_hblank_expansion_quirk(intel_connector);
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort);
ret = drm_connector_dynamic_init(&dev_priv->drm, connector, &intel_dp_mst_connector_funcs,
DRM_MODE_CONNECTOR_DisplayPort, NULL);
if (ret) {
drm_dp_mst_put_port_malloc(port);
intel_connector_free(intel_connector);
return NULL;
}
intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
intel_connector->dp.dsc_hblank_expansion_quirk =
detect_dsc_hblank_expansion_quirk(intel_connector);
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
for_each_pipe(dev_priv, pipe) {

View File

@ -102,6 +102,7 @@ static void show_meminfo(struct drm_printer *p, struct drm_file *file)
for_each_memory_region(mr, i915, id)
drm_print_memory_stats(p,
&stats[id],
DRM_GEM_OBJECT_ACTIVE |
DRM_GEM_OBJECT_RESIDENT |
DRM_GEM_OBJECT_PURGEABLE,
mr->uabi_name);

View File

@ -414,8 +414,10 @@ static int msm_dp_audio_get_eld(struct device *dev,
return -ENODEV;
}
mutex_lock(&msm_dp_display->connector->eld_mutex);
memcpy(buf, msm_dp_display->connector->eld,
min(sizeof(msm_dp_display->connector->eld), len));
mutex_unlock(&msm_dp_display->connector->eld_mutex);
return 0;
}

View File

@ -1265,8 +1265,8 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
mstc->mstm = mstm;
mstc->port = port;
ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
DRM_MODE_CONNECTOR_DisplayPort);
ret = drm_connector_dynamic_init(dev, &mstc->connector, &nv50_mstc,
DRM_MODE_CONNECTOR_DisplayPort, NULL);
if (ret) {
kfree(*pmstc);
*pmstc = NULL;

View File

@ -9,6 +9,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>

View File

@ -11,6 +11,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>

View File

@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/media-bus-format.h>

View File

@ -3222,6 +3222,33 @@ static const struct panel_desc mitsubishi_aa084xe01 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
static const struct display_timing multi_inno_mi0700a2t_30_timing = {
.pixelclock = { 26400000, 33000000, 46800000 },
.hactive = { 800, 800, 800 },
.hfront_porch = { 16, 204, 354 },
.hback_porch = { 46, 46, 46 },
.hsync_len = { 1, 6, 40 },
.vactive = { 480, 480, 480 },
.vfront_porch = { 7, 22, 147 },
.vback_porch = { 23, 23, 23 },
.vsync_len = { 1, 3, 20 },
.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc multi_inno_mi0700a2t_30 = {
.timings = &multi_inno_mi0700a2t_30_timing,
.num_timings = 1,
.bpc = 6,
.size = {
.width = 153,
.height = 92,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing multi_inno_mi0700s4t_6_timing = {
.pixelclock = { 29000000, 33000000, 38000000 },
.hactive = { 800, 800, 800 },
@ -3313,6 +3340,33 @@ static const struct panel_desc multi_inno_mi1010ait_1cp = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing multi_inno_mi1010z1t_1cp11_timing = {
.pixelclock = { 40800000, 51200000, 67200000 },
.hactive = { 1024, 1024, 1024 },
.hfront_porch = { 30, 110, 130 },
.hback_porch = { 30, 110, 130 },
.hsync_len = { 30, 100, 116 },
.vactive = { 600, 600, 600 },
.vfront_porch = { 4, 13, 80 },
.vback_porch = { 4, 13, 80 },
.vsync_len = { 2, 9, 40 },
.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc multi_inno_mi1010z1t_1cp11 = {
.timings = &multi_inno_mi1010z1t_1cp11_timing,
.num_timings = 1,
.bpc = 6,
.size = {
.width = 260,
.height = 162,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing nec_nl12880bc20_05_timing = {
.pixelclock = { 67000000, 71000000, 75000000 },
.hactive = { 1280, 1280, 1280 },
@ -4280,6 +4334,45 @@ static const struct panel_desc tianma_tm070jvhg33 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
};
/*
* The datasheet computes total blanking as back porch + front porch, not
* including sync pulse width. This is for both H and V. To make the total
* blanking and period correct, subtract the pulse width from the front
* porch.
*
* This works well for the Min and Typ values, but for Max values the sync
* pulse width is higher than back porch + front porch, so work around that
* by reducing the Max sync length value to 1 and then treating the Max
* porches as in the Min and Typ cases.
*
* Exact datasheet values are added as a comment where they differ from the
* ones implemented for the above reason.
*/
static const struct display_timing tianma_tm070jdhg34_00_timing = {
.pixelclock = { 68400000, 71900000, 78100000 },
.hactive = { 1280, 1280, 1280 },
.hfront_porch = { 130, 138, 158 }, /* 131, 139, 159 */
.hback_porch = { 5, 5, 5 },
.hsync_len = { 1, 1, 1 }, /* 1, 1, 256 */
.vactive = { 800, 800, 800 },
.vfront_porch = { 2, 39, 98 }, /* 3, 40, 99 */
.vback_porch = { 2, 2, 2 },
.vsync_len = { 1, 1, 1 }, /* 1, 1, 128 */
.flags = DISPLAY_FLAGS_DE_HIGH,
};
static const struct panel_desc tianma_tm070jdhg34_00 = {
.timings = &tianma_tm070jdhg34_00_timing,
.num_timings = 1,
.bpc = 8,
.size = {
.width = 150, /* 149.76 */
.height = 94, /* 93.60 */
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct display_timing tianma_tm070rvhg71_timing = {
.pixelclock = { 27700000, 29200000, 39600000 },
.hactive = { 800, 800, 800 },
@ -4905,6 +4998,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "mitsubishi,aa084xe01",
.data = &mitsubishi_aa084xe01,
}, {
.compatible = "multi-inno,mi0700a2t-30",
.data = &multi_inno_mi0700a2t_30,
}, {
.compatible = "multi-inno,mi0700s4t-6",
.data = &multi_inno_mi0700s4t_6,
@ -4914,6 +5010,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "multi-inno,mi1010ait-1cp",
.data = &multi_inno_mi1010ait_1cp,
}, {
.compatible = "multi-inno,mi1010z1t-1cp11",
.data = &multi_inno_mi1010z1t_1cp11,
}, {
.compatible = "nec,nl12880bc20-05",
.data = &nec_nl12880bc20_05,
@ -5022,6 +5121,9 @@ static const struct of_device_id platform_of_match[] = {
}, {
.compatible = "tianma,tm070jdhg30",
.data = &tianma_tm070jdhg30,
}, {
.compatible = "tianma,tm070jdhg34-00",
.data = &tianma_tm070jdhg34_00,
}, {
.compatible = "tianma,tm070jvhg33",
.data = &tianma_tm070jvhg33,

Some files were not shown because too many files have changed in this diff Show More