mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
drm next for 6.11-rc1:
Merge tag 'drm-next-2024-07-18' of https://gitlab.freedesktop.org/drm/kernel

Pull drm updates from Dave Airlie:
 "There's a lot of stuff in here: amd, i915 and xe have new platform
  work, lots of core rework around EDID handling, some new COMPILE_TEST
  options, maintainer changes and lots of other stuff.
  Summary:

  core:
   - deprecate DRM data and return 0 date
   - connector: Create a set of helpers to help with HDMI support
   - Remove driver owner assignments
   - Allow more drivers to compile with COMPILE_TEST
   - Conversions to drm_edid
   - Sprinkle MODULE_DESCRIPTIONS everywhere they are missing
   - Remove drm_mm_replace_node
   - print: Add a drm prefix to warn level messages too, remove ___drm_dbg,
     consolidate prefix handling
   - New monochrome TV mode variant

  ttm:
   - improve number of page faults on some platforms
   - fix test builds under PREEMPT_RT
   - more test coverage

  ci:
   - Require a more recent version of mesa
   - improve farm setup and test generation

  dma-buf:
   - warn if reserving 0 fence slots
   - internal API heap enhancements

  fbdev:
   - Create memory manager optimized fbdev emulation

  panic:
   - Allow to select fonts
   - improve drm_fb_dma_get_scanout_buffer
   - Allow to dump kmsg to the screen

  bridge:
   - Remove redundant checks on bridge->encoder
   - Remove drm_bridge_chain_mode_fixup
   - bridge-connector: Plumb in the new HDMI helper
   - analogix_dp: Various improvements, handle AUX transfers timeout
   - samsung-dsim: Fix timings calculation
   - tc358767: Plenty of small fixes, fix no connector attach, fix clocks
   - sii902x: state validation improvements

  panels:
   - Switch panels from register table initialization to proper code
   - Now that the panel code tracks the panel state, remove every ad-hoc
     implementation in the panel drivers
   - More cleanup of prepare / enable state tracking in drivers
   - edp: Drop legacy panel compatibles
   - simple-bridge: Switch to devm_drm_bridge_add
   - New panels: Lincoln Tech Sol LCD185-101CT, Microtips Technology
     13-101HIEBCAF0-C, Microtips Technology MF-103HIEB0GA0, BOE
     nv110wum-l60, IVO t109nw41, WL-355608-A8, PrimeView PM070WL4,
     Lincoln Technologies LCD197, Ortustech COM35H3P70ULC, AUO G104STN01,
     K&d kd101ne3-40ti

  amdgpu:
   - DCN 4.0.x support
   - GC 12.0 support
   - GMC 12.0 support
   - SDMA 7.0 support
   - MES12 support
   - MMHUB 4.1 support
   - GFX12 modifier and DCC support
   - lots of IP fixes/updates

  amdkfd:
   - Contiguous VRAM allocations
   - GC 12.0 support
   - SDMA 7.0 support
   - SR-IOV fixes
   - KFD GFX ALU exceptions

  i915:
   - Battlemage Xe2 HPD display enablement
   - Panel Replay enabling
   - DP AUX-less ALPM/LOBF
   - Enable link training failure fallback for DP MST links
   - CMRR (Content Match Refresh Rate) enabling
   - Increase ADL-S/ADL-P/DG2+ max TMDS bitrate to 6 Gbps
   - Enable eDP AUX based HDR backlight
   - Support replaying GPU hangs with captured context image
   - Automate CCS Mode setting during engine resets
   - lots of refactoring
   - Increase FLR timeout from 3s to 9s
   - Enable w/a 16021333562 for DG2, MTL and ARL [guc]

  xe:
   - update MAINTAINERS
   - New uapi adding OA functionality to Xe
   - expose l3 bank mask
   - fix display detect on ADL-N
   - runtime PM Fixes
   - Fix silent backmerge issues
   - More prep for SR-IOV
   - HWmon additions
   - per client usage info
   - Rework GPU page fault handling
   - Drop EXEC_QUEUE_FLAG_BANNED
   - Add BMG PCI IDs
   - Scheduler fixes and improvements
   - Rename xe_exec_queue::compute to xe_exec_queue::lr
   - Use ttm_uncached for BO with NEEDS_UC flag
   - Rename xe perf layer as xe observation layer
   - lots of refactoring

  radeon:
   - Backlight workaround for iMac
   - Silence UBSAN flex array warnings

  msm:
   - Validate registers XML description against schema in CI
   - core/dpu: SM7150 support
   - mdp5: Add support for MSM8937
   - gpu: Add param for userspace to know if raytracing is supported
   - gpu: X185 support (aka gpu in X1 laptop chips)
   - gpu: a505 support

  ivpu:
   - hardware scheduler support
   - profiling support
   - improvements to the platform support layer
   - firmware handling improvements
   - clocks/power mgmt improvements
   - scheduler/logging improvements

  habanalabs:
   - Gradual sleep in polling memory macro
   - Reduce Gaudi2 MSI-X interrupt count to 128
   - Add Gaudi2-D revision support
   - Add timestamp to CPLD info
   - Gaudi2: Assume hard-reset by firmware upon MC SEI severe error
   - Align Gaudi2 interrupt names
   - Check for errors after preboot is ready
   - Change habanalabs maintainer and git repo path

  mgag200:
   - refactoring and improvements
   - Add BMC output
   - enable polling

  nouveau:
   - add registry command line

  v3d:
   - perf counters improvements

  zynqmp:
   - irq and debugfs improvements

  atmel-hlcdc:
   - Support XLCDC in sam9x7

  mipi-dbi:
   - Remove mipi_dbi_machine_little_endian
   - make SPI bits per word configurable
   - support RGB888
   - allow pixel formats to be specified in the DT

  sun4i:
   - Rework the blender setup for DE2

  panfrost:
   - Enable MT8188 support

  vc4:
   - Monochrome TV support

  exynos:
   - fix fallback mode regression
   - fix memory leak
   - Use drm_edid_duplicate() instead of kmemdup()

  etnaviv:
   - fix i.MX8MP NPU clock gating
   - workaround FE register cdc issues on some cores
   - fix DMA sync handling for cached buffers
   - fix job timeout handling
   - keep TS enabled on MMUv2 cores for improved performance

  mediatek:
   - Convert to platform remove callback returning void
   - Drop chain_mode_fixup call in mode_valid()
   - Fix the errors of the MediaTek display driver found by IGT
   - Add display support for the MT8365-EVK board
   - Fix bit depth overwritten for mtk_ovl_set bit_depth()
   - Fix possible_crtcs calculation
   - Fix spurious kfree()

  ast:
   - refactor mode setting code

  stm:
   - Add LVDS support
   - DSI PHY updates"

* tag 'drm-next-2024-07-18' of https://gitlab.freedesktop.org/drm/kernel: (2501 commits)
  drm/amdgpu/mes12: add missing opcode string
  drm/amdgpu/mes11: update opcode strings
  Revert "drm/amd/display: Reset freesync config before update new state"
  drm/omap: Restrict compile testing to PAGE_SIZE less than 64KB
  drm/xe: Drop trace_xe_hw_fence_free
  drm/xe/uapi: Rename xe perf layer as xe observation layer
  drm/amdgpu: remove exp hw support check for gfx12
  drm/amdgpu: timely save bad pages to eeprom after gpu ras reset is completed
  drm/amdgpu: flush all cached ras bad pages to eeprom
  drm/amdgpu: select compute ME engines dynamically
  drm/amd/display: Allow display DCC for DCN401
  drm/amdgpu: select compute ME engines dynamically
  drm/amdgpu/job: Replace DRM_INFO/ERROR logging
  drm/amdgpu: select compute ME engines dynamically
  drm/amd/pm: Ignore initial value in smu response register
  drm/amdgpu: Initialize VF partition mode
  drm/amd/amdgpu: fix SDMA IRQ client ID <-> req mapping
  MAINTAINERS: fix Xinhui's name
  MAINTAINERS: update powerplay and swsmu
  drm/qxl: Pin buffer objects for internal mappings
  ...
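The merge above corresponds to pulling the signed tag from the drm tree; a minimal sketch of the equivalent operation, assuming a mainline checkout on the branch to be updated:

    # fetch the signed tag from the drm tree and merge it into the current branch
    git pull https://gitlab.freedesktop.org/drm/kernel tags/drm-next-2024-07-18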
commit b3ce7a3084
@@ -217,7 +217,7 @@ Description: Displays the hop values and physical address for a given ASID
                and virtual address. The user should write the ASID and VA into
                the file and then read the file to get the result.
                e.g. to display info about VA 0x1000 for ASID 1 you need to do:
-               echo "1 0x1000" > /sys/kernel/debug/accel/0/mmu
+               echo "1 0x1000" > /sys/kernel/debug/accel/<parent_device>/mmu

 What:          /sys/kernel/debug/accel/<parent_device>/mmu_error
 Date:          Mar 2021
@@ -226,8 +226,8 @@ Contact: fkassabri@habana.ai
 Description:   Check and display page fault or access violation mmu errors for
                all MMUs specified in mmu_cap_mask.
                e.g. to display error info for MMU hw cap bit 9, you need to do:
-               echo "0x200" > /sys/kernel/debug/accel/0/mmu_error
-               cat /sys/kernel/debug/accel/0/mmu_error
+               echo "0x200" > /sys/kernel/debug/accel/<parent_device>/mmu_error
+               cat /sys/kernel/debug/accel/<parent_device>/mmu_error

 What:          /sys/kernel/debug/accel/<parent_device>/monitor_dump
 Date:          Mar 2022
@@ -253,6 +253,12 @@ Description: Triggers dump of monitor data. The value to trigger the operatio
                When the write is finished, the user can read the "monitor_dump"
                blob

+What:          /sys/kernel/debug/accel/<parent_device>/server_type
+Date:          Feb 2024
+KernelVersion: 6.11
+Contact:       trisin@habana.ai
+Description:   Exposes the device's server type, maps to enum hl_server_type.
+
 What:          /sys/kernel/debug/accel/<parent_device>/set_power_state
 Date:          Jan 2019
 KernelVersion: 5.1
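The mmu entry above implies a write-then-read round trip on the same file; a minimal sketch of that sequence for the renamed path, using the ASID and VA values from the description's own example:

    echo "1 0x1000" > /sys/kernel/debug/accel/<parent_device>/mmu
    cat /sys/kernel/debug/accel/<parent_device>/mmu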
@ -71,6 +71,10 @@ properties:
|
||||
- const: iahb
|
||||
- const: venci
|
||||
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
description: phandle to the associated power domain
|
||||
|
||||
resets:
|
||||
minItems: 3
|
||||
|
||||
@ -129,6 +133,7 @@ examples:
|
||||
reset-names = "hdmitx_apb", "hdmitx", "hdmitx_phy";
|
||||
clocks = <&clk_isfr>, <&clk_iahb>, <&clk_venci>;
|
||||
clock-names = "isfr", "iahb", "venci";
|
||||
power-domains = <&pd_vpu>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
|
@ -45,6 +45,19 @@ properties:
|
||||
- const: isfr
|
||||
additionalItems: true
|
||||
|
||||
ddc-i2c-bus:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle
|
||||
deprecated: true
|
||||
description:
|
||||
The HDMI DDC bus can be connected to either a system I2C master or the
|
||||
functionally-reduced I2C master contained in the DWC HDMI. When connected
|
||||
to a system I2C master this property contains a phandle to that I2C
|
||||
master controller.
|
||||
|
||||
This property is deprecated, the system I2C master controller should
|
||||
be referenced through the ddc-i2c-bus property of the HDMI connector
|
||||
node.
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
||||
|
@ -25,8 +25,8 @@ properties:
|
||||
|
||||
reg:
|
||||
enum:
|
||||
- 0x68
|
||||
- 0x0f
|
||||
- 0x68
|
||||
description: |
|
||||
i2c address of the bridge, 0x68 or 0x0f, depending on bootstrap pins
|
||||
|
||||
|
@ -31,14 +31,6 @@ properties:
|
||||
clock-names:
|
||||
maxItems: 2
|
||||
|
||||
ddc-i2c-bus:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle
|
||||
description:
|
||||
The HDMI DDC bus can be connected to either a system I2C master or the
|
||||
functionally-reduced I2C master contained in the DWC HDMI. When connected
|
||||
to a system I2C master this property contains a phandle to that I2C
|
||||
master controller.
|
||||
|
||||
gpr:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle
|
||||
description:
|
||||
|
@ -36,6 +36,7 @@ properties:
|
||||
- mediatek,mt8188-disp-aal
|
||||
- mediatek,mt8192-disp-aal
|
||||
- mediatek,mt8195-disp-aal
|
||||
- mediatek,mt8365-disp-aal
|
||||
- const: mediatek,mt8183-disp-aal
|
||||
|
||||
reg:
|
||||
|
@ -24,6 +24,9 @@ properties:
|
||||
- enum:
|
||||
- mediatek,mt8183-disp-ccorr
|
||||
- mediatek,mt8192-disp-ccorr
|
||||
- items:
|
||||
- const: mediatek,mt8365-disp-ccorr
|
||||
- const: mediatek,mt8183-disp-ccorr
|
||||
- items:
|
||||
- enum:
|
||||
- mediatek,mt8186-disp-ccorr
|
||||
|
@ -40,6 +40,7 @@ properties:
|
||||
- mediatek,mt8188-disp-color
|
||||
- mediatek,mt8192-disp-color
|
||||
- mediatek,mt8195-disp-color
|
||||
- mediatek,mt8365-disp-color
|
||||
- const: mediatek,mt8173-disp-color
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
@ -30,6 +30,7 @@ properties:
|
||||
- mediatek,mt8188-disp-dither
|
||||
- mediatek,mt8192-disp-dither
|
||||
- mediatek,mt8195-disp-dither
|
||||
- mediatek,mt8365-disp-dither
|
||||
- const: mediatek,mt8183-disp-dither
|
||||
|
||||
reg:
|
||||
|
@ -31,6 +31,10 @@ properties:
|
||||
- enum:
|
||||
- mediatek,mt6795-dpi
|
||||
- const: mediatek,mt8183-dpi
|
||||
- items:
|
||||
- enum:
|
||||
- mediatek,mt8365-dpi
|
||||
- const: mediatek,mt8192-dpi
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
@ -37,6 +37,7 @@ properties:
|
||||
- items:
|
||||
- enum:
|
||||
- mediatek,mt8195-dsi
|
||||
- mediatek,mt8365-dsi
|
||||
- const: mediatek,mt8183-dsi
|
||||
|
||||
reg:
|
||||
|
@ -35,6 +35,7 @@ properties:
|
||||
- mediatek,mt8188-disp-gamma
|
||||
- mediatek,mt8192-disp-gamma
|
||||
- mediatek,mt8195-disp-gamma
|
||||
- mediatek,mt8365-disp-gamma
|
||||
- const: mediatek,mt8183-disp-gamma
|
||||
- items:
|
||||
- enum:
|
||||
|
@ -44,6 +44,7 @@ properties:
|
||||
- items:
|
||||
- enum:
|
||||
- mediatek,mt8186-disp-ovl
|
||||
- mediatek,mt8365-disp-ovl
|
||||
- const: mediatek,mt8192-disp-ovl
|
||||
|
||||
reg:
|
||||
|
@ -45,6 +45,7 @@ properties:
|
||||
- enum:
|
||||
- mediatek,mt8186-disp-rdma
|
||||
- mediatek,mt8192-disp-rdma
|
||||
- mediatek,mt8365-disp-rdma
|
||||
- const: mediatek,mt8183-disp-rdma
|
||||
|
||||
reg:
|
||||
|
@ -32,6 +32,7 @@ properties:
|
||||
- qcom,sm6125-dsi-ctrl
|
||||
- qcom,sm6350-dsi-ctrl
|
||||
- qcom,sm6375-dsi-ctrl
|
||||
- qcom,sm7150-dsi-ctrl
|
||||
- qcom,sm8150-dsi-ctrl
|
||||
- qcom,sm8250-dsi-ctrl
|
||||
- qcom,sm8350-dsi-ctrl
|
||||
@ -162,6 +163,22 @@ properties:
|
||||
items:
|
||||
enum: [ 0, 1, 2, 3 ]
|
||||
|
||||
qcom,te-source:
|
||||
$ref: /schemas/types.yaml#/definitions/string
|
||||
description:
|
||||
Specifies the source of vsync signal from the panel used for
|
||||
tearing elimination.
|
||||
default: mdp_vsync_p
|
||||
enum:
|
||||
- mdp_vsync_p
|
||||
- mdp_vsync_s
|
||||
- mdp_vsync_e
|
||||
- timer0
|
||||
- timer1
|
||||
- timer2
|
||||
- timer3
|
||||
- timer4
|
||||
|
||||
required:
|
||||
- port@0
|
||||
- port@1
|
||||
@ -332,6 +349,7 @@ allOf:
|
||||
enum:
|
||||
- qcom,sc7180-dsi-ctrl
|
||||
- qcom,sc7280-dsi-ctrl
|
||||
- qcom,sm7150-dsi-ctrl
|
||||
- qcom,sm8150-dsi-ctrl
|
||||
- qcom,sm8250-dsi-ctrl
|
||||
- qcom,sm8350-dsi-ctrl
|
||||
@ -452,6 +470,7 @@ examples:
|
||||
dsi0_out: endpoint {
|
||||
remote-endpoint = <&sn65dsi86_in>;
|
||||
data-lanes = <0 1 2 3>;
|
||||
qcom,te-source = "mdp_vsync_e";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -16,6 +16,7 @@ properties:
|
||||
compatible:
|
||||
enum:
|
||||
- qcom,dsi-phy-28nm-8226
|
||||
- qcom,dsi-phy-28nm-8937
|
||||
- qcom,dsi-phy-28nm-8960
|
||||
- qcom,dsi-phy-28nm-hpm
|
||||
- qcom,dsi-phy-28nm-hpm-fam-b
|
||||
|
@ -23,6 +23,9 @@ properties:
|
||||
- items:
|
||||
- pattern: '^qcom,adreno-gmu-[67][0-9][0-9]\.[0-9]$'
|
||||
- const: qcom,adreno-gmu
|
||||
- items:
|
||||
- pattern: '^qcom,adreno-gmu-x[1-9][0-9][0-9]\.[0-9]$'
|
||||
- const: qcom,adreno-gmu
|
||||
- const: qcom,adreno-gmu-wrapper
|
||||
|
||||
reg:
|
||||
@ -225,6 +228,7 @@ allOf:
|
||||
- qcom,adreno-gmu-730.1
|
||||
- qcom,adreno-gmu-740.1
|
||||
- qcom,adreno-gmu-750.1
|
||||
- qcom,adreno-gmu-x185.1
|
||||
then:
|
||||
properties:
|
||||
reg:
|
||||
|
@ -10,6 +10,18 @@ title: Adreno or Snapdragon GPUs
|
||||
maintainers:
|
||||
- Rob Clark <robdclark@gmail.com>
|
||||
|
||||
# dtschema does not select nodes based on pattern+const, so add custom select
|
||||
# as a work-around:
|
||||
select:
|
||||
properties:
|
||||
compatible:
|
||||
contains:
|
||||
enum:
|
||||
- qcom,adreno
|
||||
- amd,imageon
|
||||
required:
|
||||
- compatible
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
oneOf:
|
||||
@ -17,7 +29,7 @@ properties:
|
||||
The driver is parsing the compat string for Adreno to
|
||||
figure out the chip-id.
|
||||
items:
|
||||
- pattern: '^qcom,adreno-[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]$'
|
||||
- pattern: '^qcom,adreno-[0-9a-f]{8}$'
|
||||
- const: qcom,adreno
|
||||
- description: |
|
||||
The driver is parsing the compat string for Adreno to
|
||||
@ -32,9 +44,13 @@ properties:
|
||||
- pattern: '^amd,imageon-200\.[0-1]$'
|
||||
- const: amd,imageon
|
||||
|
||||
clocks: true
|
||||
clocks:
|
||||
minItems: 2
|
||||
maxItems: 7
|
||||
|
||||
clock-names: true
|
||||
clock-names:
|
||||
minItems: 2
|
||||
maxItems: 7
|
||||
|
||||
reg:
|
||||
minItems: 1
|
||||
@ -42,7 +58,10 @@ properties:
|
||||
|
||||
reg-names:
|
||||
minItems: 1
|
||||
maxItems: 3
|
||||
items:
|
||||
- const: kgsl_3d0_reg_memory
|
||||
- const: cx_mem
|
||||
- const: cx_dbgc
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
|
@ -25,6 +25,7 @@ properties:
|
||||
- qcom,msm8226-mdp5
|
||||
- qcom,msm8916-mdp5
|
||||
- qcom,msm8917-mdp5
|
||||
- qcom,msm8937-mdp5
|
||||
- qcom,msm8953-mdp5
|
||||
- qcom,msm8974-mdp5
|
||||
- qcom,msm8976-mdp5
|
||||
|
@ -126,6 +126,7 @@ patternProperties:
|
||||
- qcom,dsi-phy-14nm-8953
|
||||
- qcom,dsi-phy-20nm
|
||||
- qcom,dsi-phy-28nm-8226
|
||||
- qcom,dsi-phy-28nm-8937
|
||||
- qcom,dsi-phy-28nm-hpm
|
||||
- qcom,dsi-phy-28nm-hpm-fam-b
|
||||
- qcom,dsi-phy-28nm-lp
|
||||
|
@ -0,0 +1,143 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/msm/qcom,sm7150-dpu.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Qualcomm SM7150 Display Processing Unit (DPU)
|
||||
|
||||
maintainers:
|
||||
- Danila Tikhonov <danila@jiaxyga.com>
|
||||
|
||||
$ref: /schemas/display/msm/dpu-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm7150-dpu
|
||||
|
||||
reg:
|
||||
items:
|
||||
- description: Address offset and size for mdp register set
|
||||
- description: Address offset and size for vbif register set
|
||||
|
||||
reg-names:
|
||||
items:
|
||||
- const: mdp
|
||||
- const: vbif
|
||||
|
||||
clocks:
|
||||
items:
|
||||
- description: Display hf axi clock
|
||||
- description: Display ahb clock
|
||||
- description: Display rotator clock
|
||||
- description: Display lut clock
|
||||
- description: Display core clock
|
||||
- description: Display vsync clock
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: bus
|
||||
- const: iface
|
||||
- const: rot
|
||||
- const: lut
|
||||
- const: core
|
||||
- const: vsync
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- reg-names
|
||||
- clocks
|
||||
- clock-names
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/power/qcom,rpmhpd.h>
|
||||
|
||||
display-controller@ae01000 {
|
||||
compatible = "qcom,sm7150-dpu";
|
||||
reg = <0x0ae01000 0x8f000>,
|
||||
<0x0aeb0000 0x2008>;
|
||||
reg-names = "mdp", "vbif";
|
||||
|
||||
clocks = <&gcc_disp_hf_axi_clk>,
|
||||
<&dispcc_mdss_ahb_clk>,
|
||||
<&dispcc_mdss_rot_clk>,
|
||||
<&dispcc_mdss_mdp_lut_clk>,
|
||||
<&dispcc_mdss_mdp_clk>,
|
||||
<&dispcc_mdss_vsync_clk>;
|
||||
clock-names = "bus",
|
||||
"iface",
|
||||
"rot",
|
||||
"lut",
|
||||
"core",
|
||||
"vsync";
|
||||
|
||||
assigned-clocks = <&dispcc_mdss_vsync_clk>;
|
||||
assigned-clock-rates = <19200000>;
|
||||
|
||||
operating-points-v2 = <&mdp_opp_table>;
|
||||
power-domains = <&rpmhpd RPMHPD_CX>;
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <0>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
dpu_intf1_out: endpoint {
|
||||
remote-endpoint = <&mdss_dsi0_in>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
dpu_intf2_out: endpoint {
|
||||
remote-endpoint = <&mdss_dsi1_in>;
|
||||
};
|
||||
};
|
||||
|
||||
port@2 {
|
||||
reg = <2>;
|
||||
dpu_intf0_out: endpoint {
|
||||
remote-endpoint = <&dp_in>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
mdp_opp_table: opp-table {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-19200000 {
|
||||
opp-hz = /bits/ 64 <19200000>;
|
||||
required-opps = <&rpmhpd_opp_min_svs>;
|
||||
};
|
||||
|
||||
opp-200000000 {
|
||||
opp-hz = /bits/ 64 <200000000>;
|
||||
required-opps = <&rpmhpd_opp_low_svs>;
|
||||
};
|
||||
|
||||
opp-300000000 {
|
||||
opp-hz = /bits/ 64 <300000000>;
|
||||
required-opps = <&rpmhpd_opp_svs>;
|
||||
};
|
||||
|
||||
opp-344000000 {
|
||||
opp-hz = /bits/ 64 <344000000>;
|
||||
required-opps = <&rpmhpd_opp_svs_l1>;
|
||||
};
|
||||
|
||||
opp-430000000 {
|
||||
opp-hz = /bits/ 64 <430000000>;
|
||||
required-opps = <&rpmhpd_opp_nom>;
|
||||
};
|
||||
};
|
||||
};
|
||||
...
|
@ -0,0 +1,458 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/msm/qcom,sm7150-mdss.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Qualcomm SM7150 Display MDSS
|
||||
|
||||
maintainers:
|
||||
- Danila Tikhonov <danila@jiaxyga.com>
|
||||
|
||||
description:
|
||||
SM7150 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like
|
||||
DPU display controller, DSI and DP interfaces etc.
|
||||
|
||||
$ref: /schemas/display/msm/mdss-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm7150-mdss
|
||||
|
||||
clocks:
|
||||
items:
|
||||
- description: Display ahb clock from gcc
|
||||
- description: Display hf axi clock
|
||||
- description: Display sf axi clock
|
||||
- description: Display core clock
|
||||
|
||||
clock-names:
|
||||
items:
|
||||
- const: iface
|
||||
- const: bus
|
||||
- const: nrt_bus
|
||||
- const: core
|
||||
|
||||
iommus:
|
||||
maxItems: 1
|
||||
|
||||
interconnects:
|
||||
items:
|
||||
- description: Interconnect path from mdp0 port to the data bus
|
||||
- description: Interconnect path from mdp1 port to the data bus
|
||||
- description: Interconnect path from CPU to the reg bus
|
||||
|
||||
interconnect-names:
|
||||
items:
|
||||
- const: mdp0-mem
|
||||
- const: mdp1-mem
|
||||
- const: cpu-cfg
|
||||
|
||||
patternProperties:
|
||||
"^display-controller@[0-9a-f]+$":
|
||||
type: object
|
||||
additionalProperties: true
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm7150-dpu
|
||||
|
||||
"^displayport-controller@[0-9a-f]+$":
|
||||
type: object
|
||||
additionalProperties: true
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,sm7150-dp
|
||||
|
||||
"^dsi@[0-9a-f]+$":
|
||||
type: object
|
||||
additionalProperties: true
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- const: qcom,sm7150-dsi-ctrl
|
||||
- const: qcom,mdss-dsi-ctrl
|
||||
|
||||
"^phy@[0-9a-f]+$":
|
||||
type: object
|
||||
additionalProperties: true
|
||||
properties:
|
||||
compatible:
|
||||
const: qcom,dsi-phy-10nm
|
||||
|
||||
required:
|
||||
- compatible
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/clock/qcom,rpmh.h>
|
||||
#include <dt-bindings/interconnect/qcom,icc.h>
|
||||
#include <dt-bindings/interconnect/qcom,sm7150-rpmh.h>
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
#include <dt-bindings/power/qcom,rpmhpd.h>
|
||||
|
||||
display-subsystem@ae00000 {
|
||||
compatible = "qcom,sm7150-mdss";
|
||||
reg = <0x0ae00000 0x1000>;
|
||||
reg-names = "mdss";
|
||||
|
||||
power-domains = <&dispcc_mdss_gdsc>;
|
||||
|
||||
clocks = <&dispcc_mdss_ahb_clk>,
|
||||
<&gcc_disp_hf_axi_clk>,
|
||||
<&gcc_disp_sf_axi_clk>,
|
||||
<&dispcc_mdss_mdp_clk>;
|
||||
clock-names = "iface",
|
||||
"bus",
|
||||
"nrt_bus",
|
||||
"core";
|
||||
|
||||
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
|
||||
interconnects = <&mmss_noc MASTER_MDP_PORT0 QCOM_ICC_TAG_ALWAYS
|
||||
&mc_virt SLAVE_EBI_CH0 QCOM_ICC_TAG_ALWAYS>,
|
||||
<&mmss_noc MASTER_MDP_PORT1 QCOM_ICC_TAG_ALWAYS
|
||||
&mc_virt SLAVE_EBI_CH0 QCOM_ICC_TAG_ALWAYS>,
|
||||
<&gem_noc MASTER_AMPSS_M0 QCOM_ICC_TAG_ACTIVE_ONLY
|
||||
&config_noc SLAVE_DISPLAY_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
|
||||
interconnect-names = "mdp0-mem",
|
||||
"mdp1-mem",
|
||||
"cpu-cfg";
|
||||
|
||||
iommus = <&apps_smmu 0x800 0x440>;
|
||||
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
ranges;
|
||||
|
||||
display-controller@ae01000 {
|
||||
compatible = "qcom,sm7150-dpu";
|
||||
reg = <0x0ae01000 0x8f000>,
|
||||
<0x0aeb0000 0x2008>;
|
||||
reg-names = "mdp", "vbif";
|
||||
|
||||
clocks = <&gcc_disp_hf_axi_clk>,
|
||||
<&dispcc_mdss_ahb_clk>,
|
||||
<&dispcc_mdss_rot_clk>,
|
||||
<&dispcc_mdss_mdp_lut_clk>,
|
||||
<&dispcc_mdss_mdp_clk>,
|
||||
<&dispcc_mdss_vsync_clk>;
|
||||
clock-names = "bus",
|
||||
"iface",
|
||||
"rot",
|
||||
"lut",
|
||||
"core",
|
||||
"vsync";
|
||||
|
||||
assigned-clocks = <&dispcc_mdss_vsync_clk>;
|
||||
assigned-clock-rates = <19200000>;
|
||||
|
||||
operating-points-v2 = <&mdp_opp_table>;
|
||||
power-domains = <&rpmhpd RPMHPD_CX>;
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <0>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
dpu_intf1_out: endpoint {
|
||||
remote-endpoint = <&mdss_dsi0_in>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
dpu_intf2_out: endpoint {
|
||||
remote-endpoint = <&mdss_dsi1_in>;
|
||||
};
|
||||
};
|
||||
|
||||
port@2 {
|
||||
reg = <2>;
|
||||
dpu_intf0_out: endpoint {
|
||||
remote-endpoint = <&dp_in>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
mdp_opp_table: opp-table {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-19200000 {
|
||||
opp-hz = /bits/ 64 <19200000>;
|
||||
required-opps = <&rpmhpd_opp_min_svs>;
|
||||
};
|
||||
|
||||
opp-200000000 {
|
||||
opp-hz = /bits/ 64 <200000000>;
|
||||
required-opps = <&rpmhpd_opp_low_svs>;
|
||||
};
|
||||
|
||||
opp-300000000 {
|
||||
opp-hz = /bits/ 64 <300000000>;
|
||||
required-opps = <&rpmhpd_opp_svs>;
|
||||
};
|
||||
|
||||
opp-344000000 {
|
||||
opp-hz = /bits/ 64 <344000000>;
|
||||
required-opps = <&rpmhpd_opp_svs_l1>;
|
||||
};
|
||||
|
||||
opp-430000000 {
|
||||
opp-hz = /bits/ 64 <430000000>;
|
||||
required-opps = <&rpmhpd_opp_nom>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dsi@ae94000 {
|
||||
compatible = "qcom,sm7150-dsi-ctrl",
|
||||
"qcom,mdss-dsi-ctrl";
|
||||
reg = <0x0ae94000 0x400>;
|
||||
reg-names = "dsi_ctrl";
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <4>;
|
||||
|
||||
clocks = <&dispcc_mdss_byte0_clk>,
|
||||
<&dispcc_mdss_byte0_intf_clk>,
|
||||
<&dispcc_mdss_pclk0_clk>,
|
||||
<&dispcc_mdss_esc0_clk>,
|
||||
<&dispcc_mdss_ahb_clk>,
|
||||
<&gcc_disp_hf_axi_clk>;
|
||||
clock-names = "byte",
|
||||
"byte_intf",
|
||||
"pixel",
|
||||
"core",
|
||||
"iface",
|
||||
"bus";
|
||||
|
||||
assigned-clocks = <&dispcc_mdss_byte0_clk_src>,
|
||||
<&dispcc_mdss_pclk0_clk_src>;
|
||||
assigned-clock-parents = <&mdss_dsi0_phy 0>,
|
||||
<&mdss_dsi0_phy 1>;
|
||||
|
||||
operating-points-v2 = <&dsi_opp_table>;
|
||||
power-domains = <&rpmhpd RPMHPD_CX>;
|
||||
|
||||
phys = <&mdss_dsi0_phy>;
|
||||
phy-names = "dsi";
|
||||
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
mdss_dsi0_in: endpoint {
|
||||
remote-endpoint = <&dpu_intf1_out>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
mdss_dsi0_out: endpoint {
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dsi_opp_table: opp-table {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-180000000 {
|
||||
opp-hz = /bits/ 64 <180000000>;
|
||||
required-opps = <&rpmhpd_opp_low_svs>;
|
||||
};
|
||||
|
||||
opp-275000000 {
|
||||
opp-hz = /bits/ 64 <275000000>;
|
||||
required-opps = <&rpmhpd_opp_svs>;
|
||||
};
|
||||
|
||||
opp-358000000 {
|
||||
opp-hz = /bits/ 64 <358000000>;
|
||||
required-opps = <&rpmhpd_opp_svs_l1>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
mdss_dsi0_phy: phy@ae94400 {
|
||||
compatible = "qcom,dsi-phy-10nm";
|
||||
reg = <0x0ae94400 0x200>,
|
||||
<0x0ae94600 0x280>,
|
||||
<0x0ae94a00 0x1e0>;
|
||||
reg-names = "dsi_phy",
|
||||
"dsi_phy_lane",
|
||||
"dsi_pll";
|
||||
|
||||
#clock-cells = <1>;
|
||||
#phy-cells = <0>;
|
||||
|
||||
clocks = <&dispcc_mdss_ahb_clk>,
|
||||
<&rpmhcc RPMH_CXO_CLK>;
|
||||
clock-names = "iface", "ref";
|
||||
vdds-supply = <&vdda_mipi_dsi0_pll>;
|
||||
};
|
||||
|
||||
dsi@ae96000 {
|
||||
compatible = "qcom,sm7150-dsi-ctrl",
|
||||
"qcom,mdss-dsi-ctrl";
|
||||
reg = <0x0ae96000 0x400>;
|
||||
reg-names = "dsi_ctrl";
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <5>;
|
||||
|
||||
clocks = <&dispcc_mdss_byte1_clk>,
|
||||
<&dispcc_mdss_byte1_intf_clk>,
|
||||
<&dispcc_mdss_pclk1_clk>,
|
||||
<&dispcc_mdss_esc1_clk>,
|
||||
<&dispcc_mdss_ahb_clk>,
|
||||
<&gcc_disp_hf_axi_clk>;
|
||||
clock-names = "byte",
|
||||
"byte_intf",
|
||||
"pixel",
|
||||
"core",
|
||||
"iface",
|
||||
"bus";
|
||||
|
||||
assigned-clocks = <&dispcc_mdss_byte1_clk_src>,
|
||||
<&dispcc_mdss_pclk1_clk_src>;
|
||||
assigned-clock-parents = <&mdss_dsi1_phy 0>,
|
||||
<&mdss_dsi1_phy 1>;
|
||||
|
||||
operating-points-v2 = <&dsi_opp_table>;
|
||||
power-domains = <&rpmhpd RPMHPD_CX>;
|
||||
|
||||
phys = <&mdss_dsi1_phy>;
|
||||
phy-names = "dsi";
|
||||
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
mdss_dsi1_in: endpoint {
|
||||
remote-endpoint = <&dpu_intf2_out>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
mdss_dsi1_out: endpoint {
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
mdss_dsi1_phy: phy@ae96400 {
|
||||
compatible = "qcom,dsi-phy-10nm";
|
||||
reg = <0x0ae96400 0x200>,
|
||||
<0x0ae96600 0x280>,
|
||||
<0x0ae96a00 0x1e0>;
|
||||
reg-names = "dsi_phy",
|
||||
"dsi_phy_lane",
|
||||
"dsi_pll";
|
||||
|
||||
#clock-cells = <1>;
|
||||
#phy-cells = <0>;
|
||||
|
||||
clocks = <&dispcc_mdss_ahb_clk>,
|
||||
<&rpmhcc RPMH_CXO_CLK>;
|
||||
clock-names = "iface", "ref";
|
||||
vdds-supply = <&vdda_mipi_dsi1_pll>;
|
||||
};
|
||||
|
||||
displayport-controller@ae90000 {
|
||||
compatible = "qcom,sm7150-dp";
|
||||
reg = <0xae90000 0x200>,
|
||||
<0xae90200 0x200>,
|
||||
<0xae90400 0xc00>,
|
||||
<0xae91000 0x400>,
|
||||
<0xae91400 0x400>;
|
||||
|
||||
interrupt-parent = <&mdss>;
|
||||
interrupts = <12>;
|
||||
|
||||
clocks = <&dispcc_mdss_ahb_clk>,
|
||||
<&dispcc_mdss_dp_aux_clk>,
|
||||
<&dispcc_mdss_dp_link_clk>,
|
||||
<&dispcc_mdss_dp_link_intf_clk>,
|
||||
<&dispcc_mdss_dp_pixel_clk>;
|
||||
clock-names = "core_iface",
|
||||
"core_aux",
|
||||
"ctrl_link",
|
||||
"ctrl_link_iface",
|
||||
"stream_pixel";
|
||||
|
||||
assigned-clocks = <&dispcc_mdss_dp_link_clk_src>,
|
||||
<&dispcc_mdss_dp_pixel_clk_src>;
|
||||
assigned-clock-parents = <&dp_phy 0>,
|
||||
<&dp_phy 1>;
|
||||
|
||||
operating-points-v2 = <&dp_opp_table>;
|
||||
power-domains = <&rpmhpd RPMHPD_CX>;
|
||||
|
||||
phys = <&dp_phy>;
|
||||
phy-names = "dp";
|
||||
|
||||
#sound-dai-cells = <0>;
|
||||
|
||||
ports {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
port@0 {
|
||||
reg = <0>;
|
||||
dp_in: endpoint {
|
||||
remote-endpoint = <&dpu_intf0_out>;
|
||||
};
|
||||
};
|
||||
|
||||
port@1 {
|
||||
reg = <1>;
|
||||
dp_out: endpoint {
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
dp_opp_table: opp-table {
|
||||
compatible = "operating-points-v2";
|
||||
|
||||
opp-160000000 {
|
||||
opp-hz = /bits/ 64 <160000000>;
|
||||
required-opps = <&rpmhpd_opp_low_svs>;
|
||||
};
|
||||
|
||||
opp-270000000 {
|
||||
opp-hz = /bits/ 64 <270000000>;
|
||||
required-opps = <&rpmhpd_opp_svs>;
|
||||
};
|
||||
|
||||
opp-540000000 {
|
||||
opp-hz = /bits/ 64 <540000000>;
|
||||
required-opps = <&rpmhpd_opp_svs_l1>;
|
||||
};
|
||||
|
||||
opp-810000000 {
|
||||
opp-hz = /bits/ 64 <810000000>;
|
||||
required-opps = <&rpmhpd_opp_nom>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
...
|
@ -32,8 +32,6 @@ properties:
|
||||
- innolux,hj110iz-01a
|
||||
# STARRY 2081101QFH032011-53G 10.1" WUXGA TFT LCD panel
|
||||
- starry,2081101qfh032011-53g
|
||||
# STARRY himax83102-j02 10.51" WUXGA TFT LCD panel
|
||||
- starry,himax83102-j02
|
||||
# STARRY ili9882t 10.51" WUXGA TFT LCD panel
|
||||
- starry,ili9882t
|
||||
|
||||
|
@ -0,0 +1,77 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/panel/himax,hx83102.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Himax HX83102 MIPI-DSI LCD panel controller
|
||||
|
||||
maintainers:
|
||||
- Cong Yang <yangcong5@huaqin.corp-partner.google.com>
|
||||
|
||||
allOf:
|
||||
- $ref: panel-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- enum:
|
||||
# Boe nv110wum-l60 11.0" WUXGA TFT LCD panel
|
||||
- boe,nv110wum-l60
|
||||
# IVO t109nw41 11.0" WUXGA TFT LCD panel
|
||||
- ivo,t109nw41
|
||||
# STARRY himax83102-j02 10.51" WUXGA TFT LCD panel
|
||||
- starry,himax83102-j02
|
||||
- const: himax,hx83102
|
||||
|
||||
reg:
|
||||
description: the virtual channel number of a DSI peripheral
|
||||
|
||||
enable-gpios:
|
||||
description: a GPIO spec for the enable pin
|
||||
|
||||
pp1800-supply:
|
||||
description: core voltage supply
|
||||
|
||||
avdd-supply:
|
||||
description: phandle of the regulator that provides positive voltage
|
||||
|
||||
avee-supply:
|
||||
description: phandle of the regulator that provides negative voltage
|
||||
|
||||
backlight: true
|
||||
port: true
|
||||
rotation: true
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- enable-gpios
|
||||
- pp1800-supply
|
||||
- avdd-supply
|
||||
- avee-supply
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
dsi {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
panel@0 {
|
||||
compatible = "starry,himax83102-j02", "himax,hx83102";
|
||||
reg = <0>;
|
||||
enable-gpios = <&pio 45 0>;
|
||||
avdd-supply = <&ppvarn_lcd>;
|
||||
avee-supply = <&ppvarp_lcd>;
|
||||
pp1800-supply = <&pp1800_lcd>;
|
||||
backlight = <&backlight_lcd0>;
|
||||
port {
|
||||
panel_in: endpoint {
|
||||
remote-endpoint = <&dsi_out>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
...
|
@ -0,0 +1,63 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/panel/ilitek,ili9806e.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Ilitek ILI9806E based MIPI-DSI panels
|
||||
|
||||
maintainers:
|
||||
- Michael Walle <mwalle@kernel.org>
|
||||
|
||||
allOf:
|
||||
- $ref: panel-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- enum:
|
||||
- ortustech,com35h3p70ulc
|
||||
- const: ilitek,ili9806e
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
|
||||
vdd-supply: true
|
||||
vccio-supply: true
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- vdd-supply
|
||||
- vccio-supply
|
||||
- reset-gpios
|
||||
- backlight
|
||||
- port
|
||||
|
||||
unevaluatedProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
|
||||
dsi {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
panel@0 {
|
||||
compatible = "ortustech,com35h3p70ulc", "ilitek,ili9806e";
|
||||
reg = <0>;
|
||||
vdd-supply = <®_vdd_panel>;
|
||||
vccio-supply = <®_vccio_panel>;
|
||||
reset-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
|
||||
backlight = <&backlight>;
|
||||
|
||||
port {
|
||||
panel_in: endpoint {
|
||||
remote-endpoint = <&dsi_out>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
...
|
@ -17,6 +17,7 @@ properties:
|
||||
items:
|
||||
- enum:
|
||||
- chongzhou,cz101b4001
|
||||
- kingdisplay,kd101ne3-40ti
|
||||
- radxa,display-10hd-ad001
|
||||
- radxa,display-8hd-ad002
|
||||
- const: jadard,jd9365da-h3
|
||||
|
@ -0,0 +1,117 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/display/panel/panel-edp-legacy.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Legacy eDP panels from before the "edp-panel" compatible
|
||||
|
||||
maintainers:
|
||||
- Douglas Anderson <dianders@chromium.org>
|
||||
|
||||
description: |
|
||||
This binding file is a collection of eDP panels from before the generic
|
||||
"edp-panel" compatible was introduced. It is kept around to support old
|
||||
dts files. The only reason one might add a new panel here instead of using
|
||||
the generic "edp-panel" is if it needed to be used on an eDP controller
|
||||
that doesn't support the generic "edp-panel" compatible, but it should be
|
||||
a strong preference to add the generic "edp-panel" compatible instead.
|
||||
|
||||
allOf:
|
||||
- $ref: panel-common.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
enum:
|
||||
# compatible must be listed in alphabetical order, ordered by compatible.
|
||||
# The description in the comment is mandatory for each compatible.
|
||||
|
||||
# AU Optronics Corporation 10.1" WSVGA TFT LCD panel
|
||||
- auo,b101ean01
|
||||
# AUO B116XAK01 eDP TFT LCD panel
|
||||
- auo,b116xa01
|
||||
# AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
|
||||
- auo,b133htn01
|
||||
# AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
|
||||
- auo,b133xtn01
|
||||
# BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
|
||||
- boe,nv101wxmn51
|
||||
# BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
|
||||
- boe,nv110wtm-n61
|
||||
# BOE NV110WTM-N61 11.0" 2160x1440 TFT LCD Panel
|
||||
- boe,nv133fhm-n61
|
||||
# BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
|
||||
- boe,nv133fhm-n62
|
||||
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
|
||||
- boe,nv140fhmn49
|
||||
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
|
||||
- innolux,n116bca-ea1
|
||||
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
|
||||
- innolux,n116bge
|
||||
# InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
|
||||
- innolux,n125hce-gn1
|
||||
# Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
|
||||
- innolux,p120zdg-bf1
|
||||
# King & Display KD116N21-30NV-A010 eDP TFT LCD panel
|
||||
- kingdisplay,kd116n21-30nv-a010
|
||||
# LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
|
||||
- lg,lp079qx1-sp0v
|
||||
# LG 9.7" (2048x1536 pixels) TFT LCD panel
|
||||
- lg,lp097qx1-spa1
|
||||
# LG 12.0" (1920x1280 pixels) TFT LCD panel
|
||||
- lg,lp120up1
|
||||
# LG 12.9" (2560x1700 pixels) TFT LCD panel
|
||||
- lg,lp129qe
|
||||
# NewEast Optoelectronics CO., LTD WJFH116008A eDP TFT LCD panel
|
||||
- neweast,wjfh116008a
|
||||
# Samsung 12.2" (2560x1600 pixels) TFT LCD panel
|
||||
- samsung,lsn122dl01-c01
|
||||
# Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
|
||||
- samsung,ltn140at29-301
|
||||
# Sharp LD-D5116Z01B 12.3" WUXGA+ eDP panel
|
||||
- sharp,ld-d5116z01b
|
||||
# Sharp 12.3" (2400x1600 pixels) TFT LCD panel
|
||||
- sharp,lq123p1jx31
|
||||
|
||||
backlight: true
|
||||
ddc-i2c-bus: true
|
||||
enable-gpios: true
|
||||
panel-timing: true
|
||||
port: true
|
||||
power-supply: true
|
||||
no-hpd: true
|
||||
hpd-gpios: true
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- power-supply
|
||||
|
||||
examples:
|
||||
- |
|
||||
panel: panel {
|
||||
compatible = "innolux,n116bge";
|
||||
power-supply = <&panel_regulator>;
|
||||
backlight = <&backlight>;
|
||||
|
||||
panel-timing {
|
||||
clock-frequency = <74250000>;
|
||||
hactive = <1366>;
|
||||
hfront-porch = <136>;
|
||||
hback-porch = <60>;
|
||||
hsync-len = <30>;
|
||||
hsync-active = <0>;
|
||||
vactive = <768>;
|
||||
vfront-porch = <8>;
|
||||
vback-porch = <12>;
|
||||
vsync-len = <12>;
|
||||
vsync-active = <0>;
|
||||
};
|
||||
|
||||
port {
|
||||
panel_in_edp: endpoint {
|
||||
remote-endpoint = <&edp_out_panel>;
|
||||
};
|
||||
};
|
||||
};
|
@ -50,6 +50,12 @@ description: |
|
||||
| Command or data |
|
||||
|<D7><D6><D5><D4><D3><D2><D1><D0>|
|
||||
|
||||
The standard defines one pixel format for type C: RGB111. The industry
|
||||
however has decided to provide the type A/B interface pixel formats also on
|
||||
the Type C interface and most common among these are RGB565 and RGB666.
|
||||
The MIPI DCS command set_address_mode (36h) has one bit that controls RGB/BGR
|
||||
order. This gives each supported RGB format a BGR variant.
|
||||
|
||||
The panel resolution is specified using the panel-timing node properties
|
||||
hactive (width) and vactive (height). The other mandatory panel-timing
|
||||
properties should be set to zero except clock-frequency which can be
|
||||
@ -93,6 +99,28 @@ properties:
|
||||
|
||||
spi-3wire: true
|
||||
|
||||
format:
|
||||
description: >
|
||||
Pixel format in bit order as going on the wire:
|
||||
* `x2r1g1b1r1g1b1` - RGB111, 2 pixels per byte
|
||||
* `x2b1g1r1b1g1r1` - BGR111, 2 pixels per byte
|
||||
* `x1r1g1b1x1r1g1b1` - RGB111, 2 pixels per byte
|
||||
* `x1b1g1r1x1b1g1r1` - BGR111, 2 pixels per byte
|
||||
* `r5g6b5` - RGB565, 2 bytes
|
||||
* `b5g6r5` - BGR565, 2 bytes
|
||||
* `r6x2g6x2b6x2` - RGB666, 3 bytes
|
||||
* `b6x2g6x2r6x2` - BGR666, 3 bytes
|
||||
enum:
|
||||
- x2r1g1b1r1g1b1
|
||||
- x2b1g1r1b1g1r1
|
||||
- x1r1g1b1x1r1g1b1
|
||||
- x1b1g1r1x1b1g1r1
|
||||
- r5g6b5
|
||||
- b5g6r5
|
||||
- r6x2g6x2b6x2
|
||||
- b6x2g6x2r6x2
|
||||
default: r5g6b5
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
@ -119,6 +147,8 @@ examples:
|
||||
reset-gpios = <&gpio 25 GPIO_ACTIVE_HIGH>;
|
||||
write-only;
|
||||
|
||||
format = "r5g6b5";
|
||||
|
||||
backlight = <&backlight>;
|
||||
|
||||
width-mm = <35>;
|
||||
|
@ -46,6 +46,8 @@ properties:
|
||||
- lg,ld070wx3-sl01
|
||||
# LG Corporation 5" HD TFT LCD panel
|
||||
- lg,lh500wx1-sd03
|
||||
# Lincoln LCD197 5" 1080x1920 LCD panel
|
||||
- lincolntech,lcd197
|
||||
# One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
|
||||
- osddisplays,osd101t2587-53ts
|
||||
# Panasonic 10" WUXGA TFT LCD panel
|
||||
|
@ -41,6 +41,12 @@ properties:
|
||||
- auo,g190ean01
|
||||
# Kaohsiung Opto-Electronics Inc. 10.1" WUXGA (1920 x 1200) LVDS TFT LCD panel
|
||||
- koe,tx26d202vm0bwa
|
||||
# Lincoln Technology Solutions, LCD185-101CT 10.1" TFT 1920x1200
|
||||
- lincolntech,lcd185-101ct
|
||||
# Microtips Technology MF-101HIEBCAF0 10.1" WUXGA (1920x1200) TFT LCD panel
|
||||
- microtips,mf-101hiebcaf0
|
||||
# Microtips Technology MF-103HIEB0GA0 10.25" 1920x720 TFT LCD panel
|
||||
- microtips,mf-103hieb0ga0
|
||||
# NLT Technologies, Ltd. 15.6" FHD (1920x1080) LVDS TFT LCD panel
|
||||
- nlt,nl192108ac18-02d
|
||||
|
||||
|
@ -41,28 +41,18 @@ properties:
|
||||
- ampire,am800600p5tmqw-tb8h
|
||||
# AU Optronics Corporation 10.1" WSVGA TFT LCD panel
|
||||
- auo,b101aw03
|
||||
# AU Optronics Corporation 10.1" WSVGA TFT LCD panel
|
||||
- auo,b101ean01
|
||||
# AU Optronics Corporation 10.1" WXGA TFT LCD panel
|
||||
- auo,b101xtn01
|
||||
# AUO B116XAK01 eDP TFT LCD panel
|
||||
- auo,b116xa01
|
||||
# AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
|
||||
- auo,b116xw03
|
||||
# AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
|
||||
- auo,b133han05
|
||||
# AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
|
||||
- auo,b133htn01
|
||||
# AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
|
||||
- auo,b133xtn01
|
||||
# AU Optronics Corporation 14.0" FHD (1920x1080) color TFT-LCD panel
|
||||
- auo,b140han06
|
||||
# AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
|
||||
- auo,g070vvn01
|
||||
# AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
|
||||
- auo,g101evn010
|
||||
# AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
|
||||
- auo,g104sn02
|
||||
# AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
|
||||
- auo,g104stn01
|
||||
# AU Optronics Corporation 12.1" (1280x800) TFT LCD panel
|
||||
- auo,g121ean01
|
||||
# AU Optronics Corporation 15.6" (1366x768) TFT LCD panel
|
||||
@ -81,16 +71,6 @@ properties:
|
||||
- boe,ev121wxm-n10-1850
|
||||
# BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
|
||||
- boe,hv070wsa-100
|
||||
# BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
|
||||
- boe,nv101wxmn51
|
||||
# BOE NV133FHM-N61 13.3" FHD (1920x1080) TFT LCD Panel
|
||||
- boe,nv110wtm-n61
|
||||
# BOE NV110WTM-N61 11.0" 2160x1440 TFT LCD Panel
|
||||
- boe,nv133fhm-n61
|
||||
# BOE NV133FHM-N62 13.3" FHD (1920x1080) TFT LCD Panel
|
||||
- boe,nv133fhm-n62
|
||||
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
|
||||
- boe,nv140fhmn49
|
||||
# Crystal Clear Technology CMT430B19N00 4.3" 480x272 TFT-LCD panel
|
||||
- cct,cmt430b19n00
|
||||
# CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
|
||||
@ -172,8 +152,6 @@ properties:
|
||||
- hannstar,hsd100pxn1
|
||||
# Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
|
||||
- hit,tx23d38vm0caa
|
||||
# InfoVision Optoelectronics M133NWF4 R0 13.3" FHD (1920x1080) TFT LCD panel
|
||||
- ivo,m133nwf4-r0
|
||||
# Innolux AT043TN24 4.3" WQVGA TFT LCD panel
|
||||
- innolux,at043tn24
|
||||
# Innolux AT070TN92 7.0" WQVGA TFT LCD panel
|
||||
@ -192,22 +170,12 @@ properties:
|
||||
- innolux,g121x1-l03
|
||||
# Innolux Corporation 12.1" G121XCE-L01 XGA (1024x768) TFT LCD panel
|
||||
- innolux,g121xce-l01
|
||||
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
|
||||
- innolux,n116bca-ea1
|
||||
# Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
|
||||
- innolux,n116bge
|
||||
# InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
|
||||
- innolux,n125hce-gn1
|
||||
# InnoLux 15.6" FHD (1920x1080) TFT LCD panel
|
||||
- innolux,g156hce-l01
|
||||
# InnoLux 15.6" WXGA TFT LCD panel
|
||||
- innolux,n156bge-l21
|
||||
# Innolux P120ZDG-BF1 12.02 inch eDP 2K display panel
|
||||
- innolux,p120zdg-bf1
|
||||
# Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
|
||||
- innolux,zj070na-01p
|
||||
# King & Display KD116N21-30NV-A010 eDP TFT LCD panel
|
||||
- kingdisplay,kd116n21-30nv-a010
|
||||
# Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
|
||||
- koe,tx14d24vm1bpa
|
||||
# Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
|
||||
@ -220,14 +188,6 @@ properties:
|
||||
- lemaker,bl035-rgb-002
|
||||
# LG 7" (800x480 pixels) TFT LCD panel
|
||||
- lg,lb070wv8
|
||||
# LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
|
||||
- lg,lp079qx1-sp0v
|
||||
# LG 9.7" (2048x1536 pixels) TFT LCD panel
|
||||
- lg,lp097qx1-spa1
|
||||
# LG 12.0" (1920x1280 pixels) TFT LCD panel
|
||||
- lg,lp120up1
|
||||
# LG 12.9" (2560x1700 pixels) TFT LCD panel
|
||||
- lg,lp129qe
|
||||
# Logic Technologies LT161010-2NHC 7" WVGA TFT Cap Touch Module
|
||||
- logictechno,lt161010-2nhc
|
||||
# Logic Technologies LT161010-2NHR 7" WVGA TFT Resistive Touch Module
|
||||
@ -254,8 +214,6 @@ properties:
|
||||
- nec,nl4827hc19-05b
|
||||
# Netron-DY E231732 7.0" WSVGA TFT LCD panel
|
||||
- netron-dy,e231732
|
||||
# NewEast Optoelectronics CO., LTD WJFH116008A eDP TFT LCD panel
|
||||
- neweast,wjfh116008a
|
||||
# Newhaven Display International 480 x 272 TFT LCD panel
|
||||
- newhaven,nhd-4.3-480272ef-atxl
|
||||
# New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
|
||||
@ -280,6 +238,8 @@ properties:
|
||||
- powertip,ph128800t006-zhc01
|
||||
# POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
|
||||
- powertip,ph800480t013-idf02
|
||||
# PrimeView PM070WL4 7.0" 800x480 TFT LCD panel
|
||||
- primeview,pm070wl4
|
||||
# QiaoDian XianShi Corporation 4"3 TFT LCD panel
|
||||
- qiaodian,qd43003c0-40
|
||||
# Shenzhen QiShenglong Industrialist Co., Ltd. Gopher 2b 4.3" 480(RGB)x272 TFT LCD panel
|
||||
@ -290,16 +250,10 @@ properties:
|
||||
- rocktech,rk070er9427
|
||||
# Rocktech Display Ltd. RK043FN48H 4.3" 480x272 LCD-TFT panel
|
||||
- rocktech,rk043fn48h
|
||||
# Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
|
||||
- samsung,atna33xc20
|
||||
# Samsung 12.2" (2560x1600 pixels) TFT LCD panel
|
||||
- samsung,lsn122dl01-c01
|
||||
# Samsung Electronics 10.1" WXGA (1280x800) TFT LCD panel
|
||||
- samsung,ltl101al01
|
||||
# Samsung Electronics 10.1" WSVGA TFT LCD panel
|
||||
- samsung,ltn101nt05
|
||||
# Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
|
||||
- samsung,ltn140at29-301
|
||||
# Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel
|
||||
- satoz,sat050at40h12r2
|
||||
# Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
|
||||
@ -308,18 +262,12 @@ properties:
|
||||
- sharp,lq070y3dg3b
|
||||
# Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
|
||||
- sharp,lq101k1ly04
|
||||
# Sharp 12.3" (2400x1600 pixels) TFT LCD panel
|
||||
- sharp,lq123p1jx31
|
||||
# Sharp 14" (1920x1080 pixels) TFT LCD panel
|
||||
- sharp,lq140m1jw46
|
||||
# Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
|
||||
- sharp,ls020b1dd01d
|
||||
# Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
|
||||
- shelly,sca07010-bfn-lnn
|
||||
# Starry KR070PE2T 7" WVGA TFT LCD panel
|
||||
- starry,kr070pe2t
|
||||
# Starry 12.2" (1920x1200 pixels) TFT LCD panel
|
||||
- starry,kr122ea0sra
|
||||
# Startek KD070WVFPA043-C069A 7" TFT LCD panel
|
||||
- startek,kd070wvfpa
|
||||
# Team Source Display Technology TST043015CMHX 4.3" WQVGA TFT LCD panel
|
||||
|
@ -0,0 +1,95 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/samsung,atna33xc20.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel

maintainers:
- Douglas Anderson <dianders@chromium.org>

allOf:
- $ref: panel-common.yaml#

properties:
compatible:
const: samsung,atna33xc20

enable-gpios: true
port: true
power-supply: true
no-hpd: true
hpd-gpios: true

additionalProperties: false

required:
- compatible
- enable-gpios
- power-supply

examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>

i2c {
#address-cells = <1>;
#size-cells = <0>;

bridge@2d {
compatible = "ti,sn65dsi86";
reg = <0x2d>;

interrupt-parent = <&tlmm>;
interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;

enable-gpios = <&tlmm 102 GPIO_ACTIVE_HIGH>;

vpll-supply = <&src_pp1800_s4a>;
vccio-supply = <&src_pp1800_s4a>;
vcca-supply = <&src_pp1200_l2a>;
vcc-supply = <&src_pp1200_l2a>;

clocks = <&rpmhcc RPMH_LN_BB_CLK2>;
clock-names = "refclk";

no-hpd;

ports {
#address-cells = <1>;
#size-cells = <0>;

port@0 {
reg = <0>;
endpoint {
remote-endpoint = <&dsi0_out>;
};
};

port@1 {
reg = <1>;
sn65dsi86_out: endpoint {
remote-endpoint = <&panel_in_edp>;
};
};
};

aux-bus {
panel {
compatible = "samsung,atna33xc20";
enable-gpios = <&tlmm 12 GPIO_ACTIVE_HIGH>;
power-supply = <&pp3300_dx_edp>;
hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>;

port {
panel_in_edp: endpoint {
remote-endpoint = <&sn65dsi86_out>;
};
};
};
};
};
};
@ -1,30 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/sharp,ld-d5116z01b.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Sharp LD-D5116Z01B 12.3" WUXGA+ eDP panel

maintainers:
- Jeffrey Hugo <jeffrey.l.hugo@gmail.com>

allOf:
- $ref: panel-common.yaml#

properties:
compatible:
const: sharp,ld-d5116z01b

power-supply: true
backlight: true
port: true
no-hpd: true

additionalProperties: false

required:
- compatible
- power-supply

...
@ -0,0 +1,60 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/wl-355608-a8.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: WL-355608-A8 3.5" (640x480 pixels) 24-bit IPS LCD panel

maintainers:
- Ryan Walklin <ryan@testtoast.com>

allOf:
- $ref: panel-common.yaml#
- $ref: /schemas/spi/spi-peripheral-props.yaml#

properties:
compatible:
const: wl-355608-a8

reg:
maxItems: 1

spi-3wire: true

required:
- compatible
- reg
- port
- power-supply
- reset-gpios

unevaluatedProperties: false

examples:
- |
#include <dt-bindings/gpio/gpio.h>

spi {
#address-cells = <1>;
#size-cells = <0>;

panel@0 {
compatible = "wl-355608-a8";
reg = <0>;

spi-3wire;
spi-max-frequency = <3125000>;

reset-gpios = <&pio 8 14 GPIO_ACTIVE_LOW>; // PI14

backlight = <&backlight>;
power-supply = <&reg_lcd>;

port {
endpoint {
remote-endpoint = <&tcon_lcd0_out_lcd>;
};
};
};
};
@ -70,14 +70,6 @@ properties:
- vpll
- ref

ddc-i2c-bus:
$ref: /schemas/types.yaml#/definitions/phandle
description:
The HDMI DDC bus can be connected to either a system I2C master or the
functionally-reduced I2C master contained in the DWC HDMI. When connected
to a system I2C master this property contains a phandle to that I2C
master controller.

phys:
maxItems: 1
description: The HDMI PHY

@ -15,6 +15,7 @@ properties:
items:
- enum:
- rockchip,px30-mipi-dsi
- rockchip,rk3128-mipi-dsi
- rockchip,rk3288-mipi-dsi
- rockchip,rk3399-mipi-dsi
- rockchip,rk3568-mipi-dsi
@ -77,6 +78,7 @@ allOf:
contains:
enum:
- rockchip,px30-mipi-dsi
- rockchip,rk3128-mipi-dsi
- rockchip,rk3568-mipi-dsi
- rockchip,rv1126-mipi-dsi

119  Documentation/devicetree/bindings/display/st,stm32mp25-lvds.yaml  Normal file
@ -0,0 +1,119 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/st,stm32mp25-lvds.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: STMicroelectronics STM32 LVDS Display Interface Transmitter

maintainers:
- Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
- Yannick Fertre <yannick.fertre@foss.st.com>

description: |
The STMicroelectronics STM32 LVDS Display Interface Transmitter handles the
LVDS protocol: it maps the pixels received from the upstream Pixel-DMA (LTDC)
onto the LVDS PHY.

It is composed of three sub blocks:
- LVDS host: handles the LVDS protocol (FPD / OpenLDI) and maps its input
pixels onto the data lanes of the PHY
- LVDS PHY: parallelize the data and drives the LVDS data lanes
- LVDS wrapper: handles top-level settings

The LVDS controller driver supports the following high-level features:
- FDP-Link-I and OpenLDI (v0.95) protocols
- Single-Link or Dual-Link operation
- Single-Display or Double-Display (with the same content duplicated on both)
- Flexible Bit-Mapping, including JEIDA and VESA
- RGB888 or RGB666 output
- Synchronous design, with one input pixel per clock cycle

properties:
compatible:
const: st,stm32mp25-lvds

"#clock-cells":
const: 0
description:
Provides the internal LVDS PHY clock to the framework.

reg:
maxItems: 1

clocks:
items:
- description: APB peripheral clock
- description: Reference clock for the internal PLL

clock-names:
items:
- const: pclk
- const: ref

resets:
maxItems: 1

ports:
$ref: /schemas/graph.yaml#/properties/ports

properties:
port@0:
$ref: /schemas/graph.yaml#/properties/port
description:
LVDS input port node, connected to the LTDC RGB output port.

port@1:
$ref: /schemas/graph.yaml#/properties/port
description:
LVDS output port node, connected to a panel or bridge input port.

required:
- port@0
- port@1

required:
- compatible
- "#clock-cells"
- reg
- clocks
- clock-names
- resets
- ports

additionalProperties: false

examples:
- |
#include <dt-bindings/clock/st,stm32mp25-rcc.h>
#include <dt-bindings/reset/st,stm32mp25-rcc.h>

lvds: lvds@48060000 {
compatible = "st,stm32mp25-lvds";
reg = <0x48060000 0x2000>;
#clock-cells = <0>;
clocks = <&rcc CK_BUS_LVDS>, <&rcc CK_KER_LVDSPHY>;
clock-names = "pclk", "ref";
resets = <&rcc LVDS_R>;

ports {
#address-cells = <1>;
#size-cells = <0>;

port@0 {
reg = <0>;
lvds_in: endpoint {
remote-endpoint = <&ltdc_ep1_out>;
};
};

port@1 {
reg = <1>;
lvds_out0: endpoint {
remote-endpoint = <&lvds_panel_in>;
};
};
};
};

...
@ -34,6 +34,7 @@ properties:
- const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable
- items:
- enum:
- mediatek,mt8188-mali
- mediatek,mt8192-mali
- const: arm,mali-valhall-jm # Mali Valhall GPU model/revision is fully discoverable

@ -195,7 +196,9 @@ allOf:
properties:
compatible:
contains:
const: mediatek,mt8183b-mali
enum:
- mediatek,mt8183b-mali
- mediatek,mt8188-mali
then:
properties:
power-domains:
@ -824,6 +824,8 @@ patternProperties:
description: Lichee Pi
"^linaro,.*":
description: Linaro Limited
"^lincolntech,.*":
description: Lincoln Technology Solutions
"^lineartechnology,.*":
description: Linear Technology
"^linksprite,.*":
@ -928,6 +930,8 @@ patternProperties:
description: Microsoft Corporation
"^microsys,.*":
description: MicroSys Electronics GmbH
"^microtips,.*":
description: Microtips Technology USA
"^mikroe,.*":
description: MikroElektronika d.o.o.
"^mikrotik,.*":
@ -1168,6 +1172,8 @@ patternProperties:
description: PowerVR (deprecated, use img)
"^powkiddy,.*":
description: Powkiddy
"^primeview,.*":
description: Prime View International (PVI)
"^primux,.*":
description: Primux Trading, S.L.
"^probox2,.*":
@ -7,7 +7,9 @@ SteamDeck, VANGOGH, DCN 3.0.1, 10.3.1, VCN 3.1.0, 5.2.1, 11.5.0
Ryzen 5000 series / Ryzen 7x30 series, GREEN SARDINE / Cezanne / Barcelo / Barcelo-R, DCN 2.1, 9.3, VCN 2.2, 4.1.1, 12.0.1
Ryzen 6000 series / Ryzen 7x35 series / Ryzen 7x36 series, YELLOW CARP / Rembrandt / Rembrandt-R, 3.1.2, 10.3.3, VCN 3.1.1, 5.2.3, 13.0.3
Ryzen 7000 series (AM5), Raphael, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
Ryzen 9000 series (AM5), Granite Ridge, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
Ryzen 7x45 series (FL1), Dragon Range, 3.1.5, 10.3.6, 3.1.2, 5.2.6, 13.0.5
Ryzen 7x20 series, Mendocino, 3.1.6, 10.3.7, 3.1.1, 5.2.7, 13.0.8
Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11
Ryzen AI 300 series, Strix Point, 3.5.0, 11.5.0, 4.0.5, 6.1.0, 14.0.0

@ -15,8 +15,8 @@ Radeon (RX/Pro) 500 /540(X) /550 /640 /WX2100 /WX3100 /WX200 Series, POLARIS12,
Radeon (RX|TM) (PRO|WX) Vega /MI25 /V320 /V340L /8200 /9100 /SSG MxGPU, VEGA10, DCE 12, 9.0.1, VCE 4.0.0 / UVD 7.0.0, 4.0.0
AMD Radeon (Pro) VII /MI50 /MI60, VEGA20, DCE 12, 9.4.0, VCE 4.1.0 / UVD 7.2.0, 4.2.0
MI100, ARCTURUS, *, 9.4.1, VCN 2.5.0, 4.2.2
MI200, ALDEBARAN, *, 9.4.2, VCN 2.6.0, 4.4.0
MI300, AQUA_VANGARAM, *, 9.4.3, VCN 4.0.3, 4.4.2
MI200 Series, ALDEBARAN, *, 9.4.2, VCN 2.6.0, 4.4.0
MI300 Series, AQUA_VANJARAM, *, 9.4.3, VCN 4.0.3, 4.4.2
AMD Radeon (RX|Pro) 5600(M|XT) /5700 (M|XT|XTB) /W5700, NAVI10, DCN 2.0.0, 10.1.10, VCN 2.0.0, 5.0.0
AMD Radeon (Pro) 5300 /5500XTB/5500(XT|M) /W5500M /W5500, NAVI14, DCN 2.0.0, 10.1.1, VCN 2.0.2, 5.0.2
AMD Radeon RX 6800(XT) /6900(XT) /W6800, SIENNA_CICHLID, DCN 3.0.0, 10.3.0, VCN 3.0.0, 5.2.0
@ -49,6 +49,12 @@ pp_power_profile_mode
.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
:doc: pp_power_profile_mode

pm_policy
---------------------

.. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
:doc: pm_policy

\*_busy_percent
---------------

@ -57,8 +57,8 @@ is larger than the driver minor, the DRM_IOCTL_SET_VERSION call will
return an error. Otherwise the driver's set_version() method will be
called with the requested version.

Name, Description and Date
~~~~~~~~~~~~~~~~~~~~~~~~~~
Name and Description
~~~~~~~~~~~~~~~~~~~~

char \*name; char \*desc; char \*date;
The driver name is printed to the kernel log at initialization time,
@ -69,12 +69,6 @@ The driver description is a purely informative string passed to
userspace through the DRM_IOCTL_VERSION ioctl and otherwise unused by
the kernel.

The driver date, formatted as YYYYMMDD, is meant to identify the date of
the latest modification to the driver. However, as most drivers fail to
update it, its value is mostly useless. The DRM core prints it to the
kernel log at initialization time and passes it to userspace through the
DRM_IOCTL_VERSION ioctl.

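For context, these identification strings live in struct drm_driver; with the
date field on its way out, a minimal sketch of a hypothetical driver (all names
invented here, not taken from the tree) only carries the name and description::

    static const struct drm_driver example_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
            .name  = "example",            /* printed to the kernel log, reported by DRM_IOCTL_VERSION */
            .desc  = "Example DRM driver", /* informative only */
            .major = 1,
            .minor = 0,
    };
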
Module Initialization
---------------------

@ -110,15 +110,21 @@ fbdev Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
:doc: fbdev helpers

.. kernel-doc:: drivers/gpu/drm/drm_fbdev_dma.c
:export:

.. kernel-doc:: drivers/gpu/drm/drm_fbdev_shmem.c
:export:

.. kernel-doc:: drivers/gpu/drm/drm_fbdev_ttm.c
:export:

.. kernel-doc:: include/drm/drm_fb_helper.h
:internal:

.. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
:export:

.. kernel-doc:: drivers/gpu/drm/drm_fbdev_generic.c
:export:

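As a rough illustration of how a driver opts into the memory-manager specific
fbdev emulation referenced above (a hedged sketch, not lifted from any in-tree
driver): a TTM-backed driver calls the TTM variant once the device is
registered, while DMA- or SHMEM-backed drivers would use drm_fbdev_dma_setup()
or drm_fbdev_shmem_setup() instead::

    #include <drm/drm_drv.h>
    #include <drm/drm_fbdev_ttm.h>

    static int example_register(struct drm_device *drm)
    {
            int ret = drm_dev_register(drm, 0);

            if (ret)
                    return ret;

            /* Pick the helper matching the driver's buffer backing. */
            drm_fbdev_ttm_setup(drm, 32);
            return 0;
    }
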
format Helper Functions Reference
=================================

@ -112,6 +112,19 @@ larger value within a reasonable period. Upon observing a value lower than what
was previously read, userspace is expected to stay with that larger previous
value until a monotonic update is seen.

- drm-total-cycles-<keystr>: <uint>

Engine identifier string must be the same as the one specified in the
drm-cycles-<keystr> tag and shall contain the total number cycles for the given
engine.

This is a timestamp in GPU unspecified unit that matches the update rate
of drm-cycles-<keystr>. For drivers that implement this interface, the engine
utilization can be calculated entirely on the GPU clock domain, without
considering the CPU sleep time between 2 samples.

A driver may implement either this key or drm-maxfreq-<keystr>, but not both.

- drm-maxfreq-<keystr>: <uint> [Hz|MHz|KHz]

Engine identifier string must be the same as the one specified in the
@ -121,6 +134,9 @@ percentage utilization of the engine, whereas drm-engine-<keystr> only reflects
time active without considering what frequency the engine is operating as a
percentage of its maximum frequency.

A driver may implement either this key or drm-total-cycles-<keystr>, but not
both.

Memory
^^^^^^

@ -168,5 +184,6 @@ be documented above and where possible, aligned with other drivers.

Driver specific implementations
-------------------------------

:ref:`i915-usage-stats`
:ref:`panfrost-usage-stats`
* :ref:`i915-usage-stats`
* :ref:`panfrost-usage-stats`
* :ref:`xe-usage-stats`
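To make the cycles-based accounting above concrete, here is a hedged
userspace-side sketch (editorial, not part of the fdinfo spec) of turning two
samples of drm-cycles-<keystr> and drm-total-cycles-<keystr> into a utilization
percentage on the GPU clock domain, honouring the monotonicity rule quoted
above::

    #include <stdint.h>

    /* One fdinfo sample for a single engine; fields mirror the keys above. */
    struct fdinfo_sample {
            uint64_t cycles;        /* drm-cycles-<keystr>       */
            uint64_t total_cycles;  /* drm-total-cycles-<keystr> */
    };

    static double engine_utilization(struct fdinfo_sample prev,
                                     struct fdinfo_sample cur)
    {
            /* Skip non-monotonic updates, as the spec asks userspace to do. */
            if (cur.cycles < prev.cycles || cur.total_cycles <= prev.total_cycles)
                    return 0.0;

            return 100.0 * (double)(cur.cycles - prev.cycles) /
                           (double)(cur.total_cycles - prev.total_cycles);
    }
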
@ -150,7 +150,7 @@ High Definition Audio
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_audio.c
:internal:

.. kernel-doc:: include/drm/i915_component.h
.. kernel-doc:: include/drm/intel/i915_component.h
:internal:

Intel HDMI LPE Audio Support
@ -210,9 +210,6 @@ DMC wakelock support
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
:doc: DMC wakelock support

.. kernel-doc:: drivers/gpu/drm/i915/display/intel_dmc_wl.c
:internal:

Video BIOS Table (VBT)
----------------------

@ -17,7 +17,6 @@ Owner Module/Drivers,Group,Property Name,Type,Property Values,Object attached,De
,Virtual GPU,“suggested X”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an X offset for a connector
,,“suggested Y”,RANGE,"Min=0, Max=0xffffffff",Connector,property to suggest an Y offset for a connector
,Optional,"""aspect ratio""",ENUM,"{ ""None"", ""4:3"", ""16:9"" }",Connector,TDB
i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:235"" }",Connector,"When this property is set to Limited 16:235 and CTM is set, the hardware will be programmed with the result of the multiplication of CTM by the limited range matrix to ensure the pixels normally in the range 0..1.0 are remapped to the range 16/255..235/255."
,,“audio”,ENUM,"{ ""force-dvi"", ""off"", ""auto"", ""on"" }",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
@ -38,7 +37,6 @@ i915,Generic,"""Broadcast RGB""",ENUM,"{ ""Automatic"", ""Full"", ""Limited 16:2
,,“dot_crawl”,RANGE,"Min=0, Max=1",Connector,TBD
,SDVO-TV/LVDS,“brightness”,RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
CDV gma-500,Generic,"""Broadcast RGB""",ENUM,"{ “Full”, “Limited 16:235” }",Connector,TBD
,,"""Broadcast RGB""",ENUM,"{ “off”, “auto”, “on” }",Connector,TBD
Poulsbo,Generic,“backlight”,RANGE,"Min=0, Max=100",Connector,TBD
,SDVO-TV,“mode”,ENUM,"{ ""NTSC_M"", ""NTSC_J"", ""NTSC_443"", ""PAL_B"" } etc.",Connector,TBD
,,"""left_margin""",RANGE,"Min=0, Max= SDVO dependent",Connector,TBD
@ -243,19 +243,6 @@ Contact: Maintainer of the driver you plan to convert

Level: Intermediate

Convert drivers to use drm_fbdev_generic_setup()
------------------------------------------------

Most drivers can use drm_fbdev_generic_setup(). Driver have to implement
atomic modesetting and GEM vmap support. Historically, generic fbdev emulation
expected the framebuffer in system memory or system-like memory. By employing
struct iosys_map, drivers with frambuffers in I/O memory can be supported
as well.

Contact: Maintainer of the driver you plan to convert

Level: Intermediate

Reimplement functions in drm_fbdev_fb_ops without fbdev
-------------------------------------------------------

@ -482,30 +469,53 @@ Contact: Thomas Zimmermann <tzimmermann@suse.de>

Level: Starter

Clean up checks for already prepared/enabled in panels
------------------------------------------------------
Remove disable/unprepare in remove/shutdown in panel-simple and panel-edp
-------------------------------------------------------------------------

In a whole pile of panel drivers, we have code to make the
prepare/unprepare/enable/disable callbacks behave as no-ops if they've already
been called. To get some idea of the duplicated code, try::
As of commit d2aacaf07395 ("drm/panel: Check for already prepared/enabled in
drm_panel"), we have a check in the drm_panel core to make sure nobody
double-calls prepare/enable/disable/unprepare. Eventually that should probably
be turned into a WARN_ON() or somehow made louder, but right now we actually
expect it to trigger and so we don't want it to be too loud.

git grep 'if.*>prepared' -- drivers/gpu/drm/panel
git grep 'if.*>enabled' -- drivers/gpu/drm/panel
Specifically, that warning will trigger for panel-edp and panel-simple at
shutdown time because those panels hardcode a call to drm_panel_disable()
and drm_panel_unprepare() at shutdown and remove time that they call regardless
of panel state. On systems with a properly coded DRM modeset driver that
calls drm_atomic_helper_shutdown() this is pretty much guaranteed to cause
the warning to fire.

In the patch ("drm/panel: Check for already prepared/enabled in drm_panel")
we've moved this check to the core. Now we can most definitely remove the
check from the individual panels and save a pile of code.

In adition to removing the check from the individual panels, it is believed
that even the core shouldn't need this check and that should be considered
an error if other code ever relies on this check. The check in the core
currently prints a warning whenever something is relying on this check with
dev_warn(). After a little while, we likely want to promote this to a
WARN(1) to help encourage folks not to rely on this behavior.
Unfortunately we can't safely remove the calls in panel-edp and panel-simple
until we're sure that all DRM modeset drivers that are used with those panels
properly call drm_atomic_helper_shutdown(). This TODO item is to validate
that all DRM modeset drivers used with panel-edp and panel-simple properly
call drm_atomic_helper_shutdown() and then remove the calls to
disable/unprepare from those panels. Alternatively, this TODO item could be
removed by convincing stakeholders that those calls are fine and downgrading
the error message in drm_panel_disable() / drm_panel_unprepare() to a
debug-level message.

Contact: Douglas Anderson <dianders@chromium.org>

Level: Starter/Intermediate
Level: Intermediate

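The validation step asked for above amounts to every DRM modeset driver used
with panel-edp/panel-simple wiring drm_atomic_helper_shutdown() into both its
remove and shutdown paths; a hedged sketch for a hypothetical platform driver
(names invented) looks like this::

    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_drv.h>
    #include <linux/platform_device.h>

    static void example_remove(struct platform_device *pdev)
    {
            struct drm_device *drm = platform_get_drvdata(pdev);

            drm_dev_unregister(drm);
            drm_atomic_helper_shutdown(drm);  /* panels are disabled/unprepared here */
    }

    static void example_shutdown(struct platform_device *pdev)
    {
            drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
    }
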
Transition away from using mipi_dsi_*_write_seq()
-------------------------------------------------

The macros mipi_dsi_generic_write_seq() and mipi_dsi_dcs_write_seq() are
non-intuitive because, if there are errors, they return out of the *caller's*
function. We should move all callers to use mipi_dsi_generic_write_seq_multi()
and mipi_dsi_dcs_write_seq_multi() macros instead.

Once all callers are transitioned, the macros and the functions that they call,
mipi_dsi_generic_write_chatty() and mipi_dsi_dcs_write_buffer_chatty(), can
probably be removed. Alternatively, if people feel like the _multi() variants
are overkill for some use cases, we could keep the mipi_dsi_*_write_seq()
variants but change them not to return out of the caller.

Contact: Douglas Anderson <dianders@chromium.org>

Level: Starter
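
A hedged before/after sketch of the requested conversion (panel commands and
byte values invented purely for illustration)::

    #include <drm/drm_mipi_dsi.h>

    /* Old style: the macro returns from *this* function on error. */
    static int example_panel_on_old(struct mipi_dsi_device *dsi)
    {
            mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x02);
            mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x03);
            return 0;
    }

    /* New style: errors accumulate in the context and are checked once. */
    static int example_panel_on_new(struct mipi_dsi_device *dsi)
    {
            struct mipi_dsi_multi_context ctx = { .dsi = dsi };

            mipi_dsi_dcs_write_seq_multi(&ctx, 0xb0, 0x01, 0x02);
            mipi_dsi_dcs_write_seq_multi(&ctx, 0xb1, 0x03);
            return ctx.accum_err;
    }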


Core refactorings
@ -23,3 +23,4 @@ DG2, etc is provided to prototype the driver.
xe_firmware
xe_tile
xe_debugging
xe-drm-usage-stats.rst
10  Documentation/gpu/xe/xe-drm-usage-stats.rst  Normal file
@ -0,0 +1,10 @@
.. SPDX-License-Identifier: GPL-2.0+

.. _xe-usage-stats:

========================================
Xe DRM client usage stats implementation
========================================

.. kernel-doc:: drivers/gpu/drm/xe/xe_drm_client.c
:doc: DRM Client usage stats
28  MAINTAINERS
@ -1081,7 +1081,7 @@ F: Documentation/ABI/testing/sysfs-amd-pmf
|
||||
F: drivers/platform/x86/amd/pmf/
|
||||
|
||||
AMD POWERPLAY AND SWSMU
|
||||
M: Evan Quan <evan.quan@amd.com>
|
||||
M: Kenneth Feng <kenneth.feng@amd.com>
|
||||
L: amd-gfx@lists.freedesktop.org
|
||||
S: Supported
|
||||
T: git https://gitlab.freedesktop.org/agd5f/linux.git
|
||||
@ -7001,6 +7001,11 @@ S: Maintained
|
||||
F: Documentation/devicetree/bindings/display/panel/ilitek,ili9805.yaml
|
||||
F: drivers/gpu/drm/panel/panel-ilitek-ili9805.c
|
||||
|
||||
DRM DRIVER FOR ILITEK ILI9806E PANELS
|
||||
M: Michael Walle <mwalle@kernel.org>
|
||||
S: Maintained
|
||||
F: drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
|
||||
|
||||
DRM DRIVER FOR JADARD JD9365DA-H3 MIPI-DSI LCD PANELS
|
||||
M: Jagan Teki <jagan@edgeble.ai>
|
||||
S: Maintained
|
||||
@ -7011,7 +7016,7 @@ DRM DRIVER FOR LG SW43408 PANELS
|
||||
M: Sumit Semwal <sumit.semwal@linaro.org>
|
||||
M: Caleb Connolly <caleb.connolly@linaro.org>
|
||||
S: Maintained
|
||||
T: git git://anongit.freedesktop.org/drm/drm-misc
|
||||
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
|
||||
F: Documentation/devicetree/bindings/display/panel/lg,sw43408.yaml
|
||||
F: drivers/gpu/drm/panel/panel-lg-sw43408.c
|
||||
|
||||
@ -7298,6 +7303,7 @@ L: dri-devel@lists.freedesktop.org
|
||||
S: Maintained
|
||||
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
|
||||
F: Documentation/gpu/vkms.rst
|
||||
F: drivers/gpu/drm/ci/xfails/vkms*
|
||||
F: drivers/gpu/drm/vkms/
|
||||
|
||||
DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
|
||||
@ -7579,6 +7585,7 @@ L: dri-devel@lists.freedesktop.org
|
||||
S: Maintained
|
||||
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
|
||||
F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
|
||||
F: Documentation/devicetree/bindings/display/st,stm32mp25-lvds.yaml
|
||||
F: drivers/gpu/drm/stm
|
||||
|
||||
DRM DRIVERS FOR TI KEYSTONE
|
||||
@ -7620,8 +7627,9 @@ F: include/uapi/drm/v3d_drm.h
|
||||
|
||||
DRM DRIVERS FOR VC4
|
||||
M: Maxime Ripard <mripard@kernel.org>
|
||||
M: Dave Stevenson <dave.stevenson@raspberrypi.com>
|
||||
R: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
|
||||
S: Supported
|
||||
T: git git://github.com/anholt/linux
|
||||
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
|
||||
F: Documentation/devicetree/bindings/display/brcm,bcm2835-*.yaml
|
||||
F: drivers/gpu/drm/vc4/
|
||||
@ -7668,7 +7676,6 @@ F: include/drm/gpu_scheduler.h
|
||||
DRM PANEL DRIVERS
|
||||
M: Neil Armstrong <neil.armstrong@linaro.org>
|
||||
R: Jessica Zhang <quic_jesszhan@quicinc.com>
|
||||
R: Sam Ravnborg <sam@ravnborg.org>
|
||||
L: dri-devel@lists.freedesktop.org
|
||||
S: Maintained
|
||||
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
|
||||
@ -9712,11 +9719,11 @@ S: Maintained
|
||||
F: block/partitions/efi.*
|
||||
|
||||
HABANALABS PCI DRIVER
|
||||
M: Oded Gabbay <ogabbay@kernel.org>
|
||||
M: Ofir Bitton <obitton@habana.ai>
|
||||
L: dri-devel@lists.freedesktop.org
|
||||
S: Supported
|
||||
C: irc://irc.oftc.net/dri-devel
|
||||
T: git https://git.kernel.org/pub/scm/linux/kernel/git/ogabbay/linux.git
|
||||
T: git https://github.com/HabanaAI/drivers.accel.habanalabs.kernel.git
|
||||
F: Documentation/ABI/testing/debugfs-driver-habanalabs
|
||||
F: Documentation/ABI/testing/sysfs-driver-habanalabs
|
||||
F: drivers/accel/habanalabs/
|
||||
@ -11140,6 +11147,7 @@ S: Supported
|
||||
F: drivers/gpu/drm/i915/display/
|
||||
F: drivers/gpu/drm/xe/display/
|
||||
F: drivers/gpu/drm/xe/compat-i915-headers
|
||||
F: include/drm/intel/
|
||||
|
||||
INTEL DRM I915 DRIVER (Meteor Lake, DG2 and older excluding Poulsbo, Moorestown and derivative)
|
||||
M: Jani Nikula <jani.nikula@linux.intel.com>
|
||||
@ -11152,12 +11160,12 @@ W: https://drm.pages.freedesktop.org/intel-docs/
|
||||
Q: http://patchwork.freedesktop.org/project/intel-gfx/
|
||||
B: https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html
|
||||
C: irc://irc.oftc.net/intel-gfx
|
||||
T: git git://anongit.freedesktop.org/drm-intel
|
||||
T: git https://gitlab.freedesktop.org/drm/i915/kernel.git
|
||||
F: Documentation/ABI/testing/sysfs-driver-intel-i915-hwmon
|
||||
F: Documentation/gpu/i915.rst
|
||||
F: drivers/gpu/drm/ci/xfails/i915*
|
||||
F: drivers/gpu/drm/i915/
|
||||
F: include/drm/i915*
|
||||
F: include/drm/intel/
|
||||
F: include/uapi/drm/i915_drm.h
|
||||
|
||||
INTEL DRM XE DRIVER (Lunar Lake and newer)
|
||||
@ -11174,7 +11182,7 @@ T: git https://gitlab.freedesktop.org/drm/xe/kernel.git
|
||||
F: Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon
|
||||
F: Documentation/gpu/xe/
|
||||
F: drivers/gpu/drm/xe/
|
||||
F: include/drm/xe*
|
||||
F: include/drm/intel/
|
||||
F: include/uapi/drm/xe_drm.h
|
||||
|
||||
INTEL ETHERNET DRIVERS
|
||||
@ -18861,7 +18869,7 @@ F: drivers/net/wireless/quantenna
|
||||
RADEON and AMDGPU DRM DRIVERS
|
||||
M: Alex Deucher <alexander.deucher@amd.com>
|
||||
M: Christian König <christian.koenig@amd.com>
|
||||
M: Pan, Xinhui <Xinhui.Pan@amd.com>
|
||||
M: Xinhui Pan <Xinhui.Pan@amd.com>
|
||||
L: amd-gfx@lists.freedesktop.org
|
||||
S: Supported
|
||||
B: https://gitlab.freedesktop.org/drm/amd/-/issues
|
||||
|
@ -17,8 +17,8 @@
|
||||
#include <linux/bcma/bcma.h>
|
||||
#include <linux/bcma/bcma_regs.h>
|
||||
#include <linux/platform_data/x86/apple.h>
|
||||
#include <drm/i915_drm.h>
|
||||
#include <drm/i915_pciids.h>
|
||||
#include <drm/intel/i915_drm.h>
|
||||
#include <drm/intel/i915_pciids.h>
|
||||
#include <asm/pci-direct.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/io_apic.h>
|
||||
@ -518,47 +518,46 @@ static const struct intel_early_ops gen11_early_ops __initconst = {
|
||||
|
||||
/* Intel integrated GPUs for which we need to reserve "stolen memory" */
|
||||
static const struct pci_device_id intel_early_ids[] __initconst = {
|
||||
INTEL_I830_IDS(&i830_early_ops),
|
||||
INTEL_I845G_IDS(&i845_early_ops),
|
||||
INTEL_I85X_IDS(&i85x_early_ops),
|
||||
INTEL_I865G_IDS(&i865_early_ops),
|
||||
INTEL_I915G_IDS(&gen3_early_ops),
|
||||
INTEL_I915GM_IDS(&gen3_early_ops),
|
||||
INTEL_I945G_IDS(&gen3_early_ops),
|
||||
INTEL_I945GM_IDS(&gen3_early_ops),
|
||||
INTEL_VLV_IDS(&gen6_early_ops),
|
||||
INTEL_PINEVIEW_G_IDS(&gen3_early_ops),
|
||||
INTEL_PINEVIEW_M_IDS(&gen3_early_ops),
|
||||
INTEL_I965G_IDS(&gen3_early_ops),
|
||||
INTEL_G33_IDS(&gen3_early_ops),
|
||||
INTEL_I965GM_IDS(&gen3_early_ops),
|
||||
INTEL_GM45_IDS(&gen3_early_ops),
|
||||
INTEL_G45_IDS(&gen3_early_ops),
|
||||
INTEL_IRONLAKE_D_IDS(&gen3_early_ops),
|
||||
INTEL_IRONLAKE_M_IDS(&gen3_early_ops),
|
||||
INTEL_SNB_D_IDS(&gen6_early_ops),
|
||||
INTEL_SNB_M_IDS(&gen6_early_ops),
|
||||
INTEL_IVB_M_IDS(&gen6_early_ops),
|
||||
INTEL_IVB_D_IDS(&gen6_early_ops),
|
||||
INTEL_HSW_IDS(&gen6_early_ops),
|
||||
INTEL_BDW_IDS(&gen8_early_ops),
|
||||
INTEL_CHV_IDS(&chv_early_ops),
|
||||
INTEL_SKL_IDS(&gen9_early_ops),
|
||||
INTEL_BXT_IDS(&gen9_early_ops),
|
||||
INTEL_KBL_IDS(&gen9_early_ops),
|
||||
INTEL_CFL_IDS(&gen9_early_ops),
|
||||
INTEL_GLK_IDS(&gen9_early_ops),
|
||||
INTEL_CNL_IDS(&gen9_early_ops),
|
||||
INTEL_ICL_11_IDS(&gen11_early_ops),
|
||||
INTEL_EHL_IDS(&gen11_early_ops),
|
||||
INTEL_JSL_IDS(&gen11_early_ops),
|
||||
INTEL_TGL_12_IDS(&gen11_early_ops),
|
||||
INTEL_RKL_IDS(&gen11_early_ops),
|
||||
INTEL_ADLS_IDS(&gen11_early_ops),
|
||||
INTEL_ADLP_IDS(&gen11_early_ops),
|
||||
INTEL_ADLN_IDS(&gen11_early_ops),
|
||||
INTEL_RPLS_IDS(&gen11_early_ops),
|
||||
INTEL_RPLP_IDS(&gen11_early_ops),
|
||||
INTEL_I830_IDS(INTEL_VGA_DEVICE, &i830_early_ops),
|
||||
INTEL_I845G_IDS(INTEL_VGA_DEVICE, &i845_early_ops),
|
||||
INTEL_I85X_IDS(INTEL_VGA_DEVICE, &i85x_early_ops),
|
||||
INTEL_I865G_IDS(INTEL_VGA_DEVICE, &i865_early_ops),
|
||||
INTEL_I915G_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_I915GM_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_I945G_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_I945GM_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_VLV_IDS(INTEL_VGA_DEVICE, &gen6_early_ops),
|
||||
INTEL_PNV_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_I965G_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_G33_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_I965GM_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_GM45_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_G45_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_ILK_IDS(INTEL_VGA_DEVICE, &gen3_early_ops),
|
||||
INTEL_SNB_IDS(INTEL_VGA_DEVICE, &gen6_early_ops),
|
||||
INTEL_IVB_IDS(INTEL_VGA_DEVICE, &gen6_early_ops),
|
||||
INTEL_HSW_IDS(INTEL_VGA_DEVICE, &gen6_early_ops),
|
||||
INTEL_BDW_IDS(INTEL_VGA_DEVICE, &gen8_early_ops),
|
||||
INTEL_CHV_IDS(INTEL_VGA_DEVICE, &chv_early_ops),
|
||||
INTEL_SKL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
|
||||
INTEL_BXT_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
|
||||
INTEL_KBL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
|
||||
INTEL_CFL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
|
||||
INTEL_WHL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
|
||||
INTEL_CML_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
|
||||
INTEL_GLK_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
|
||||
INTEL_CNL_IDS(INTEL_VGA_DEVICE, &gen9_early_ops),
|
||||
INTEL_ICL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_EHL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_JSL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_TGL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_RKL_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_ADLS_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_ADLP_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_ADLN_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_RPLS_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_RPLU_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
INTEL_RPLP_IDS(INTEL_VGA_DEVICE, &gen11_early_ops),
|
||||
};
|
||||
|
||||
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
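/*
 * Editor's sketch, not from the kernel tree: the hunk above changes the
 * INTEL_*_IDS() helpers so that the per-entry wrapper macro is passed in as
 * the first argument instead of being hard-coded.  Schematically, with
 * invented IDs, such a list macro looks like:
 *
 *	#define EXAMPLE_FOO_IDS(MACRO__, ...) \
 *		MACRO__(0x1234, ## __VA_ARGS__), \
 *		MACRO__(0x5678, ## __VA_ARGS__)
 *
 * so that EXAMPLE_FOO_IDS(INTEL_VGA_DEVICE, &foo_early_ops) expands into
 * struct pci_device_id initializers here, while the same ID list can be
 * reused with different wrappers by other consumers of the PCI ID headers.
 */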
|
||||
|
@ -3284,12 +3284,6 @@ static int ts_get_and_handle_kernel_record(struct hl_device *hdev, struct hl_ctx
|
||||
|
||||
/* In case the node already registered, need to unregister first then re-use */
|
||||
if (req_offset_record->ts_reg_info.in_use) {
|
||||
dev_dbg(data->buf->mmg->dev,
|
||||
"Requested record %p is in use on irq: %u ts addr: %p, unregister first then put on irq: %u\n",
|
||||
req_offset_record,
|
||||
req_offset_record->ts_reg_info.interrupt->interrupt_id,
|
||||
req_offset_record->ts_reg_info.timestamp_kernel_addr,
|
||||
data->interrupt->interrupt_id);
|
||||
/*
|
||||
* Since interrupt here can be different than the one the node currently registered
|
||||
* on, and we don't want to lock two lists while we're doing unregister, so
|
||||
@ -3345,10 +3339,6 @@ static int _hl_interrupt_ts_reg_ioctl(struct hl_device *hdev, struct hl_ctx *ctx
|
||||
goto put_cq_cb;
|
||||
}
|
||||
|
||||
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, handle: 0x%llx, ts offset: %llu, cq_offset: %llu\n",
|
||||
data->interrupt->interrupt_id, data->ts_handle,
|
||||
data->ts_offset, data->cq_offset);
|
||||
|
||||
data->buf = hl_mmap_mem_buf_get(data->mmg, data->ts_handle);
|
||||
if (!data->buf) {
|
||||
rc = -EINVAL;
|
||||
@ -3370,9 +3360,6 @@ static int _hl_interrupt_ts_reg_ioctl(struct hl_device *hdev, struct hl_ctx *ctx
|
||||
if (*pend->cq_kernel_addr >= data->target_value) {
|
||||
spin_unlock_irqrestore(&data->interrupt->ts_list_lock, flags);
|
||||
|
||||
dev_dbg(hdev->dev, "Target value already reached release ts record: pend: %p, offset: %llu, interrupt: %u\n",
|
||||
pend, data->ts_offset, data->interrupt->interrupt_id);
|
||||
|
||||
pend->ts_reg_info.in_use = 0;
|
||||
*status = HL_WAIT_CS_STATUS_COMPLETED;
|
||||
*pend->ts_reg_info.timestamp_kernel_addr = ktime_get_ns();
|
||||
|
@ -42,9 +42,8 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
|
||||
pkt.i2c_reg = i2c_reg;
|
||||
pkt.i2c_len = i2c_len;
|
||||
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
|
||||
0, val);
|
||||
if (rc)
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, val);
|
||||
if (rc && rc != -EAGAIN)
|
||||
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
|
||||
|
||||
return rc;
|
||||
@ -75,10 +74,8 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
|
||||
pkt.i2c_len = i2c_len;
|
||||
pkt.value = cpu_to_le64(val);
|
||||
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
|
||||
0, NULL);
|
||||
|
||||
if (rc)
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
|
||||
if (rc && rc != -EAGAIN)
|
||||
dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
|
||||
|
||||
return rc;
|
||||
@ -99,10 +96,8 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
|
||||
pkt.led_index = cpu_to_le32(led);
|
||||
pkt.value = cpu_to_le64(state);
|
||||
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
|
||||
0, NULL);
|
||||
|
||||
if (rc)
|
||||
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
|
||||
if (rc && rc != -EAGAIN)
|
||||
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
|
||||
}
|
||||
|
||||
@ -1722,6 +1717,11 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
|
||||
root,
|
||||
&hdev->device_release_watchdog_timeout_sec);
|
||||
|
||||
debugfs_create_u16("server_type",
|
||||
0444,
|
||||
root,
|
||||
&hdev->asic_prop.server_type);
|
||||
|
||||
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
|
||||
debugfs_create_file(hl_debugfs_list[i].name,
|
||||
0644,
|
||||
|
@ -30,6 +30,8 @@ enum dma_alloc_type {
|
||||
|
||||
#define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
|
||||
|
||||
static void hl_device_heartbeat(struct work_struct *work);
|
||||
|
||||
/*
|
||||
* hl_set_dram_bar- sets the bar to allow later access to address
|
||||
*
|
||||
@ -130,8 +132,8 @@ static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t
|
||||
}
|
||||
|
||||
if (trace_habanalabs_dma_alloc_enabled() && !ZERO_OR_NULL_PTR(ptr))
|
||||
trace_habanalabs_dma_alloc(hdev->dev, (u64) (uintptr_t) ptr, *dma_handle, size,
|
||||
caller);
|
||||
trace_habanalabs_dma_alloc(&(hdev)->pdev->dev, (u64) (uintptr_t) ptr, *dma_handle,
|
||||
size, caller);
|
||||
|
||||
return ptr;
|
||||
}
|
||||
@ -152,7 +154,7 @@ static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *c
|
||||
break;
|
||||
}
|
||||
|
||||
trace_habanalabs_dma_free(hdev->dev, store_cpu_addr, dma_handle, size, caller);
|
||||
trace_habanalabs_dma_free(&(hdev)->pdev->dev, store_cpu_addr, dma_handle, size, caller);
|
||||
}
|
||||
|
||||
void *hl_asic_dma_alloc_coherent_caller(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
|
||||
@ -204,15 +206,15 @@ int hl_dma_map_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
|
||||
return 0;
|
||||
|
||||
for_each_sgtable_dma_sg(sgt, sg, i)
|
||||
trace_habanalabs_dma_map_page(hdev->dev,
|
||||
page_to_phys(sg_page(sg)),
|
||||
sg->dma_address - prop->device_dma_offset_for_host_access,
|
||||
trace_habanalabs_dma_map_page(&(hdev)->pdev->dev,
|
||||
page_to_phys(sg_page(sg)),
|
||||
sg->dma_address - prop->device_dma_offset_for_host_access,
|
||||
#ifdef CONFIG_NEED_SG_DMA_LENGTH
|
||||
sg->dma_length,
|
||||
sg->dma_length,
|
||||
#else
|
||||
sg->length,
|
||||
sg->length,
|
||||
#endif
|
||||
dir, caller);
|
||||
dir, caller);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -247,7 +249,8 @@ void hl_dma_unmap_sgtable_caller(struct hl_device *hdev, struct sg_table *sgt,
|
||||
|
||||
if (trace_habanalabs_dma_unmap_page_enabled()) {
|
||||
for_each_sgtable_dma_sg(sgt, sg, i)
|
||||
trace_habanalabs_dma_unmap_page(hdev->dev, page_to_phys(sg_page(sg)),
|
||||
trace_habanalabs_dma_unmap_page(&(hdev)->pdev->dev,
|
||||
page_to_phys(sg_page(sg)),
|
||||
sg->dma_address - prop->device_dma_offset_for_host_access,
|
||||
#ifdef CONFIG_NEED_SG_DMA_LENGTH
|
||||
sg->dma_length,
|
||||
@ -439,16 +442,19 @@ static void print_idle_status_mask(struct hl_device *hdev, const char *message,
|
||||
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE])
|
||||
{
|
||||
if (idle_mask[3])
|
||||
dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx_%016llx)\n",
|
||||
message, idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
|
||||
dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx_%016llx)\n",
|
||||
dev_name(&hdev->pdev->dev), message,
|
||||
idle_mask[3], idle_mask[2], idle_mask[1], idle_mask[0]);
|
||||
else if (idle_mask[2])
|
||||
dev_err(hdev->dev, "%s (mask %#llx_%016llx_%016llx)\n",
|
||||
message, idle_mask[2], idle_mask[1], idle_mask[0]);
|
||||
dev_err(hdev->dev, "%s %s (mask %#llx_%016llx_%016llx)\n",
|
||||
dev_name(&hdev->pdev->dev), message,
|
||||
idle_mask[2], idle_mask[1], idle_mask[0]);
|
||||
else if (idle_mask[1])
|
||||
dev_err(hdev->dev, "%s (mask %#llx_%016llx)\n",
|
||||
message, idle_mask[1], idle_mask[0]);
|
||||
dev_err(hdev->dev, "%s %s (mask %#llx_%016llx)\n",
|
||||
dev_name(&hdev->pdev->dev), message, idle_mask[1], idle_mask[0]);
|
||||
else
|
||||
dev_err(hdev->dev, "%s (mask %#llx)\n", message, idle_mask[0]);
|
||||
dev_err(hdev->dev, "%s %s (mask %#llx)\n", dev_name(&hdev->pdev->dev), message,
|
||||
idle_mask[0]);
|
||||
}
|
||||
|
||||
static void hpriv_release(struct kref *ref)
|
||||
@ -545,7 +551,8 @@ int hl_hpriv_put(struct hl_fpriv *hpriv)
|
||||
return kref_put(&hpriv->refcount, hpriv_release);
|
||||
}
|
||||
|
||||
static void print_device_in_use_info(struct hl_device *hdev, const char *message)
|
||||
static void print_device_in_use_info(struct hl_device *hdev,
|
||||
struct hl_mem_mgr_fini_stats *mm_fini_stats, const char *message)
|
||||
{
|
||||
u32 active_cs_num, dmabuf_export_cnt;
|
||||
bool unknown_reason = true;
|
||||
@ -569,6 +576,12 @@ static void print_device_in_use_info(struct hl_device *hdev, const char *message
|
||||
dmabuf_export_cnt);
|
||||
}
|
||||
|
||||
if (mm_fini_stats->n_busy_cb) {
|
||||
unknown_reason = false;
|
||||
offset += scnprintf(buf + offset, size - offset, " [%u live CB handles]",
|
||||
mm_fini_stats->n_busy_cb);
|
||||
}
|
||||
|
||||
if (unknown_reason)
|
||||
scnprintf(buf + offset, size - offset, " [unknown reason]");
|
||||
|
||||
@ -586,6 +599,7 @@ void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
|
||||
{
|
||||
struct hl_fpriv *hpriv = file_priv->driver_priv;
|
||||
struct hl_device *hdev = to_hl_device(ddev);
|
||||
struct hl_mem_mgr_fini_stats mm_fini_stats;
|
||||
|
||||
if (!hdev) {
|
||||
pr_crit("Closing FD after device was removed. Memory leak will occur and it is advised to reboot.\n");
|
||||
@ -597,12 +611,13 @@ void hl_device_release(struct drm_device *ddev, struct drm_file *file_priv)
|
||||
/* Memory buffers might be still in use at this point and thus the handles IDR destruction
|
||||
* is postponed to hpriv_release().
|
||||
*/
|
||||
hl_mem_mgr_fini(&hpriv->mem_mgr);
|
||||
hl_mem_mgr_fini(&hpriv->mem_mgr, &mm_fini_stats);
|
||||
|
||||
hdev->compute_ctx_in_release = 1;
|
||||
|
||||
if (!hl_hpriv_put(hpriv)) {
|
||||
print_device_in_use_info(hdev, "User process closed FD but device still in use");
|
||||
print_device_in_use_info(hdev, &mm_fini_stats,
|
||||
"User process closed FD but device still in use");
|
||||
hl_device_reset(hdev, HL_DRV_RESET_HARD);
|
||||
}
|
||||
|
||||
@ -858,6 +873,10 @@ static int device_early_init(struct hl_device *hdev)
|
||||
gaudi2_set_asic_funcs(hdev);
|
||||
strscpy(hdev->asic_name, "GAUDI2C", sizeof(hdev->asic_name));
|
||||
break;
|
||||
case ASIC_GAUDI2D:
|
||||
gaudi2_set_asic_funcs(hdev);
|
||||
strscpy(hdev->asic_name, "GAUDI2D", sizeof(hdev->asic_name));
|
||||
break;
|
||||
default:
|
||||
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
|
||||
hdev->asic_type);
|
||||
@ -946,6 +965,8 @@ static int device_early_init(struct hl_device *hdev)
|
||||
goto free_cb_mgr;
|
||||
}
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->device_reset_work.reset_work, device_hard_reset_pending);
|
||||
hdev->device_reset_work.hdev = hdev;
|
||||
hdev->device_fini_pending = 0;
|
||||
@ -968,7 +989,7 @@ static int device_early_init(struct hl_device *hdev)
|
||||
return 0;
|
||||
|
||||
free_cb_mgr:
|
||||
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
|
||||
hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
|
||||
hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
|
||||
free_chip_info:
|
||||
kfree(hdev->hl_chip_info);
|
||||
@ -1012,7 +1033,7 @@ static void device_early_fini(struct hl_device *hdev)
|
||||
|
||||
mutex_destroy(&hdev->clk_throttling.lock);
|
||||
|
||||
hl_mem_mgr_fini(&hdev->kernel_mem_mgr);
|
||||
hl_mem_mgr_fini(&hdev->kernel_mem_mgr, NULL);
|
||||
hl_mem_mgr_idr_destroy(&hdev->kernel_mem_mgr);
|
||||
|
||||
kfree(hdev->hl_chip_info);
|
||||
@ -1045,21 +1066,55 @@ static bool is_pci_link_healthy(struct hl_device *hdev)
|
||||
return (device_id == hdev->pdev->device);
|
||||
}
|
||||
|
||||
static int hl_device_eq_heartbeat_check(struct hl_device *hdev)
|
||||
static void stringify_time_of_last_heartbeat(struct hl_device *hdev, char *time_str, size_t size,
|
||||
bool is_pq_hb)
|
||||
{
|
||||
time64_t seconds = is_pq_hb ? hdev->heartbeat_debug_info.last_pq_heartbeat_ts
|
||||
: hdev->heartbeat_debug_info.last_eq_heartbeat_ts;
|
||||
struct tm tm;
|
||||
|
||||
if (!seconds)
|
||||
return;
|
||||
|
||||
time64_to_tm(seconds, 0, &tm);
|
||||
|
||||
snprintf(time_str, size, "%ld-%02d-%02d %02d:%02d:%02d (UTC)",
|
||||
tm.tm_year + 1900, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
|
||||
}
|
||||
|
||||
static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
|
||||
{
|
||||
struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
|
||||
u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
|
||||
struct asic_fixed_properties *prop = &hdev->asic_prop;
|
||||
char pq_time_str[64] = "N/A", eq_time_str[64] = "N/A";
|
||||
|
||||
if (!prop->cpucp_info.eq_health_check_supported)
|
||||
return 0;
|
||||
return true;
|
||||
|
||||
if (hdev->eq_heartbeat_received) {
|
||||
hdev->eq_heartbeat_received = false;
|
||||
} else {
|
||||
if (!hdev->eq_heartbeat_received) {
|
||||
dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
|
||||
return -EIO;
|
||||
|
||||
stringify_time_of_last_heartbeat(hdev, pq_time_str, sizeof(pq_time_str), true);
|
||||
stringify_time_of_last_heartbeat(hdev, eq_time_str, sizeof(eq_time_str), false);
|
||||
dev_err(hdev->dev,
|
||||
"EQ: {CI %u, HB counter %u, last HB time: %s}, PQ: {PI: %u, CI: %u (%u), last HB time: %s}\n",
|
||||
hdev->event_queue.ci,
|
||||
heartbeat_debug_info->heartbeat_event_counter,
|
||||
eq_time_str,
|
||||
hdev->kernel_queues[cpu_q_id].pi,
|
||||
atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
|
||||
atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
|
||||
pq_time_str);
|
||||
|
||||
hl_eq_dump(hdev, &hdev->event_queue);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
return 0;
|
||||
hdev->eq_heartbeat_received = false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void hl_device_heartbeat(struct work_struct *work)
|
||||
@ -1078,7 +1133,7 @@ static void hl_device_heartbeat(struct work_struct *work)
|
||||
* in order to validate the eq is working.
|
||||
* Only if both the EQ is healthy and we managed to send the next heartbeat reschedule.
|
||||
*/
|
||||
if ((!hl_device_eq_heartbeat_check(hdev)) && (!hdev->asic_funcs->send_heartbeat(hdev)))
|
||||
if (hl_device_eq_heartbeat_received(hdev) && (!hdev->asic_funcs->send_heartbeat(hdev)))
|
||||
goto reschedule;
|
||||
|
||||
if (hl_device_operational(hdev, NULL))
|
||||
@ -1132,21 +1187,6 @@ static int device_late_init(struct hl_device *hdev)
|
||||
}
|
||||
|
||||
hdev->high_pll = hdev->asic_prop.high_pll;
|
||||
|
||||
if (hdev->heartbeat) {
|
||||
/*
|
||||
* Before scheduling the heartbeat driver will check if eq event has received.
|
||||
* for the first schedule we need to set the indication as true then for the next
|
||||
* one this indication will be true only if eq event was sent by FW.
|
||||
*/
|
||||
hdev->eq_heartbeat_received = true;
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
|
||||
|
||||
schedule_delayed_work(&hdev->work_heartbeat,
|
||||
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
|
||||
}
|
||||
|
||||
hdev->late_init_done = true;
|
||||
|
||||
return 0;
|
||||
@ -1163,9 +1203,6 @@ static void device_late_fini(struct hl_device *hdev)
|
||||
if (!hdev->late_init_done)
|
||||
return;
|
||||
|
||||
if (hdev->heartbeat)
|
||||
cancel_delayed_work_sync(&hdev->work_heartbeat);
|
||||
|
||||
if (hdev->asic_funcs->late_fini)
|
||||
hdev->asic_funcs->late_fini(hdev);
|
||||
|
||||
@ -1266,8 +1303,12 @@ static void hl_abort_waiting_for_completions(struct hl_device *hdev)
|
||||
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset,
|
||||
bool skip_wq_flush)
|
||||
{
|
||||
if (hard_reset)
|
||||
if (hard_reset) {
|
||||
if (hdev->heartbeat)
|
||||
cancel_delayed_work_sync(&hdev->work_heartbeat);
|
||||
|
||||
device_late_fini(hdev);
|
||||
}
|
||||
|
||||
/*
|
||||
* Halt the engines and disable interrupts so we won't get any more
|
||||
@ -1495,15 +1536,14 @@ static void send_disable_pci_access(struct hl_device *hdev, u32 flags)
|
||||
* of heartbeat, the device CPU is marked as disable
|
||||
* so this message won't be sent
|
||||
*/
|
||||
if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0)) {
|
||||
dev_warn(hdev->dev, "Failed to disable FW's PCI access\n");
|
||||
if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
|
||||
return;
|
||||
}
|
||||
|
||||
/* verify that last EQs are handled before disabled is set */
|
||||
/* disable_irq also generates sync irq, this verifies that last EQs are handled
|
||||
* before disabled is set. The IRQ will be enabled again in request_irq call.
|
||||
*/
|
||||
if (hdev->cpu_queues_enable)
|
||||
synchronize_irq(pci_irq_vector(hdev->pdev,
|
||||
hdev->asic_prop.eq_interrupt_id));
|
||||
disable_irq(pci_irq_vector(hdev->pdev, hdev->asic_prop.eq_interrupt_id));
|
||||
}
|
||||
}
|
||||
|
||||
@ -1547,6 +1587,31 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
|
||||
}
|
||||
}
|
||||
|
||||
static void reset_heartbeat_debug_info(struct hl_device *hdev)
|
||||
{
|
||||
hdev->heartbeat_debug_info.last_pq_heartbeat_ts = 0;
|
||||
hdev->heartbeat_debug_info.last_eq_heartbeat_ts = 0;
|
||||
hdev->heartbeat_debug_info.heartbeat_event_counter = 0;
|
||||
}
|
||||
|
||||
static inline void device_heartbeat_schedule(struct hl_device *hdev)
|
||||
{
|
||||
if (!hdev->heartbeat)
|
||||
return;
|
||||
|
||||
reset_heartbeat_debug_info(hdev);
|
||||
|
||||
/*
|
||||
* Before scheduling the heartbeat driver will check if eq event has received.
|
||||
* for the first schedule we need to set the indication as true then for the next
|
||||
* one this indication will be true only if eq event was sent by FW.
|
||||
*/
|
||||
hdev->eq_heartbeat_received = true;
|
||||
|
||||
schedule_delayed_work(&hdev->work_heartbeat,
|
||||
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
|
||||
}
|
||||
|
||||
/*
|
||||
* hl_device_reset - reset the device
|
||||
*
|
||||
@ -1916,6 +1981,8 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
|
||||
if (hard_reset) {
|
||||
hdev->reset_info.hard_reset_cnt++;
|
||||
|
||||
device_heartbeat_schedule(hdev);
|
||||
|
||||
/* After reset is done, we are ready to receive events from
|
||||
* the F/W. We can't do it before because we will ignore events
|
||||
* and if those events are fatal, we won't know about it and
|
||||
@ -2350,6 +2417,12 @@ int hl_device_init(struct hl_device *hdev)
|
||||
goto out_disabled;
|
||||
}
|
||||
|
||||
/* Scheduling the EQ heartbeat thread must come after driver is done with all
|
||||
* initializations, as we want to make sure the FW gets enough time to be prepared
|
||||
* to respond to heartbeat packets.
|
||||
*/
|
||||
device_heartbeat_schedule(hdev);
|
||||
|
||||
dev_notice(hdev->dev,
|
||||
"Successfully added device %s to habanalabs driver\n",
|
||||
dev_name(&(hdev)->pdev->dev));
|
||||
@ -2592,7 +2665,7 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
|
||||
u32 val = readl(hdev->rmmio + reg);
|
||||
|
||||
if (unlikely(trace_habanalabs_rreg32_enabled()))
|
||||
trace_habanalabs_rreg32(hdev->dev, reg, val);
|
||||
trace_habanalabs_rreg32(&(hdev)->pdev->dev, reg, val);
|
||||
|
||||
return val;
|
||||
}
|
||||
@ -2610,7 +2683,7 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
|
||||
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
|
||||
{
|
||||
if (unlikely(trace_habanalabs_wreg32_enabled()))
|
||||
trace_habanalabs_wreg32(hdev->dev, reg, val);
|
||||
trace_habanalabs_wreg32(&(hdev)->pdev->dev, reg, val);
|
||||
|
||||
writel(val, hdev->rmmio + reg);
|
||||
}
|
||||
@ -2836,3 +2909,56 @@ void hl_set_irq_affinity(struct hl_device *hdev, int irq)
|
||||
if (irq_set_affinity_and_hint(irq, &hdev->irq_affinity_mask))
|
||||
dev_err(hdev->dev, "Failed setting irq %d affinity\n", irq);
|
||||
}
|
||||
|
||||
void hl_eq_heartbeat_event_handle(struct hl_device *hdev)
|
||||
{
|
||||
hdev->heartbeat_debug_info.heartbeat_event_counter++;
|
||||
hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds();
|
||||
hdev->eq_heartbeat_received = true;
|
||||
}
|
||||
|
||||
void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask)
|
||||
{
|
||||
struct hl_clk_throttle *clk_throttle = &hdev->clk_throttling;
|
||||
ktime_t zero_time = ktime_set(0, 0);
|
||||
|
||||
mutex_lock(&clk_throttle->lock);
|
||||
|
||||
switch (event_type) {
|
||||
case EQ_EVENT_POWER_EVT_START:
|
||||
clk_throttle->current_reason |= HL_CLK_THROTTLE_POWER;
|
||||
clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_POWER;
|
||||
clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
|
||||
clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
|
||||
dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
|
||||
break;
|
||||
|
||||
case EQ_EVENT_POWER_EVT_END:
|
||||
clk_throttle->current_reason &= ~HL_CLK_THROTTLE_POWER;
|
||||
clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
|
||||
dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");
|
||||
break;
|
||||
|
||||
case EQ_EVENT_THERMAL_EVT_START:
|
||||
clk_throttle->current_reason |= HL_CLK_THROTTLE_THERMAL;
|
||||
clk_throttle->aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
|
||||
clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
|
||||
clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
|
||||
*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
|
||||
dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");
|
||||
break;
|
||||
|
||||
case EQ_EVENT_THERMAL_EVT_END:
|
||||
clk_throttle->current_reason &= ~HL_CLK_THROTTLE_THERMAL;
|
||||
clk_throttle->timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
|
||||
*event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
|
||||
dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n");
|
||||
break;
|
||||
|
||||
default:
|
||||
dev_err(hdev->dev, "Received invalid clock change event %d\n", event_type);
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&clk_throttle->lock);
|
||||
}
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "habanalabs.h"
|
||||
#include <linux/habanalabs/hl_boot_if.h>
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/slab.h>
|
||||
@ -40,6 +41,31 @@ static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
|
||||
[COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
|
||||
};
|
||||
|
||||
/**
|
||||
* hl_fw_version_cmp() - compares the FW version to a specific version
|
||||
*
|
||||
* @hdev: pointer to hl_device structure
|
||||
* @major: major number of a reference version
|
||||
* @minor: minor number of a reference version
|
||||
* @subminor: sub-minor number of a reference version
|
||||
*
|
||||
* Return 1 if FW version greater than the reference version, -1 if it's
|
||||
* smaller and 0 if versions are identical.
|
||||
*/
|
||||
int hl_fw_version_cmp(struct hl_device *hdev, u32 major, u32 minor, u32 subminor)
|
||||
{
|
||||
if (hdev->fw_sw_major_ver != major)
|
||||
return (hdev->fw_sw_major_ver > major) ? 1 : -1;
|
||||
|
||||
if (hdev->fw_sw_minor_ver != minor)
|
||||
return (hdev->fw_sw_minor_ver > minor) ? 1 : -1;
|
||||
|
||||
if (hdev->fw_sw_sub_minor_ver != subminor)
|
||||
return (hdev->fw_sw_sub_minor_ver > subminor) ? 1 : -1;
|
||||
|
||||
return 0;
|
||||
}
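/*
 * Editor's sketch, not part of the driver: a typical caller gates a feature
 * on a minimum firmware version with the comparator above, e.g.
 *
 *	if (hl_fw_version_cmp(hdev, 1, 11, 0) >= 0)
 *		enable_new_feature(hdev);
 *
 * where 1.11.0 is an invented reference version.
 */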
|
||||
|
||||
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;

@@ -345,43 +371,63 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
{
struct cpucp_packet pkt = {};
int rc;

pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(value);

return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to disable FW's PCI access\n");

return rc;
}

/**
 * hl_fw_send_cpu_message() - send CPU message to the device.
 *
 * @hdev: pointer to hl_device structure.
 * @hw_queue_id: HW queue ID
 * @msg: raw data of the message/packet
 * @size: size of @msg in bytes
 * @timeout_us: timeout in usec to wait for CPU reply on the message
 * @result: return code reported by FW
 *
 * send message to the device CPU.
 *
 * Return: 0 on success, non-zero for failure.
 * -ENOMEM: memory allocation failure
 * -EAGAIN: CPU is disabled (try again when enabled)
 * -ETIMEDOUT: timeout waiting for FW response
 * -EIO: protocol error
 */
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, u64 *result)
u16 size, u32 timeout_us, u64 *result)
{
struct hl_hw_queue *queue = &hdev->kernel_queues[hw_queue_id];
struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 tmp, expected_ack_val, pi, opcode;
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
struct hl_bd *sent_bd;
u32 tmp, expected_ack_val, pi, opcode;
int rc;
int rc = 0, fw_rc;

pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
pkt = hl_cpu_accessible_dma_pool_alloc(hdev, size, &pkt_dma_addr);
if (!pkt) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for packet to CPU\n");
dev_err(hdev->dev, "Failed to allocate DMA memory for packet to CPU\n");
return -ENOMEM;
}

memcpy(pkt, msg, len);
memcpy(pkt, msg, size);

mutex_lock(&hdev->send_cpu_message_lock);

/* CPU-CP messages can be sent during soft-reset */
if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
rc = 0;
if (hdev->disabled && !hdev->reset_info.in_compute_reset)
goto out;
}

if (hdev->device_cpu_disabled) {
rc = -EIO;
rc = -EAGAIN;
goto out;
}

@@ -397,7 +443,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
 * Which means that we don't need to lock the access to the entire H/W
 * queues module when submitting a JOB to the CPU queue.
 */
hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), size, pkt_dma_addr);

if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
@@ -406,7 +452,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,

rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
(tmp == expected_ack_val), 1000,
timeout, true);
timeout_us, true);

hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);

@@ -414,19 +460,27 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
/* If FW performed reset just before sending it a packet, we will get a timeout.
 * This is expected behavior, hence no need for error message.
 */
if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset) {
dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
tmp);
else
dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
} else {
struct hl_bd *bd = queue->kernel_address;

bd += hl_pi_2_offset(pi);

dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n"
"Pkt info[%u]: dma_addr: 0x%llx, kernel_addr: %p, len:0x%x, ctl: 0x%x, ptr:0x%llx, dram_bd:%u\n",
tmp, pi, pkt_dma_addr, (void *)pkt, bd->len, bd->ctl, bd->ptr,
queue->dram_bd);
}
hdev->device_cpu_disabled = true;
goto out;
}

tmp = le32_to_cpu(pkt->ctl);

rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
if (rc) {
fw_rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
if (fw_rc) {
opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;

if (!prop->supports_advanced_cpucp_rc) {
@@ -435,7 +489,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
goto scrub_descriptor;
}

switch (rc) {
switch (fw_rc) {
case cpucp_packet_invalid:
dev_err(hdev->dev,
"CPU packet %d is not supported by F/W\n", opcode);
@@ -460,7 +514,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,

/* propagate the return code from the f/w to the callers who want to check it */
if (result)
*result = rc;
*result = fw_rc;

rc = -EIO;

@@ -480,7 +534,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
out:
mutex_unlock(&hdev->send_cpu_message_lock);

hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
hl_cpu_accessible_dma_pool_free(hdev, size, pkt);

return rc;
}
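A minimal caller sketch (editor's illustration, not taken from the patch) following the convention the rest of this series adopts: the documented -EAGAIN return (CPU temporarily disabled) is not treated as a loggable error, everything else is. The packet opcode is the existing CPUCP_PACKET_TEST.

struct cpucp_packet pkt = {};
u64 result = 0;
int rc;

pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << CPUCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
if (rc && rc != -EAGAIN)
        dev_err(hdev->dev, "CPU test packet failed, error %d\n", rc);
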
@@ -550,7 +604,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
int hl_fw_test_cpu_queue(struct hl_device *hdev)
{
struct cpucp_packet test_pkt = {};
u64 result;
u64 result = 0;
int rc;

test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
@@ -623,16 +677,14 @@ int hl_fw_send_device_activity(struct hl_device *hdev, bool open)
int hl_fw_send_heartbeat(struct hl_device *hdev)
{
struct cpucp_packet hb_pkt;
u64 result;
u64 result = 0;
int rc;

memset(&hb_pkt, 0, sizeof(hb_pkt));
hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST << CPUCP_PKT_CTL_OPCODE_SHIFT);
hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
sizeof(hb_pkt), 0, &result);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt, sizeof(hb_pkt), 0, &result);

if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
return -EIO;
@@ -643,6 +695,8 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
rc = -EIO;
}

hdev->heartbeat_debug_info.last_pq_heartbeat_ts = ktime_get_real_seconds();

return rc;
}

@@ -885,7 +939,7 @@ static int hl_fw_send_msi_info_msg(struct hl_device *hdev)
{
struct cpucp_array_data_packet *pkt;
size_t total_pkt_size, data_size;
u64 result;
u64 result = 0;
int rc;

/* skip sending this info for unsupported ASICs */
@@ -976,11 +1030,10 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);

if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP EEPROM packet, error %d\n",
rc);
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CPU-CP EEPROM packet, error %d\n", rc);
goto out;
}

@@ -1021,7 +1074,9 @@ int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_MON_DUMP_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev, "Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CPU-CP monitor-dump packet, error %d\n", rc);
goto out;
}

@@ -1055,8 +1110,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->rx_throughput = result;
@@ -1070,8 +1126,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->tx_throughput = result;
@@ -1084,8 +1141,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
return rc;
}
counters->replay_cnt = (u32) result;
@@ -1105,9 +1163,9 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CpuCP total energy pkt, error %d\n",
rc);
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CpuCP total energy pkt, error %d\n", rc);
return rc;
}

@@ -1183,7 +1241,8 @@ int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
if (rc != -EAGAIN)
dev_err(hdev->dev, "Failed to read PLL info, error %d\n", rc);
return rc;
}

@@ -1210,7 +1269,8 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
if (rc != -EAGAIN)
dev_err(hdev->dev, "Failed to read power, error %d\n", rc);
return rc;
}

@@ -1247,8 +1307,9 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
goto out;
}

@@ -1273,7 +1334,8 @@ int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
if (rc) {
dev_err(hdev->dev,
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
goto out;
}
@@ -1428,7 +1490,7 @@ int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
u32 status = 0, timeout;
int rc, tries = 1;
int rc, tries = 1, fw_err = 0;
bool preboot_still_runs;

/* Need to check two possible scenarios:
@@ -1468,18 +1530,18 @@ int hl_fw_wait_preboot_ready(struct hl_device *hdev)
}
}

if (rc) {
/* If we read all FF, then something is totally wrong, no point
 * of reading specific errors
 */
if (status != -1)
fw_err = fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
pre_fw_load->boot_err1_reg,
pre_fw_load->sts_boot_dev_sts0_reg,
pre_fw_load->sts_boot_dev_sts1_reg);
if (rc || fw_err) {
detect_cpu_boot_status(hdev, status);
dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);

/* If we read all FF, then something is totally wrong, no point
 * of reading specific errors
 */
if (status != -1)
fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
pre_fw_load->boot_err1_reg,
pre_fw_load->sts_boot_dev_sts0_reg,
pre_fw_load->sts_boot_dev_sts1_reg);
dev_err(hdev->dev, "CPU boot %s (status = %d)\n",
fw_err ? "failed due to an error" : "ready timeout", status);
return -EIO;
}

@@ -1750,7 +1812,7 @@ static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);

trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
trace_habanalabs_comms_send_cmd(&hdev->pdev->dev, comms_cmd_str_arr[cmd]);
WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
}

@@ -1808,7 +1870,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,

dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;

trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
trace_habanalabs_comms_wait_status(&hdev->pdev->dev, comms_sts_str_arr[expected_status]);

/* Wait for expected status */
rc = hl_poll_timeout(
@@ -1825,7 +1887,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
return -EIO;
}

trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
trace_habanalabs_comms_wait_status_done(&hdev->pdev->dev,
comms_sts_str_arr[expected_status]);

/*
 * skip storing FW response for NOOP to preserve the actual desired
@@ -1899,7 +1962,7 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
{
int rc;

trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
trace_habanalabs_comms_protocol_cmd(&hdev->pdev->dev, comms_cmd_str_arr[cmd]);

/* first send clear command to clean former commands */
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
@@ -2038,7 +2101,7 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
 * note that no alignment/stride address issues here as all structures
 * are 64 bit padded.
 */
data_ptr = (u8 *)fw_desc + sizeof(struct comms_desc_header);
data_ptr = (u8 *)fw_desc + sizeof(struct comms_msg_header);
data_size = le16_to_cpu(fw_desc->header.size);

data_crc32 = hl_fw_compat_crc32(data_ptr, data_size);
@@ -2192,11 +2255,11 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
fw_data_size = le16_to_cpu(fw_desc->header.size);

temp_fw_desc = vzalloc(sizeof(struct comms_desc_header) + fw_data_size);
temp_fw_desc = vzalloc(sizeof(struct comms_msg_header) + fw_data_size);
if (!temp_fw_desc)
return -ENOMEM;

memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_desc_header) + fw_data_size);
memcpy_fromio(temp_fw_desc, src, sizeof(struct comms_msg_header) + fw_data_size);

rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
(struct lkd_fw_comms_desc *) temp_fw_desc);
@@ -3122,10 +3185,10 @@ long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
pkt.pll_index = cpu_to_le32((u32)used_pll_idx);

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);

if (rc) {
dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
used_pll_idx, rc);
if (rc != -EAGAIN)
dev_err(hdev->dev, "Failed to get frequency of PLL %d, error %d\n",
used_pll_idx, rc);
return rc;
}

@@ -3149,8 +3212,7 @@ void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
pkt.value = cpu_to_le64(freq);

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);

if (rc)
if (rc && rc != -EAGAIN)
dev_err(hdev->dev, "Failed to set frequency to PLL %d, error %d\n",
used_pll_idx, rc);
}
@@ -3166,9 +3228,9 @@ long hl_fw_get_max_power(struct hl_device *hdev)
pkt.ctl = cpu_to_le32(CPUCP_PACKET_MAX_POWER_GET << CPUCP_PKT_CTL_OPCODE_SHIFT);

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);

if (rc) {
dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
if (rc != -EAGAIN)
dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
return rc;
}

@@ -3190,8 +3252,7 @@ void hl_fw_set_max_power(struct hl_device *hdev)
pkt.value = cpu_to_le64(hdev->max_power);

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);

if (rc)
if (rc && rc != -EAGAIN)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);
}

@@ -3217,11 +3278,11 @@ static int hl_fw_get_sec_attest_data(struct hl_device *hdev, u32 packet_id, void
pkt.data_max_size = cpu_to_le32(size);
pkt.nonce = cpu_to_le32(nonce);

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
timeout, NULL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), timeout, NULL);
if (rc) {
dev_err(hdev->dev,
"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
if (rc != -EAGAIN)
dev_err(hdev->dev,
"Failed to handle CPU-CP pkt %u, error %d\n", packet_id, rc);
goto out;
}

@@ -3263,10 +3324,12 @@ int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
if (rc)
dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
else
if (rc) {
if (rc != -EAGAIN)
dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
} else {
dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
}

*size = (u32)result;

@@ -71,7 +71,7 @@ struct hl_fpriv;

#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */

#define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */
#define HL_HEARTBEAT_PER_USEC 10000000 /* 10 s */

#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */

@@ -651,6 +651,8 @@ struct hl_hints_range {
 * @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
 * not supported.
 * @reserved_fw_mem_size: size of dram memory reserved for FW.
 * @fw_event_queue_size: queue size for events from CPU-CP.
 * A value of 0 means using the default HL_EQ_SIZE_IN_BYTES value.
 * @collective_first_sob: first sync object available for collective use
 * @collective_first_mon: first monitor available for collective use
 * @sync_stream_first_sob: first sync object available for sync stream use
@@ -782,6 +784,7 @@ struct asic_fixed_properties {
u32 glbl_err_max_cause_num;
u32 hbw_flush_reg;
u32 reserved_fw_mem_size;
u32 fw_event_queue_size;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -901,6 +904,18 @@ struct hl_mem_mgr {
struct idr handles;
};

/**
 * struct hl_mem_mgr_fini_stats - describes statistics returned during memory manager teardown.
 * @n_busy_cb: the amount of CB handles that could not be removed
 * @n_busy_ts: the amount of TS handles that could not be removed
 * @n_busy_other: the amount of any other type of handles that could not be removed
 */
struct hl_mem_mgr_fini_stats {
u32 n_busy_cb;
u32 n_busy_ts;
u32 n_busy_other;
};

/**
 * struct hl_mmap_mem_buf_behavior - describes unified memory manager buffer behavior
 * @topic: string identifier used for logging
@@ -1229,6 +1244,7 @@ struct hl_user_pending_interrupt {
 * @hdev: pointer to the device structure
 * @kernel_address: holds the queue's kernel virtual address
 * @bus_address: holds the queue's DMA address
 * @size: the event queue size
 * @ci: ci inside the queue
 * @prev_eqe_index: the index of the previous event queue entry. The index of
 * the current entry's index must be +1 of the previous one.
@@ -1240,6 +1256,7 @@ struct hl_eq {
struct hl_device *hdev;
void *kernel_address;
dma_addr_t bus_address;
u32 size;
u32 ci;
u32 prev_eqe_index;
bool check_eqe_index;
@@ -1268,15 +1285,18 @@ struct hl_dec {
 * @ASIC_GAUDI2: Gaudi2 device.
 * @ASIC_GAUDI2B: Gaudi2B device.
 * @ASIC_GAUDI2C: Gaudi2C device.
 * @ASIC_GAUDI2D: Gaudi2D device.
 */
enum hl_asic_type {
ASIC_INVALID,

ASIC_GOYA,
ASIC_GAUDI,
ASIC_GAUDI_SEC,
ASIC_GAUDI2,
ASIC_GAUDI2B,
ASIC_GAUDI2C,
ASIC_GAUDI2D,
};

struct hl_cs_parser;
@@ -2709,11 +2729,16 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
 * updated directly by the device. If false, the host memory being polled will
 * be updated by host CPU. Required so host knows whether or not the memory
 * might need to be byte-swapped before returning value to caller.
 *
 * On the first 4 polling iterations the macro goes to sleep for short period of
 * time that gradually increases and reaches sleep_us on the fifth iteration.
 */
#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
mem_written_by_device) \
({ \
u64 __sleep_step_us; \
ktime_t __timeout; \
u8 __step = 8; \
\
__timeout = ktime_add_us(ktime_get(), timeout_us); \
might_sleep_if(sleep_us); \
@@ -2731,8 +2756,10 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
(val) = le32_to_cpu(*(__le32 *) &(val)); \
break; \
} \
if (sleep_us) \
usleep_range((sleep_us >> 2) + 1, sleep_us); \
__sleep_step_us = sleep_us >> __step; \
if (__sleep_step_us) \
usleep_range((__sleep_step_us >> 2) + 1, __sleep_step_us); \
__step >>= 1; \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
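A worked illustration of the new back-off (editor's note, not part of the patch): with sleep_us = 1000, the per-iteration sleep bound grows as the shift amount halves, reaching the full sleep_us on the fifth pass, exactly as the updated comment states.

/* sleep_us = 1000:
 * iteration 1: __step = 8 -> __sleep_step_us = 1000 >> 8 = 3 us
 * iteration 2: __step = 4 -> 1000 >> 4 = 62 us
 * iteration 3: __step = 2 -> 1000 >> 2 = 250 us
 * iteration 4: __step = 1 -> 1000 >> 1 = 500 us
 * iteration 5+: __step = 0 -> 1000 >> 0 = 1000 us (full sleep_us)
 */
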
@@ -3174,6 +3201,21 @@
u8 watchdog_active;
};

/**
 * struct eq_heartbeat_debug_info - stores debug info to be used upon heartbeat failure.
 * @last_pq_heartbeat_ts: timestamp of the last test packet that was sent to FW.
 * This packet is the trigger in FW to send the EQ heartbeat event.
 * @last_eq_heartbeat_ts: timestamp of the last EQ heartbeat event that was received from FW.
 * @heartbeat_event_counter: number of heartbeat events received.
 * @cpu_queue_id: used to read the queue pi/ci
 */
struct eq_heartbeat_debug_info {
time64_t last_pq_heartbeat_ts;
time64_t last_eq_heartbeat_ts;
u32 heartbeat_event_counter;
u32 cpu_queue_id;
};
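The struct above is filled from two sides: hl_fw_send_heartbeat() (earlier hunk) stamps last_pq_heartbeat_ts when the trigger packet goes out, and the new common hl_eq_heartbeat_event_handle() accounts for the FW's answer. The handler body is not part of this excerpt; the sketch below only illustrates the intended bookkeeping and is an editor's assumption, not the actual implementation.

/* hypothetical sketch of the EQ-side bookkeeping */
void hl_eq_heartbeat_event_handle(struct hl_device *hdev)
{
        hdev->heartbeat_debug_info.heartbeat_event_counter++;
        hdev->heartbeat_debug_info.last_eq_heartbeat_ts = ktime_get_real_seconds();
        hdev->eq_heartbeat_received = true;
}
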

/**
 * struct hl_device - habanalabs device structure.
 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
@@ -3262,6 +3304,7 @@
 * @clk_throttling: holds information about current/previous clock throttling events
 * @captured_err_info: holds information about errors.
 * @reset_info: holds current device reset information.
 * @heartbeat_debug_info: counters used to debug heartbeat failures.
 * @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
 * @stream_master_qid_arr: pointer to array with QIDs of master streams.
 * @fw_inner_major_ver: the major of current loaded preboot inner version.
@@ -3452,6 +3495,8 @@ struct hl_device {

struct hl_reset_info reset_info;

struct eq_heartbeat_debug_info heartbeat_debug_info;

cpumask_t irq_affinity_mask;

u32 *stream_master_qid_arr;
@@ -3596,25 +3641,6 @@ struct hl_ioctl_desc {
hl_ioctl_t *func;
};

static inline bool hl_is_fw_sw_ver_below(struct hl_device *hdev, u32 fw_sw_major, u32 fw_sw_minor)
{
if (hdev->fw_sw_major_ver < fw_sw_major)
return true;
if (hdev->fw_sw_major_ver > fw_sw_major)
return false;
if (hdev->fw_sw_minor_ver < fw_sw_minor)
return true;
return false;
}

static inline bool hl_is_fw_sw_ver_equal_or_greater(struct hl_device *hdev, u32 fw_sw_major,
u32 fw_sw_minor)
{
return (hdev->fw_sw_major_ver > fw_sw_major ||
(hdev->fw_sw_major_ver == fw_sw_major &&
hdev->fw_sw_minor_ver >= fw_sw_minor));
}
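These two inline helpers are dropped in favour of hl_fw_version_cmp(); the gaudi2.c hunks later in this series show the mechanical translation, roughly as follows (editor's note; sub-minor 0 is used because the old helpers ignored the sub-minor number).

/* hl_is_fw_sw_ver_below(hdev, M, m)            -> hl_fw_version_cmp(hdev, M, m, 0) < 0  */
/* hl_is_fw_sw_ver_equal_or_greater(hdev, M, m) -> hl_fw_version_cmp(hdev, M, m, 0) >= 0 */
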

/*
 * Kernel module functions that can be accessed by entire module
 */
@@ -3740,6 +3766,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
void hl_eq_dump(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
@@ -3919,6 +3946,7 @@ void hl_mmu_dr_flush(struct hl_ctx *ctx);
int hl_mmu_dr_init(struct hl_device *hdev);
void hl_mmu_dr_fini(struct hl_device *hdev);

int hl_fw_version_cmp(struct hl_device *hdev, u32 major, u32 minor, u32 subminor);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value);
@@ -4033,7 +4061,7 @@ char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);

void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg);
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg);
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg, struct hl_mem_mgr_fini_stats *stats);
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg);
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
void *args);
@@ -4059,6 +4087,8 @@ void hl_capture_engine_err(struct hl_device *hdev, u16 engine_id, u16 error_coun
void hl_enable_err_info_capture(struct hl_error_info *captured_err_info);
void hl_init_cpu_for_irq(struct hl_device *hdev);
void hl_set_irq_affinity(struct hl_device *hdev, int irq);
void hl_eq_heartbeat_event_handle(struct hl_device *hdev);
void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask);

#ifdef CONFIG_DEBUG_FS

@@ -144,6 +144,9 @@ static enum hl_asic_type get_asic_type(struct hl_device *hdev)
case REV_ID_C:
asic_type = ASIC_GAUDI2C;
break;
case REV_ID_D:
asic_type = ASIC_GAUDI2D;
break;
default:
break;
}
@@ -260,7 +263,7 @@ int hl_device_open(struct drm_device *ddev, struct drm_file *file_priv)

out_err:
mutex_unlock(&hdev->fpriv_list_lock);
hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_mem_mgr_fini(&hpriv->mem_mgr, NULL);
hl_mem_mgr_idr_destroy(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
mutex_destroy(&hpriv->ctx_lock);

@@ -585,9 +585,10 @@ int hl_get_temperature(struct hl_device *hdev,
*value = (long) result;

if (rc) {
dev_err_ratelimited(hdev->dev,
"Failed to get temperature from sensor %d, error %d\n",
sensor_index, rc);
if (rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to get temperature from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}

@@ -610,8 +611,7 @@ int hl_set_temperature(struct hl_device *hdev,

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);

if (rc)
if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set temperature of sensor %d, error %d\n",
sensor_index, rc);
@@ -639,9 +639,10 @@ int hl_get_voltage(struct hl_device *hdev,
*value = (long) result;

if (rc) {
dev_err_ratelimited(hdev->dev,
"Failed to get voltage from sensor %d, error %d\n",
sensor_index, rc);
if (rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to get voltage from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}

@@ -668,9 +669,10 @@ int hl_get_current(struct hl_device *hdev,
*value = (long) result;

if (rc) {
dev_err_ratelimited(hdev->dev,
"Failed to get current from sensor %d, error %d\n",
sensor_index, rc);
if (rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to get current from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}

@@ -697,9 +699,10 @@ int hl_get_fan_speed(struct hl_device *hdev,
*value = (long) result;

if (rc) {
dev_err_ratelimited(hdev->dev,
"Failed to get fan speed from sensor %d, error %d\n",
sensor_index, rc);
if (rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to get fan speed from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}

@@ -726,9 +729,10 @@ int hl_get_pwm_info(struct hl_device *hdev,
*value = (long) result;

if (rc) {
dev_err_ratelimited(hdev->dev,
"Failed to get pwm info from sensor %d, error %d\n",
sensor_index, rc);
if (rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to get pwm info from sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}

@@ -751,8 +755,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);

if (rc)
if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set pwm info to sensor %d, error %d\n",
sensor_index, rc);
@@ -774,8 +777,7 @@ int hl_set_voltage(struct hl_device *hdev,

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);

if (rc)
if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set voltage of sensor %d, error %d\n",
sensor_index, rc);
@@ -797,10 +799,8 @@ int hl_set_current(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);

if (rc)
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set current of sensor %d, error %d\n",
sensor_index, rc);
@@ -830,8 +830,7 @@ int hl_set_power(struct hl_device *hdev,

rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, NULL);

if (rc)
if (rc && rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to set power of sensor %d, error %d\n",
sensor_index, rc);
@@ -859,9 +858,10 @@ int hl_get_power(struct hl_device *hdev,
*value = (long) result;

if (rc) {
dev_err_ratelimited(hdev->dev,
"Failed to get power of sensor %d, error %d\n",
sensor_index, rc);
if (rc != -EAGAIN)
dev_err_ratelimited(hdev->dev,
"Failed to get power of sensor %d, error %d\n",
sensor_index, rc);
*value = 0;
}

@@ -652,14 +652,16 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
u32 size = hdev->asic_prop.fw_event_queue_size ? : HL_EQ_SIZE_IN_BYTES;
void *p;

p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address);
p = hl_cpu_accessible_dma_pool_alloc(hdev, size, &q->bus_address);
if (!p)
return -ENOMEM;

q->hdev = hdev;
q->kernel_address = p;
q->size = size;
q->ci = 0;
q->prev_eqe_index = 0;

@@ -678,7 +680,7 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
flush_workqueue(hdev->eq_wq);

hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address);
hl_cpu_accessible_dma_pool_free(hdev, q->size, q->kernel_address);
}

void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
@@ -693,5 +695,30 @@ void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
 * when the device is operational again
 */

memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
memset(q->kernel_address, 0, q->size);
}

void hl_eq_dump(struct hl_device *hdev, struct hl_eq *q)
{
u32 eq_length, eqe_size, ctl, ready, mode, type, index;
struct hl_eq_header *hdr;
u8 *ptr;
int i;

eq_length = HL_EQ_LENGTH;
eqe_size = q->size / HL_EQ_LENGTH;

dev_info(hdev->dev, "Contents of EQ entries headers:\n");

for (i = 0, ptr = q->kernel_address ; i < eq_length ; ++i, ptr += eqe_size) {
hdr = (struct hl_eq_header *) ptr;
ctl = le32_to_cpu(hdr->ctl);
ready = FIELD_GET(EQ_CTL_READY_MASK, ctl);
mode = FIELD_GET(EQ_CTL_EVENT_MODE_MASK, ctl);
type = FIELD_GET(EQ_CTL_EVENT_TYPE_MASK, ctl);
index = FIELD_GET(EQ_CTL_INDEX_MASK, ctl);

dev_info(hdev->dev, "%02u: %#010x [ready: %u, mode %u, type %04u, index %05u]\n",
i, ctl, ready, mode, type, index);
}
}

@@ -318,28 +318,61 @@ void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
idr_init(&mmg->handles);
}

static void hl_mem_mgr_fini_stats_reset(struct hl_mem_mgr_fini_stats *stats)
{
if (!stats)
return;

memset(stats, 0, sizeof(*stats));
}

static void hl_mem_mgr_fini_stats_inc(u64 mem_id, struct hl_mem_mgr_fini_stats *stats)
{
if (!stats)
return;

switch (mem_id) {
case HL_MMAP_TYPE_CB:
++stats->n_busy_cb;
break;
case HL_MMAP_TYPE_TS_BUFF:
++stats->n_busy_ts;
break;
default:
/* we currently store only CB/TS so this shouldn't happen */
++stats->n_busy_other;
}
}

/**
 * hl_mem_mgr_fini - release unified memory manager
 *
 * @mmg: parent unified memory manager
 * @stats: if non-NULL, will return some counters for handles that could not be removed.
 *
 * Release the unified memory manager. Shall be called from an interrupt context.
 */
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg, struct hl_mem_mgr_fini_stats *stats)
{
struct hl_mmap_mem_buf *buf;
struct idr *idp;
const char *topic;
u64 mem_id;
u32 id;

hl_mem_mgr_fini_stats_reset(stats);

idp = &mmg->handles;

idr_for_each_entry(idp, buf, id) {
topic = buf->behavior->topic;
if (hl_mmap_mem_buf_put(buf) != 1)
mem_id = buf->behavior->mem_id;
if (hl_mmap_mem_buf_put(buf) != 1) {
dev_err(mmg->dev,
"%s: Buff handle %u for CTX is still alive\n",
topic, id);
hl_mem_mgr_fini_stats_inc(mem_id, stats);
}
}
}

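A hedged caller sketch (editor's illustration, not part of the patch): a teardown path that wants the new counters can pass a stats struct instead of NULL and act on how many CB/TS handles were still busy. The dev_dbg message below is hypothetical.

struct hl_mem_mgr_fini_stats stats;

hl_mem_mgr_fini(&hpriv->mem_mgr, &stats);
if (stats.n_busy_cb || stats.n_busy_ts || stats.n_busy_other)
        dev_dbg(hdev->dev, "busy handles at teardown: cb=%u ts=%u other=%u\n",
                stats.n_busy_cb, stats.n_busy_ts, stats.n_busy_other);
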
@@ -6,6 +6,7 @@
 */

#include <linux/slab.h>
#include <linux/pci.h>

#include "../habanalabs.h"

@@ -262,7 +263,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flu
mmu_funcs->flush(ctx);

if (trace_habanalabs_mmu_unmap_enabled() && !rc)
trace_habanalabs_mmu_unmap(hdev->dev, virt_addr, 0, page_size, flush_pte);
trace_habanalabs_mmu_unmap(&hdev->pdev->dev, virt_addr, 0, page_size, flush_pte);

return rc;
}
@@ -349,7 +350,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_s
if (flush_pte)
mmu_funcs->flush(ctx);

trace_habanalabs_mmu_map(hdev->dev, virt_addr, phys_addr, page_size, flush_pte);
trace_habanalabs_mmu_map(&hdev->pdev->dev, virt_addr, phys_addr, page_size, flush_pte);

return 0;

@@ -599,6 +600,7 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
case ASIC_GAUDI2:
case ASIC_GAUDI2B:
case ASIC_GAUDI2C:
case ASIC_GAUDI2D:
hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
if (prop->pmmu.host_resident)
hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
@@ -644,7 +646,8 @@ int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
if (rc)
dev_err_ratelimited(hdev->dev,
"%s cache invalidation failed, rc=%d\n",
"%s: %s cache invalidation failed, rc=%d\n",
dev_name(&hdev->pdev->dev),
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);

return rc;
@@ -659,8 +662,9 @@ int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
asid, va, size);
if (rc)
dev_err_ratelimited(hdev->dev,
"%s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", va, size, rc);
"%s: %s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
dev_name(&hdev->pdev->dev), flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU",
va, size, rc);

return rc;
}

@@ -123,7 +123,7 @@ int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);

if (unlikely(trace_habanalabs_elbi_read_enabled()))
trace_habanalabs_elbi_read(hdev->dev, (u32) addr, val);
trace_habanalabs_elbi_read(&hdev->pdev->dev, (u32) addr, val);

return 0;
}
@@ -186,7 +186,7 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)

if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
if (unlikely(trace_habanalabs_elbi_write_enabled()))
trace_habanalabs_elbi_write(hdev->dev, (u32) addr, val);
trace_habanalabs_elbi_write(&hdev->pdev->dev, (u32) addr, val);
return 0;
}

@@ -142,8 +142,9 @@ static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
{
struct hl_device *hdev = dev_get_drvdata(dev);

return sprintf(buf, "0x%08x\n",
le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version));
return sprintf(buf, "0x%08x%08x\n",
le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_timestamp),
le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version));
}

static ssize_t cpucp_kernel_ver_show(struct device *dev,
@@ -270,6 +271,9 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI2C:
str = "GAUDI2C";
break;
case ASIC_GAUDI2D:
str = "GAUDI2D";
break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);

@@ -1639,10 +1639,8 @@ static int gaudi_late_init(struct hl_device *hdev)
}

rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
if (rc) {
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
if (rc)
return rc;
}

/* Scrub both SRAM and DRAM */
rc = hdev->asic_funcs->scrub_device_mem(hdev);
@@ -4154,13 +4152,7 @@ static int gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)

static int gaudi_suspend(struct hl_device *hdev)
{
int rc;

rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");

return rc;
return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
}

static int gaudi_resume(struct hl_device *hdev)

@@ -2601,6 +2601,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)

prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;

prop->supports_advanced_cpucp_rc = true;

return 0;

free_qprops:
@@ -3308,14 +3310,10 @@ static int gaudi2_late_init(struct hl_device *hdev)
struct gaudi2_device *gaudi2 = hdev->asic_specific;
int rc;

hdev->asic_prop.supports_advanced_cpucp_rc = true;

rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS,
gaudi2->virt_msix_db_dma_addr);
if (rc) {
dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
if (rc)
return rc;
}

rc = gaudi2_fetch_psoc_frequency(hdev);
if (rc) {
@@ -3783,7 +3781,7 @@ static int gaudi2_sw_init(struct hl_device *hdev)
prop->supports_compute_reset = true;

/* Event queue sanity check added in FW version 1.11 */
if (hl_is_fw_sw_ver_below(hdev, 1, 11))
if (hl_fw_version_cmp(hdev, 1, 11, 0) < 0)
hdev->event_queue.check_eqe_index = false;
else
hdev->event_queue.check_eqe_index = true;
@@ -3798,6 +3796,8 @@ static int gaudi2_sw_init(struct hl_device *hdev)
if (rc)
goto special_blocks_free;

hdev->heartbeat_debug_info.cpu_queue_id = GAUDI2_QUEUE_ID_CPU_PQ;

return 0;

special_blocks_free:
@@ -6314,26 +6314,6 @@ static void gaudi2_execute_hard_reset(struct hl_device *hdev)
WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1);
}

static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_timeout_us)
{
int i, rc = 0;
u32 reg_val;

for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++)
rc = hl_poll_timeout(
hdev,
mmCPU_RST_STATUS_TO_HOST,
reg_val,
reg_val == CPU_RST_STATUS_SOFT_RST_DONE,
1000,
poll_timeout_us);

if (rc)
dev_err(hdev->dev, "Timeout while waiting for FW to complete soft reset (0x%x)\n",
reg_val);
return rc;
}

/**
 * gaudi2_execute_soft_reset - execute soft reset by driver/FW
 *
@@ -6346,23 +6326,8 @@ static int gaudi2_get_soft_rst_done_indication(struct hl_device *hdev, u32 poll_
static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_performs_reset,
u32 poll_timeout_us)
{
int rc;

if (!driver_performs_reset) {
if (hl_is_fw_sw_ver_below(hdev, 1, 10)) {
/* set SP to indicate reset request sent to FW */
WREG32(mmCPU_RST_STATUS_TO_HOST, CPU_RST_STATUS_NA);

WREG32(mmGIC_HOST_SOFT_RST_IRQ_POLL_REG,
gaudi2_irq_map_table[GAUDI2_EVENT_CPU_SOFT_RESET].cpu_id);

/* wait for f/w response */
rc = gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
} else {
rc = hl_fw_send_soft_reset(hdev);
}
return rc;
}
if (!driver_performs_reset)
return hl_fw_send_soft_reset(hdev);

/* Block access to engines, QMANs and SM during reset, these
 * RRs will be reconfigured after soft reset.
@@ -6502,13 +6467,7 @@ static int gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset

static int gaudi2_suspend(struct hl_device *hdev)
{
int rc;

rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");

return rc;
return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
}

static int gaudi2_resume(struct hl_device *hdev)
@@ -7914,7 +7873,7 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
bool has_block_id = false;
u16 block_id;

if (!hl_is_fw_sw_ver_below(hdev, 1, 12))
if (hl_fw_version_cmp(hdev, 1, 12, 0) >= 0)
has_block_id = true;

ecc_address = le64_to_cpu(ecc_data->ecc_address);
@@ -8165,13 +8124,7 @@ static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
}

hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx];

if (hl_is_fw_sw_ver_below(hdev, 1, 9) &&
!hdev->asic_prop.fw_security_enabled &&
((module_idx == 0) || (module_idx == 1)))
lbw_rtr_id = DCORE0_RTR0;
else
lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
break;
case RAZWI_MME:
sprintf(initiator_name, "MME_%u", module_idx);
@@ -9310,8 +9263,8 @@ static int gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type,
static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
struct hl_eq_hbm_sei_read_err_intr_info *rd_err_data, u32 err_cnt)
{
bool require_hard_reset = false;
u32 addr, beat, beat_shift;
bool rc = false;

dev_err_ratelimited(hdev->dev,
"READ ERROR count: ECC SERR: %d, ECC DERR: %d, RD_PARITY: %d\n",
@@ -9343,7 +9296,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
beat,
le32_to_cpu(rd_err_data->dbg_rd_err_dm),
le32_to_cpu(rd_err_data->dbg_rd_err_syndrome));
rc |= true;
require_hard_reset = true;
}

beat_shift = beat * HBM_RD_ERR_BEAT_SHIFT;
@@ -9356,7 +9309,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
(le32_to_cpu(rd_err_data->dbg_rd_err_misc) &
(HBM_RD_ERR_PAR_DATA_BEAT0_MASK << beat_shift)) >>
(HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT + beat_shift));
rc |= true;
require_hard_reset = true;
}

dev_err_ratelimited(hdev->dev, "Beat%d DQ data:\n", beat);
@@ -9366,7 +9319,7 @@ static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev,
le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2 + 1]));
}

return rc;
return require_hard_reset;
}

static void gaudi2_hbm_sei_print_wr_par_info(struct hl_device *hdev,
@@ -9824,11 +9777,6 @@ static u16 event_id_to_engine_id(struct hl_device *hdev, u16 event_type)
return U16_MAX;
}

static void hl_eq_heartbeat_event_handle(struct hl_device *hdev)
{
hdev->eq_heartbeat_received = true;
}

static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
@@ -10050,6 +9998,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) {
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
reset_required = true;
is_critical = eq_entry->sei_data.hdr.is_critical;
}
error_count++;
break;
@@ -10070,7 +10019,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
if (hl_is_fw_sw_ver_equal_or_greater(hdev, 1, 13))
if (hl_fw_version_cmp(hdev, 1, 13, 0) >= 0)
is_critical = true;
break;

@@ -10281,8 +10230,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2_print_event(hdev, event_type, true,
"No error cause for H/W event %u", event_type);

if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) ||
reset_required) {
if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) || reset_required) {
if (reset_required ||
(gaudi2_irq_map_table[event_type].reset == EVENT_RESET_TYPE_HARD))
reset_flags |= HL_DRV_RESET_HARD;

@@ -384,7 +384,7 @@ enum gaudi2_edma_id {
/* User interrupt count is aligned with HW CQ count.
 * We have 64 CQ's per dcore, CQ0 in dcore 0 is reserved for legacy mode
 */
#define GAUDI2_NUM_USER_INTERRUPTS 255
#define GAUDI2_NUM_USER_INTERRUPTS 64
#define GAUDI2_NUM_RESERVED_INTERRUPTS 1
#define GAUDI2_TOTAL_USER_INTERRUPTS (GAUDI2_NUM_USER_INTERRUPTS + GAUDI2_NUM_RESERVED_INTERRUPTS)

@@ -416,11 +416,11 @@ enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
GAUDI2_IRQ_NUM_TPC_ASSERT,
GAUDI2_IRQ_NUM_EQ_ERROR,
GAUDI2_IRQ_NUM_RESERVED_FIRST,
GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_TOTAL_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
GAUDI2_IRQ_NUM_USER_FIRST = GAUDI2_IRQ_NUM_UNEXPECTED_ERROR + 1,
GAUDI2_IRQ_NUM_USER_FIRST,
GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_RESERVED_FIRST,
GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_RESERVED_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_UNEXPECTED_ERROR = RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT,
GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
};
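Editor's worked example of the new vector layout (not part of the patch), using the values from the gaudi2P.h hunk further down in this excerpt (GAUDI2_MSIX_ENTRIES 128, RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 127):

/* user vectors:     GAUDI2_IRQ_NUM_USER_FIRST .. USER_FIRST + 64 - 1      (64 vectors)
 * reserved hole:    USER_LAST + 1 .. 128 - 1 - 1 = 126
 * unexpected error: vector 127 (== GAUDI2_MSIX_ENTRIES - 1, the one reserved interrupt)
 * total table size: GAUDI2_MSIX_ENTRIES = 128
 */
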
@@ -479,6 +479,7 @@ static const u32 gaudi2_pb_dcr0_edma0_unsecured_regs[] = {
mmDCORE0_EDMA0_CORE_CTX_TE_NUMROWS,
mmDCORE0_EDMA0_CORE_CTX_IDX,
mmDCORE0_EDMA0_CORE_CTX_IDX_INC,
mmDCORE0_EDMA0_CORE_WR_COMP_MAX_OUTSTAND,
mmDCORE0_EDMA0_CORE_RD_LBW_RATE_LIM_CFG,
mmDCORE0_EDMA0_QM_CQ_CFG0_0,
mmDCORE0_EDMA0_QM_CQ_CFG0_1,

@@ -893,11 +893,8 @@ int goya_late_init(struct hl_device *hdev)
WREG32(mmMMU_LOG2_DDR_SIZE, ilog2(prop->dram_size));

rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS, 0x0);
if (rc) {
dev_err(hdev->dev,
"Failed to enable PCI access from CPU %d\n", rc);
if (rc)
return rc;
}

/* force setting to low frequency */
goya->curr_pll_profile = PLL_LOW;
@@ -2864,13 +2861,7 @@ static int goya_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset)

int goya_suspend(struct hl_device *hdev)
{
int rc;

rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
if (rc)
dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");

return rc;
return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
}

int goya_resume(struct hl_device *hdev)

@@ -63,9 +63,9 @@
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START 0xFFF0F80000000000ull
#define RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END 0xFFF0FFFFFFFFFFFFull

#define RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 256
#define RESERVED_MSIX_UNEXPECTED_USER_ERROR_INTERRUPT 127

#define GAUDI2_MSIX_ENTRIES 512
#define GAUDI2_MSIX_ENTRIES 128

#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */

@ -330,9 +330,9 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
|
||||
{ .fc_id = 149, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "EDMA7_ECC_SERR" },
|
||||
{ .fc_id = 150, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "HDMA4_ECC_SERR" },
|
||||
.name = "EDMA4_ECC_SERR" },
|
||||
{ .fc_id = 151, .cpu_id = 48, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "HDMA5_ECC_SERR" },
|
||||
.name = "EDMA5_ECC_SERR" },
|
||||
{ .fc_id = 152, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "EDMA2_ECC_DERR" },
|
||||
{ .fc_id = 153, .cpu_id = 49, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
@ -856,55 +856,55 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
|
||||
{ .fc_id = 412, .cpu_id = 84, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "PCIE_ADDR_DEC_ERR" },
|
||||
{ .fc_id = 413, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC0_AXI_ERR_RSP" },
|
||||
.name = "DCORE0_TPC0_AXI_ERR_RSP" },
|
||||
{ .fc_id = 414, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC1_AXI_ERR_RSP" },
|
||||
.name = "DCORE0_TPC1_AXI_ERR_RSP" },
|
||||
{ .fc_id = 415, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC2_AXI_ERR_RSP" },
|
||||
.name = "DCORE0_TPC2_AXI_ERR_RSP" },
|
||||
{ .fc_id = 416, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC3_AXI_ERR_RSP" },
|
||||
.name = "DCORE0_TPC3_AXI_ERR_RSP" },
|
||||
{ .fc_id = 417, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC4_AXI_ERR_RSP" },
|
||||
.name = "DCORE0_TPC4_AXI_ERR_RSP" },
|
||||
{ .fc_id = 418, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC5_AXI_ERR_RSP" },
|
||||
.name = "DCORE0_TPC5_AXI_ERR_RSP" },
|
||||
{ .fc_id = 419, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC6_AXI_ERR_RSP" },
|
||||
.name = "DCORE1_TPC0_AXI_ERR_RSP" },
|
||||
{ .fc_id = 420, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC7_AXI_ERR_RSP" },
|
||||
.name = "DCORE1_TPC1_AXI_ERR_RSP" },
|
||||
{ .fc_id = 421, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC8_AXI_ERR_RSP" },
|
||||
.name = "DCORE1_TPC2_AXI_ERR_RSP" },
|
||||
{ .fc_id = 422, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC9_AXI_ERR_RSP" },
|
||||
.name = "DCORE1_TPC3_AXI_ERR_RSP" },
|
||||
{ .fc_id = 423, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC10_AXI_ERR_RSP" },
|
||||
.name = "DCORE1_TPC4_AXI_ERR_RSP" },
|
||||
{ .fc_id = 424, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC11_AXI_ERR_RSP" },
|
||||
.name = "DCORE1_TPC5_AXI_ERR_RSP" },
|
||||
{ .fc_id = 425, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC12_AXI_ERR_RSP" },
|
||||
.name = "DCORE2_TPC0_AXI_ERR_RSP" },
|
||||
{ .fc_id = 426, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC13_AXI_ERR_RSP" },
|
||||
.name = "DCORE2_TPC1_AXI_ERR_RSP" },
|
||||
{ .fc_id = 427, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC14_AXI_ERR_RSP" },
|
||||
.name = "DCORE2_TPC2_AXI_ERR_RSP" },
|
||||
{ .fc_id = 428, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC15_AXI_ERR_RSP" },
|
||||
.name = "DCORE2_TPC3_AXI_ERR_RSP" },
|
||||
{ .fc_id = 429, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC16_AXI_ERR_RSP" },
|
||||
.name = "DCORE2_TPC4_AXI_ERR_RSP" },
|
||||
{ .fc_id = 430, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC17_AXI_ERR_RSP" },
|
||||
.name = "DCORE2_TPC5_AXI_ERR_RSP" },
|
||||
{ .fc_id = 431, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC18_AXI_ERR_RSP" },
|
||||
.name = "DCORE3_TPC0_AXI_ERR_RSP" },
|
||||
{ .fc_id = 432, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC19_AXI_ERR_RSP" },
|
||||
.name = "DCORE3_TPC1_AXI_ERR_RSP" },
|
||||
{ .fc_id = 433, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC20_AXI_ERR_RSP" },
|
||||
.name = "DCORE3_TPC2_AXI_ERR_RSP" },
|
||||
{ .fc_id = 434, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC21_AXI_ERR_RSP" },
|
||||
.name = "DCORE3_TPC3_AXI_ERR_RSP" },
|
||||
{ .fc_id = 435, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC22_AXI_ERR_RSP" },
|
||||
.name = "DCORE3_TPC4_AXI_ERR_RSP" },
|
||||
{ .fc_id = 436, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC23_AXI_ERR_RSP" },
|
||||
.name = "DCORE3_TPC5_AXI_ERR_RSP" },
|
||||
{ .fc_id = 437, .cpu_id = 85, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC24_AXI_ERR_RSP" },
|
||||
.name = "DCORE4_TPC0_AXI_ERR_RSP" },
|
||||
{ .fc_id = 438, .cpu_id = 86, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "AXI_ECC" },
|
||||
{ .fc_id = 439, .cpu_id = 87, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
@@ -965,73 +965,73 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
|
||||
.name = "MME3_CTRL_AXI_ERROR_RESPONSE" },
|
||||
{ .fc_id = 467, .cpu_id = 91, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "MME3_QMAN_SW_ERROR" },
|
||||
{ .fc_id = 468, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 468, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "PSOC_MME_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 469, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 469, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "PSOC_CPU_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 470, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 470, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE3_TPC_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 471, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 471, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE3_NIC_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 472, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 472, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE3_XBAR_MMU_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 473, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 473, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE3_XBAR_DMA_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 474, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 474, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE3_XBAR_IF_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 475, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 475, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE3_XBAR_BANK_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 476, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 476, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE1_XBAR_MMU_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 477, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 477, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE1_XBAR_DMA_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 478, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 478, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE1_XBAR_IF_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 479, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 479, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE1_XBAR_MESH_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 480, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 480, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE1_TPC_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 481, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 481, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE1_NIC_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 482, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 482, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "PMMU_MME_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 483, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 483, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE0_TPC_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 484, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 484, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE0_PCI_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 485, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 485, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE0_XBAR_MMU_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 486, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 486, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE0_XBAR_DMA_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 487, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 487, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE0_XBAR_IF_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 488, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 488, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE0_XBAR_MESH_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 489, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 489, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE2_XBAR_MMU_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 490, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 490, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE2_XBAR_DMA_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 491, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 491, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE2_XBAR_IF_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 492, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 492, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE2_XBAR_BANK_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 493, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 493, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE2_TPC_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 494, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 494, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "PSOC_VID_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 495, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 495, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "PMMU_VID_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 496, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 496, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE3_HBM_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 497, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 497, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE1_XBAR_HBM_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 498, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 498, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE1_HBM_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 499, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 499, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE0_HBM_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 500, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 500, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE2_XBAR_HBM_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 501, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
{ .fc_id = 501, .cpu_id = 92, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DCORE2_HBM_PLL_LOCK_ERR" },
|
||||
{ .fc_id = 502, .cpu_id = 93, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "CPU_AXI_ERR_RSP" },
|
||||
@@ -1298,103 +1298,103 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
|
||||
{ .fc_id = 633, .cpu_id = 130, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC0_BMON_SPMU" },
|
||||
{ .fc_id = 634, .cpu_id = 131, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC0_KERNEL_ERR" },
|
||||
.name = "DCORE0_TPC0_KERNEL_ERR" },
|
||||
{ .fc_id = 635, .cpu_id = 132, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC1_BMON_SPMU" },
|
||||
{ .fc_id = 636, .cpu_id = 133, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC1_KERNEL_ERR" },
|
||||
.name = "DCORE0_TPC1_KERNEL_ERR" },
|
||||
{ .fc_id = 637, .cpu_id = 134, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC2_BMON_SPMU" },
|
||||
{ .fc_id = 638, .cpu_id = 135, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC2_KERNEL_ERR" },
|
||||
.name = "DCORE0_TPC2_KERNEL_ERR" },
|
||||
{ .fc_id = 639, .cpu_id = 136, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC3_BMON_SPMU" },
|
||||
{ .fc_id = 640, .cpu_id = 137, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC3_KERNEL_ERR" },
|
||||
.name = "DCORE0_TPC3_KERNEL_ERR" },
|
||||
{ .fc_id = 641, .cpu_id = 138, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC4_BMON_SPMU" },
|
||||
{ .fc_id = 642, .cpu_id = 139, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC4_KERNEL_ERR" },
|
||||
.name = "DCORE0_TPC4_KERNEL_ERR" },
|
||||
{ .fc_id = 643, .cpu_id = 140, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC5_BMON_SPMU" },
|
||||
{ .fc_id = 644, .cpu_id = 141, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC5_KERNEL_ERR" },
|
||||
.name = "DCORE0_TPC5_KERNEL_ERR" },
|
||||
{ .fc_id = 645, .cpu_id = 150, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC6_BMON_SPMU" },
|
||||
{ .fc_id = 646, .cpu_id = 151, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC6_KERNEL_ERR" },
|
||||
.name = "DCORE1_TPC0_KERNEL_ERR" },
|
||||
{ .fc_id = 647, .cpu_id = 152, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC7_BMON_SPMU" },
|
||||
{ .fc_id = 648, .cpu_id = 153, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC7_KERNEL_ERR" },
|
||||
.name = "DCORE1_TPC1_KERNEL_ERR" },
|
||||
{ .fc_id = 649, .cpu_id = 146, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC8_BMON_SPMU" },
|
||||
{ .fc_id = 650, .cpu_id = 147, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC8_KERNEL_ERR" },
|
||||
.name = "DCORE1_TPC2_KERNEL_ERR" },
|
||||
{ .fc_id = 651, .cpu_id = 148, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC9_BMON_SPMU" },
|
||||
{ .fc_id = 652, .cpu_id = 149, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC9_KERNEL_ERR" },
|
||||
.name = "DCORE1_TPC3_KERNEL_ERR" },
|
||||
{ .fc_id = 653, .cpu_id = 142, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC10_BMON_SPMU" },
|
||||
{ .fc_id = 654, .cpu_id = 143, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC10_KERNEL_ERR" },
|
||||
.name = "DCORE1_TPC4_KERNEL_ERR" },
|
||||
{ .fc_id = 655, .cpu_id = 144, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC11_BMON_SPMU" },
|
||||
{ .fc_id = 656, .cpu_id = 145, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC11_KERNEL_ERR" },
|
||||
.name = "DCORE1_TPC5_KERNEL_ERR" },
|
||||
{ .fc_id = 657, .cpu_id = 162, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC12_BMON_SPMU" },
|
||||
{ .fc_id = 658, .cpu_id = 163, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC12_KERNEL_ERR" },
|
||||
.name = "DCORE2_TPC0_KERNEL_ERR" },
|
||||
{ .fc_id = 659, .cpu_id = 164, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC13_BMON_SPMU" },
|
||||
{ .fc_id = 660, .cpu_id = 165, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC13_KERNEL_ERR" },
|
||||
.name = "DCORE2_TPC1_KERNEL_ERR" },
|
||||
{ .fc_id = 661, .cpu_id = 158, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC14_BMON_SPMU" },
|
||||
{ .fc_id = 662, .cpu_id = 159, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC14_KERNEL_ERR" },
|
||||
.name = "DCORE2_TPC2_KERNEL_ERR" },
|
||||
{ .fc_id = 663, .cpu_id = 160, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC15_BMON_SPMU" },
|
||||
{ .fc_id = 664, .cpu_id = 161, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC15_KERNEL_ERR" },
|
||||
.name = "DCORE2_TPC3_KERNEL_ERR" },
|
||||
{ .fc_id = 665, .cpu_id = 154, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC16_BMON_SPMU" },
|
||||
{ .fc_id = 666, .cpu_id = 155, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC16_KERNEL_ERR" },
|
||||
.name = "DCORE2_TPC4_KERNEL_ERR" },
|
||||
{ .fc_id = 667, .cpu_id = 156, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC17_BMON_SPMU" },
|
||||
{ .fc_id = 668, .cpu_id = 157, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC17_KERNEL_ERR" },
|
||||
.name = "DCORE2_TPC5_KERNEL_ERR" },
|
||||
{ .fc_id = 669, .cpu_id = 166, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC18_BMON_SPMU" },
|
||||
{ .fc_id = 670, .cpu_id = 167, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC18_KERNEL_ERR" },
|
||||
.name = "DCORE3_TPC0_KERNEL_ERR" },
|
||||
{ .fc_id = 671, .cpu_id = 168, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC19_BMON_SPMU" },
|
||||
{ .fc_id = 672, .cpu_id = 169, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC19_KERNEL_ERR" },
|
||||
.name = "DCORE3_TPC1_KERNEL_ERR" },
|
||||
{ .fc_id = 673, .cpu_id = 170, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC20_BMON_SPMU" },
|
||||
{ .fc_id = 674, .cpu_id = 171, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC20_KERNEL_ERR" },
|
||||
.name = "DCORE3_TPC2_KERNEL_ERR" },
|
||||
{ .fc_id = 675, .cpu_id = 172, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC21_BMON_SPMU" },
|
||||
{ .fc_id = 676, .cpu_id = 173, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC21_KERNEL_ERR" },
|
||||
.name = "DCORE3_TPC3_KERNEL_ERR" },
|
||||
{ .fc_id = 677, .cpu_id = 174, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC22_BMON_SPMU" },
|
||||
{ .fc_id = 678, .cpu_id = 175, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC22_KERNEL_ERR" },
|
||||
.name = "DCORE3_TPC4_KERNEL_ERR" },
|
||||
{ .fc_id = 679, .cpu_id = 176, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC23_BMON_SPMU" },
|
||||
{ .fc_id = 680, .cpu_id = 177, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC23_KERNEL_ERR" },
|
||||
.name = "DCORE3_TPC5_KERNEL_ERR" },
|
||||
{ .fc_id = 681, .cpu_id = 178, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "TPC24_BMON_SPMU" },
|
||||
{ .fc_id = 682, .cpu_id = 179, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC24_KERNEL_ERR" },
|
||||
.name = "DCORE4_TPC0_KERNEL_ERR" },
|
||||
{ .fc_id = 683, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "" },
|
||||
{ .fc_id = 684, .cpu_id = 180, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
@@ -1827,8 +1827,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
|
||||
.name = "DEC0_BMON_SPMU" },
|
||||
{ .fc_id = 898, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "DEC1_SPI" },
|
||||
{ .fc_id = 899, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "DEC1_SPI" },
|
||||
{ .fc_id = 899, .cpu_id = 330, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "DEC1_BMON_SPMU" },
|
||||
{ .fc_id = 900, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "DEC2_SPI" },
|
||||
{ .fc_id = 901, .cpu_id = 331, .valid = 1, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
@@ -2377,8 +2377,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
|
||||
.name = "" },
|
||||
{ .fc_id = 1173, .cpu_id = 479, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "" },
|
||||
{ .fc_id = 1174, .cpu_id = 480, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "" },
|
||||
{ .fc_id = 1174, .cpu_id = 480, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "PSOC_DMA_QM" },
|
||||
{ .fc_id = 1175, .cpu_id = 481, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "" },
|
||||
{ .fc_id = 1176, .cpu_id = 482, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
@@ -2442,55 +2442,55 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
|
||||
{ .fc_id = 1205, .cpu_id = 511, .valid = 0, .msg = 0, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "" },
|
||||
{ .fc_id = 1206, .cpu_id = 512, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC0_QM" },
|
||||
.name = "DCORE0_TPC0_QM" },
|
||||
{ .fc_id = 1207, .cpu_id = 513, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC1_QM" },
|
||||
.name = "DCORE0_TPC1_QM" },
|
||||
{ .fc_id = 1208, .cpu_id = 514, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC2_QM" },
|
||||
.name = "DCORE0_TPC2_QM" },
|
||||
{ .fc_id = 1209, .cpu_id = 515, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC3_QM" },
|
||||
.name = "DCORE0_TPC3_QM" },
|
||||
{ .fc_id = 1210, .cpu_id = 516, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC4_QM" },
|
||||
.name = "DCORE0_TPC4_QM" },
|
||||
{ .fc_id = 1211, .cpu_id = 517, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC5_QM" },
|
||||
.name = "DCORE0_TPC5_QM" },
|
||||
{ .fc_id = 1212, .cpu_id = 518, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC6_QM" },
|
||||
.name = "DCORE1_TPC0_QM" },
|
||||
{ .fc_id = 1213, .cpu_id = 519, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC7_QM" },
|
||||
.name = "DCORE1_TPC1_QM" },
|
||||
{ .fc_id = 1214, .cpu_id = 520, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC8_QM" },
|
||||
.name = "DCORE1_TPC2_QM" },
|
||||
{ .fc_id = 1215, .cpu_id = 521, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC9_QM" },
|
||||
.name = "DCORE1_TPC3_QM" },
|
||||
{ .fc_id = 1216, .cpu_id = 522, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC10_QM" },
|
||||
.name = "DCORE1_TPC4_QM" },
|
||||
{ .fc_id = 1217, .cpu_id = 523, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC11_QM" },
|
||||
.name = "DCORE1_TPC5_QM" },
|
||||
{ .fc_id = 1218, .cpu_id = 524, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC12_QM" },
|
||||
.name = "DCORE2_TPC0_QM" },
|
||||
{ .fc_id = 1219, .cpu_id = 525, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC13_QM" },
|
||||
.name = "DCORE2_TPC1_QM" },
|
||||
{ .fc_id = 1220, .cpu_id = 526, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC14_QM" },
|
||||
.name = "DCORE2_TPC2_QM" },
|
||||
{ .fc_id = 1221, .cpu_id = 527, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC15_QM" },
|
||||
.name = "DCORE2_TPC3_QM" },
|
||||
{ .fc_id = 1222, .cpu_id = 528, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC16_QM" },
|
||||
.name = "DCORE2_TPC4_QM" },
|
||||
{ .fc_id = 1223, .cpu_id = 529, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC17_QM" },
|
||||
.name = "DCORE2_TPC5_QM" },
|
||||
{ .fc_id = 1224, .cpu_id = 530, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC18_QM" },
|
||||
.name = "DCORE3_TPC0_QM" },
|
||||
{ .fc_id = 1225, .cpu_id = 531, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC19_QM" },
|
||||
.name = "DCORE3_TPC1_QM" },
|
||||
{ .fc_id = 1226, .cpu_id = 532, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC20_QM" },
|
||||
.name = "DCORE3_TPC2_QM" },
|
||||
{ .fc_id = 1227, .cpu_id = 533, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC21_QM" },
|
||||
.name = "DCORE3_TPC3_QM" },
|
||||
{ .fc_id = 1228, .cpu_id = 534, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC22_QM" },
|
||||
.name = "DCORE3_TPC4_QM" },
|
||||
{ .fc_id = 1229, .cpu_id = 535, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC23_QM" },
|
||||
.name = "DCORE3_TPC5_QM" },
|
||||
{ .fc_id = 1230, .cpu_id = 536, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
.name = "TPC24_QM" },
|
||||
.name = "DCORE4_TPC0_QM" },
|
||||
{ .fc_id = 1231, .cpu_id = 537, .valid = 0, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "" },
|
||||
{ .fc_id = 1232, .cpu_id = 538, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_COMPUTE,
|
||||
@@ -2674,19 +2674,19 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
|
||||
{ .fc_id = 1321, .cpu_id = 627, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_HARD,
|
||||
.name = "DEV_RESET_REQ" },
|
||||
{ .fc_id = 1322, .cpu_id = 628, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "ARC_PWR_BRK_ENTRY" },
|
||||
.name = "PWR_BRK_ENTRY" },
|
||||
{ .fc_id = 1323, .cpu_id = 629, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "ARC_PWR_BRK_EXT" },
|
||||
.name = "PWR_BRK_EXT" },
|
||||
{ .fc_id = 1324, .cpu_id = 630, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "ARC_PWR_RD_MODE0" },
|
||||
.name = "PWR_RD_MODE0" },
|
||||
{ .fc_id = 1325, .cpu_id = 631, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "ARC_PWR_RD_MODE1" },
|
||||
.name = "PWR_RD_MODE1" },
|
||||
{ .fc_id = 1326, .cpu_id = 632, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "ARC_PWR_RD_MODE2" },
|
||||
.name = "PWR_RD_MODE2" },
|
||||
{ .fc_id = 1327, .cpu_id = 633, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "ARC_PWR_RD_MODE3" },
|
||||
.name = "PWR_RD_MODE3" },
|
||||
{ .fc_id = 1328, .cpu_id = 634, .valid = 1, .msg = 1, .reset = EVENT_RESET_TYPE_NONE,
|
||||
.name = "ARC_EQ_HEARTBEAT" },
|
||||
.name = "EQ_HEARTBEAT" },
|
||||
};

#endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */

@@ -45,6 +45,13 @@
#define GAUDI2_ARM_RX_MB_OFFSET		(GAUDI2_ARM_RX_MB_ADDR - \
					 GAUDI2_SP_SRAM_BASE_ADDR)

#define POWER_MODE_LEVELS { \
	150000, /* 00 */ \
	250000, /* 01 */ \
	400000, /* 10 */ \
	/* 11: Normal mode */ \
}

enum gaudi2_fw_status {
	GAUDI2_PID_STATUS_UP = 0x1, /* PID on ARC0 is up */
	GAUDI2_ARM_STATUS_UP = 0x2, /* ARM Linux Boot complete */
@@ -52,26 +59,6 @@ enum gaudi2_fw_status {
	GAUDI2_STATUS_LAST = 0xFF
};

struct gaudi2_cold_rst_data {
	union {
		struct {
			u32 recovery_flag: 1;
			u32 validation_flag: 1;
			u32 efuse_read_flag: 1;
			u32 spsram_init_done : 1;
			u32 fake_security_enable : 1;
			u32 fake_sig_validation_en : 1;
			u32 bist_skip_enable : 1;
			u32 reserved1 : 1;
			u32 fake_bis_compliant : 1;
			u32 wd_rst_cause_arm : 1;
			u32 wd_rst_cause_arcpid : 1;
			u32 reserved : 21;
		};
		__le32 data;
	};
};

enum gaudi2_rst_src {
	HL_COLD_RST = 1,
	HL_MANUAL_RST = 2,

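A note on the POWER_MODE_LEVELS table added above: the two-bit power-read-mode field selects one of three reduced levels (00, 01, 10), while 11 is normal mode with no level entry; the matching PWR_RD_MODE0..3 events appear earlier in the interrupt map. The standalone sketch below only illustrates that indexing; the helper name and treating mode 11 as "no level" are assumptions for illustration, not taken from the driver.

/* Illustration only: index a POWER_MODE_LEVELS-style table by a 2-bit mode. */
#include <stdio.h>

static const unsigned int power_mode_levels[] = { 150000, 250000, 400000 }; /* 00, 01, 10 */

/* Assumed helper: returns the level for modes 0..2, 0 for mode 3 (normal mode). */
static unsigned int power_mode_level(unsigned int mode)
{
	return (mode < 3) ? power_mode_levels[mode] : 0;
}

int main(void)
{
	for (unsigned int mode = 0; mode < 4; mode++)
		printf("PWR_RD_MODE%u -> level %u\n", mode, power_mode_level(mode));
	return 0;
}
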
@ -58,4 +58,12 @@
|
||||
#define mmWD_GPIO_DATAOUT_REG mmPSOC_GPIO3_DATAOUT
|
||||
#define mmSTM_PROFILER_SPE_REG mmPSOC_STM_STMSPER
|
||||
|
||||
/* Registers below are used to pass the boot_if data between ARM and ARC1 */
|
||||
#define mmARM_MSG_BOOT_ERR_SET mmCPU_IF_SPECIAL_GLBL_SPARE_0
|
||||
#define mmARM_MSG_BOOT_ERR_CLR mmCPU_IF_SPECIAL_GLBL_SPARE_1
|
||||
#define mmARM_MSG_BOOT_DEV_STS_SET mmCPU_IF_SPECIAL_GLBL_SPARE_2
|
||||
#define mmARM_MSG_BOOT_DEV_STS_CLR mmCPU_IF_SPECIAL_GLBL_SPARE_3
|
||||
#define mmMGMT_MSG_BOOT_ERR mmCPU_MSTR_IF_SPECIAL_GLBL_SPARE_0
|
||||
#define mmMGMT_MSG_BOOT_DEV_STS mmCPU_MSTR_IF_SPECIAL_GLBL_SPARE_1
|
||||
|
||||
#endif /* GAUDI2_REG_MAP_H_ */
|
||||
|
@@ -25,7 +25,8 @@ enum hl_revision_id {
	REV_ID_INVALID = 0x00,
	REV_ID_A = 0x01,
	REV_ID_B = 0x02,
	REV_ID_C = 0x03
	REV_ID_C = 0x03,
	REV_ID_D = 0x04
};

#endif /* INCLUDE_PCI_GENERAL_H_ */

@@ -1,19 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (C) 2023 Intel Corporation
# Copyright (C) 2023-2024 Intel Corporation

intel_vpu-y := \
	ivpu_drv.o \
	ivpu_fw.o \
	ivpu_fw_log.o \
	ivpu_gem.o \
	ivpu_hw_37xx.o \
	ivpu_hw_40xx.o \
	ivpu_hw.o \
	ivpu_hw_btrs.o \
	ivpu_hw_ip.o \
	ivpu_ipc.o \
	ivpu_job.o \
	ivpu_jsm_msg.o \
	ivpu_mmu.o \
	ivpu_mmu_context.o \
	ivpu_pm.o
	ivpu_ms.o \
	ivpu_pm.o \
	ivpu_sysfs.o

intel_vpu-$(CONFIG_DEBUG_FS) += ivpu_debugfs.o

@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2020-2023 Intel Corporation
|
||||
* Copyright (C) 2020-2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <linux/debugfs.h>
|
||||
@ -145,6 +145,30 @@ static const struct file_operations dvfs_mode_fops = {
|
||||
.write = dvfs_mode_fops_write,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
fw_dyndbg_fops_write(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
|
||||
{
|
||||
struct ivpu_device *vdev = file->private_data;
|
||||
char buffer[VPU_DYNDBG_CMD_MAX_LEN] = {};
|
||||
int ret;
|
||||
|
||||
if (size >= VPU_DYNDBG_CMD_MAX_LEN)
|
||||
return -EINVAL;
|
||||
|
||||
ret = strncpy_from_user(buffer, user_buf, size);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ivpu_jsm_dyndbg_control(vdev, buffer, size);
|
||||
return size;
|
||||
}
|
||||
|
||||
static const struct file_operations fw_dyndbg_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.write = fw_dyndbg_fops_write,
|
||||
};
|
||||
|
||||
static int fw_log_show(struct seq_file *s, void *v)
|
||||
{
|
||||
struct ivpu_device *vdev = s->private;
|
||||
@ -335,6 +359,61 @@ static const struct file_operations ivpu_reset_engine_fops = {
|
||||
.write = ivpu_reset_engine_fn,
|
||||
};
|
||||
|
||||
static ssize_t
|
||||
ivpu_resume_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
|
||||
{
|
||||
struct ivpu_device *vdev = file->private_data;
|
||||
|
||||
if (!size)
|
||||
return -EINVAL;
|
||||
|
||||
if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
|
||||
return -ENODEV;
|
||||
if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COPY))
|
||||
return -ENODEV;
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static const struct file_operations ivpu_resume_engine_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.write = ivpu_resume_engine_fn,
|
||||
};
|
||||
|
||||
static int dct_active_get(void *data, u64 *active_percent)
|
||||
{
|
||||
struct ivpu_device *vdev = data;
|
||||
|
||||
*active_percent = vdev->pm->dct_active_percent;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dct_active_set(void *data, u64 active_percent)
|
||||
{
|
||||
struct ivpu_device *vdev = data;
|
||||
int ret;
|
||||
|
||||
if (active_percent > 100)
|
||||
return -EINVAL;
|
||||
|
||||
ret = ivpu_rpm_get(vdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (active_percent)
|
||||
ret = ivpu_pm_dct_enable(vdev, active_percent);
|
||||
else
|
||||
ret = ivpu_pm_dct_disable(vdev);
|
||||
|
||||
ivpu_rpm_put(vdev);
|
||||
|
||||
return ret;
|
||||
}
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
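The dct debugfs attribute wired up above takes an active-time percentage: dct_active_set() rejects values above 100, calls ivpu_pm_dct_enable() for 1..100 and ivpu_pm_dct_disable() for 0, holding a runtime-PM reference around the call. A hypothetical userspace snippet is sketched below; the debugfs path is an assumption and depends on the accel device node.

/* Illustration only: request 30% active time via the (assumed) dct debugfs node. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/accel/accel0/dct"; /* assumed path */
	const char *val = "30";	/* 1..100 enables DCT at that percentage, "0" disables it */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open dct");
		return 1;
	}
	if (write(fd, val, strlen(val)) < 0)
		perror("write dct");
	close(fd);
	return 0;
}
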
void ivpu_debugfs_init(struct ivpu_device *vdev)
|
||||
{
|
||||
struct dentry *debugfs_root = vdev->drm.debugfs_root;
|
||||
@ -347,6 +426,8 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
|
||||
debugfs_create_file("dvfs_mode", 0200, debugfs_root, vdev,
|
||||
&dvfs_mode_fops);
|
||||
|
||||
debugfs_create_file("fw_dyndbg", 0200, debugfs_root, vdev,
|
||||
&fw_dyndbg_fops);
|
||||
debugfs_create_file("fw_log", 0644, debugfs_root, vdev,
|
||||
&fw_log_fops);
|
||||
debugfs_create_file("fw_trace_destination_mask", 0200, debugfs_root, vdev,
|
||||
@ -358,8 +439,12 @@ void ivpu_debugfs_init(struct ivpu_device *vdev)
|
||||
|
||||
debugfs_create_file("reset_engine", 0200, debugfs_root, vdev,
|
||||
&ivpu_reset_engine_fops);
|
||||
debugfs_create_file("resume_engine", 0200, debugfs_root, vdev,
|
||||
&ivpu_resume_engine_fops);
|
||||
|
||||
if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX)
|
||||
if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX) {
|
||||
debugfs_create_file("fw_profiling_freq_drive", 0200,
|
||||
debugfs_root, vdev, &fw_profiling_freq_fops);
|
||||
debugfs_create_file("dct", 0644, debugfs_root, vdev, &ivpu_dct_fops);
|
||||
}
|
||||
}
|
||||
|
@ -26,7 +26,9 @@
|
||||
#include "ivpu_jsm_msg.h"
|
||||
#include "ivpu_mmu.h"
|
||||
#include "ivpu_mmu_context.h"
|
||||
#include "ivpu_ms.h"
|
||||
#include "ivpu_pm.h"
|
||||
#include "ivpu_sysfs.h"
|
||||
|
||||
#ifndef DRIVER_VERSION_STR
|
||||
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
|
||||
@ -51,10 +53,18 @@ u8 ivpu_pll_max_ratio = U8_MAX;
|
||||
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
|
||||
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
|
||||
|
||||
int ivpu_sched_mode;
|
||||
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
|
||||
MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");
|
||||
|
||||
bool ivpu_disable_mmu_cont_pages;
|
||||
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
|
||||
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
|
||||
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
|
||||
|
||||
bool ivpu_force_snoop;
|
||||
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
|
||||
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
|
||||
|
||||
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
|
||||
{
|
||||
struct ivpu_device *vdev = file_priv->vdev;
|
||||
@ -74,7 +84,6 @@ static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *fi
|
||||
ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
|
||||
|
||||
ivpu_cmdq_release_all_locked(file_priv);
|
||||
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
|
||||
ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
|
||||
ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
|
||||
file_priv->bound = false;
|
||||
@ -97,6 +106,7 @@ static void file_priv_release(struct kref *ref)
|
||||
mutex_unlock(&vdev->context_list_lock);
|
||||
pm_runtime_put_autosuspend(vdev->drm.dev);
|
||||
|
||||
mutex_destroy(&file_priv->ms_lock);
|
||||
mutex_destroy(&file_priv->lock);
|
||||
kfree(file_priv);
|
||||
}
|
||||
@ -119,7 +129,7 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
|
||||
{
|
||||
switch (args->index) {
|
||||
case DRM_IVPU_CAP_METRIC_STREAMER:
|
||||
args->value = 0;
|
||||
args->value = 1;
|
||||
break;
|
||||
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
|
||||
args->value = 1;
|
||||
@ -228,10 +238,13 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
|
||||
goto err_dev_exit;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&file_priv->ms_instance_list);
|
||||
|
||||
file_priv->vdev = vdev;
|
||||
file_priv->bound = true;
|
||||
kref_init(&file_priv->ref);
|
||||
mutex_init(&file_priv->lock);
|
||||
mutex_init(&file_priv->ms_lock);
|
||||
|
||||
mutex_lock(&vdev->context_list_lock);
|
||||
|
||||
@ -260,6 +273,7 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
|
||||
xa_erase_irq(&vdev->context_xa, ctx_id);
|
||||
err_unlock:
|
||||
mutex_unlock(&vdev->context_list_lock);
|
||||
mutex_destroy(&file_priv->ms_lock);
|
||||
mutex_destroy(&file_priv->lock);
|
||||
kfree(file_priv);
|
||||
err_dev_exit:
|
||||
@ -275,6 +289,7 @@ static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
|
||||
ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
|
||||
file_priv->ctx.id, current->comm, task_pid_nr(current));
|
||||
|
||||
ivpu_ms_cleanup(file_priv);
|
||||
ivpu_file_priv_put(&file_priv);
|
||||
}
|
||||
|
||||
@ -285,6 +300,10 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
|
||||
DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
|
||||
};
|
||||
|
||||
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
|
||||
@ -301,7 +320,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
|
||||
while (1) {
|
||||
ivpu_ipc_irq_handler(vdev, NULL);
|
||||
ivpu_ipc_irq_handler(vdev);
|
||||
ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
|
||||
if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
|
||||
break;
|
||||
@ -323,6 +342,21 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ivpu_hw_sched_init(struct ivpu_device *vdev)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
|
||||
ret = ivpu_jsm_hws_setup_priority_bands(vdev);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* ivpu_boot() - Start VPU firmware
|
||||
* @vdev: VPU device
|
||||
@ -356,6 +390,15 @@ int ivpu_boot(struct ivpu_device *vdev)
|
||||
enable_irq(vdev->irq);
|
||||
ivpu_hw_irq_enable(vdev);
|
||||
ivpu_ipc_enable(vdev);
|
||||
|
||||
if (ivpu_fw_is_cold_boot(vdev)) {
|
||||
ret = ivpu_pm_dct_init(vdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return ivpu_hw_sched_init(vdev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -408,11 +451,52 @@ static const struct drm_driver driver = {
|
||||
.minor = DRM_IVPU_DRIVER_MINOR,
|
||||
};
|
||||
|
||||
static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_file_priv *file_priv;
|
||||
unsigned long ctx_id;
|
||||
|
||||
mutex_lock(&vdev->context_list_lock);
|
||||
|
||||
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
|
||||
if (!file_priv->has_mmu_faults || file_priv->aborted)
|
||||
continue;
|
||||
|
||||
mutex_lock(&file_priv->lock);
|
||||
ivpu_context_abort_locked(file_priv);
|
||||
file_priv->aborted = true;
|
||||
mutex_unlock(&file_priv->lock);
|
||||
}
|
||||
|
||||
mutex_unlock(&vdev->context_list_lock);
|
||||
}
|
||||
|
||||
static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
|
||||
{
|
||||
struct ivpu_device *vdev = arg;
|
||||
u8 irq_src;
|
||||
|
||||
return ivpu_ipc_irq_thread_handler(vdev);
|
||||
if (kfifo_is_empty(&vdev->hw->irq.fifo))
|
||||
return IRQ_NONE;
|
||||
|
||||
while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
|
||||
switch (irq_src) {
|
||||
case IVPU_HW_IRQ_SRC_IPC:
|
||||
ivpu_ipc_irq_thread_handler(vdev);
|
||||
break;
|
||||
case IVPU_HW_IRQ_SRC_MMU_EVTQ:
|
||||
ivpu_context_abort_invalid(vdev);
|
||||
break;
|
||||
case IVPU_HW_IRQ_SRC_DCT:
|
||||
ivpu_pm_dct_irq_thread_handler(vdev);
|
||||
break;
|
||||
default:
|
||||
ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int ivpu_irq_init(struct ivpu_device *vdev)
|
||||
@ -426,9 +510,11 @@ static int ivpu_irq_init(struct ivpu_device *vdev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ivpu_irq_handlers_init(vdev);
|
||||
|
||||
vdev->irq = pci_irq_vector(pdev, 0);
|
||||
|
||||
ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
|
||||
ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
|
||||
ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
|
||||
@ -505,13 +591,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
|
||||
if (!vdev->pm)
|
||||
return -ENOMEM;
|
||||
|
||||
if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
|
||||
vdev->hw->ops = &ivpu_hw_40xx_ops;
|
||||
if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
|
||||
vdev->hw->dma_bits = 48;
|
||||
} else {
|
||||
vdev->hw->ops = &ivpu_hw_37xx_ops;
|
||||
else
|
||||
vdev->hw->dma_bits = 38;
|
||||
}
|
||||
|
||||
vdev->platform = IVPU_PLATFORM_INVALID;
|
||||
vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
|
||||
@ -540,7 +623,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
|
||||
goto err_xa_destroy;
|
||||
|
||||
/* Init basic HW info based on buttress registers which are accessible before power up */
|
||||
ret = ivpu_hw_info_init(vdev);
|
||||
ret = ivpu_hw_init(vdev);
|
||||
if (ret)
|
||||
goto err_xa_destroy;
|
||||
|
||||
@ -612,13 +695,14 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
|
||||
|
||||
static void ivpu_dev_fini(struct ivpu_device *vdev)
|
||||
{
|
||||
ivpu_jobs_abort_all(vdev);
|
||||
ivpu_pm_cancel_recovery(vdev);
|
||||
ivpu_pm_disable(vdev);
|
||||
ivpu_prepare_for_reset(vdev);
|
||||
ivpu_shutdown(vdev);
|
||||
|
||||
ivpu_jobs_abort_all(vdev);
|
||||
ivpu_ms_cleanup_all(vdev);
|
||||
ivpu_job_done_consumer_fini(vdev);
|
||||
ivpu_pm_cancel_recovery(vdev);
|
||||
ivpu_bo_unbind_all_user_contexts(vdev);
|
||||
|
||||
ivpu_ipc_fini(vdev);
|
||||
@ -658,6 +742,7 @@ static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
return ret;
|
||||
|
||||
ivpu_debugfs_init(vdev);
|
||||
ivpu_sysfs_init(vdev);
|
||||
|
||||
ret = drm_dev_register(&vdev->drm, 0);
|
||||
if (ret) {
|
||||
|
@ -27,8 +27,15 @@
|
||||
#define PCI_DEVICE_ID_ARL 0xad1d
|
||||
#define PCI_DEVICE_ID_LNL 0x643e
|
||||
|
||||
#define IVPU_HW_37XX 37
|
||||
#define IVPU_HW_40XX 40
|
||||
#define IVPU_HW_IP_37XX 37
|
||||
#define IVPU_HW_IP_40XX 40
|
||||
#define IVPU_HW_IP_50XX 50
|
||||
#define IVPU_HW_IP_60XX 60
|
||||
|
||||
#define IVPU_HW_IP_REV_LNL_B0 4
|
||||
|
||||
#define IVPU_HW_BTRS_MTL 1
|
||||
#define IVPU_HW_BTRS_LNL 2
|
||||
|
||||
#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
|
||||
/* SSID 1 is used by the VPU to represent reserved context */
|
||||
@ -39,7 +46,11 @@
|
||||
#define IVPU_MIN_DB 1
|
||||
#define IVPU_MAX_DB 255
|
||||
|
||||
#define IVPU_NUM_ENGINES 2
|
||||
#define IVPU_NUM_ENGINES 2
|
||||
#define IVPU_NUM_PRIORITIES 4
|
||||
#define IVPU_NUM_CMDQS_PER_CTX (IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
|
||||
|
||||
#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))
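IVPU_CMDQ_INDEX() above lays out one command queue per (engine, priority) pair in engine-major order; the ivpu_file_priv cmdq array further below is resized accordingly to IVPU_NUM_CMDQS_PER_CTX. A small standalone check of that mapping, with the constants copied from the macros above:

/* Worked example of the engine-major command-queue indexing (values from the header above). */
#include <assert.h>

#define IVPU_NUM_ENGINES	2
#define IVPU_NUM_PRIORITIES	4
#define IVPU_NUM_CMDQS_PER_CTX	(IVPU_NUM_ENGINES * IVPU_NUM_PRIORITIES)
#define IVPU_CMDQ_INDEX(engine, priority) ((engine) * IVPU_NUM_PRIORITIES + (priority))

int main(void)
{
	/* Engine 0 owns indexes 0..3, engine 1 owns 4..7. */
	assert(IVPU_CMDQ_INDEX(0, 0) == 0);
	assert(IVPU_CMDQ_INDEX(0, 3) == 3);
	assert(IVPU_CMDQ_INDEX(1, 0) == 4);
	assert(IVPU_CMDQ_INDEX(1, 2) == 6);
	assert(IVPU_NUM_CMDQS_PER_CTX == 8);
	return 0;
}
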
#define IVPU_PLATFORM_SILICON 0
|
||||
#define IVPU_PLATFORM_SIMICS 2
|
||||
@ -93,6 +104,7 @@ struct ivpu_wa_table {
|
||||
bool interrupt_clear_with_0;
|
||||
bool disable_clock_relinquish;
|
||||
bool disable_d0i3_msg;
|
||||
bool wp0_during_power_up;
|
||||
};
|
||||
|
||||
struct ivpu_hw_info;
|
||||
@ -131,11 +143,13 @@ struct ivpu_device {
|
||||
|
||||
atomic64_t unique_id_counter;
|
||||
|
||||
ktime_t busy_start_ts;
|
||||
ktime_t busy_time;
|
||||
|
||||
struct {
|
||||
int boot;
|
||||
int jsm;
|
||||
int tdr;
|
||||
int reschedule_suspend;
|
||||
int autosuspend;
|
||||
int d0i3_entry_msg;
|
||||
} timeout;
|
||||
@ -149,22 +163,31 @@ struct ivpu_file_priv {
|
||||
struct kref ref;
|
||||
struct ivpu_device *vdev;
|
||||
struct mutex lock; /* Protects cmdq */
|
||||
struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
|
||||
struct ivpu_cmdq *cmdq[IVPU_NUM_CMDQS_PER_CTX];
|
||||
struct ivpu_mmu_context ctx;
|
||||
struct mutex ms_lock; /* Protects ms_instance_list, ms_info_bo */
|
||||
struct list_head ms_instance_list;
|
||||
struct ivpu_bo *ms_info_bo;
|
||||
bool has_mmu_faults;
|
||||
bool bound;
|
||||
bool aborted;
|
||||
};
|
||||
|
||||
extern int ivpu_dbg_mask;
|
||||
extern u8 ivpu_pll_min_ratio;
|
||||
extern u8 ivpu_pll_max_ratio;
|
||||
extern int ivpu_sched_mode;
|
||||
extern bool ivpu_disable_mmu_cont_pages;
|
||||
extern bool ivpu_force_snoop;
|
||||
|
||||
#define IVPU_TEST_MODE_FW_TEST BIT(0)
|
||||
#define IVPU_TEST_MODE_NULL_HW BIT(1)
|
||||
#define IVPU_TEST_MODE_NULL_SUBMISSION BIT(2)
|
||||
#define IVPU_TEST_MODE_D0I3_MSG_DISABLE BIT(4)
|
||||
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
|
||||
#define IVPU_TEST_MODE_PREEMPTION_DISABLE BIT(6)
|
||||
#define IVPU_TEST_MODE_HWS_EXTRA_EVENTS BIT(7)
|
||||
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
|
||||
extern int ivpu_test_mode;
|
||||
|
||||
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
|
||||
@ -184,16 +207,32 @@ static inline u16 ivpu_device_id(struct ivpu_device *vdev)
|
||||
return to_pci_dev(vdev->drm.dev)->device;
|
||||
}
|
||||
|
||||
static inline int ivpu_hw_gen(struct ivpu_device *vdev)
|
||||
static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
|
||||
{
|
||||
switch (ivpu_device_id(vdev)) {
|
||||
case PCI_DEVICE_ID_MTL:
|
||||
case PCI_DEVICE_ID_ARL:
|
||||
return IVPU_HW_37XX;
|
||||
return IVPU_HW_IP_37XX;
|
||||
case PCI_DEVICE_ID_LNL:
|
||||
return IVPU_HW_40XX;
|
||||
return IVPU_HW_IP_40XX;
|
||||
default:
|
||||
ivpu_err(vdev, "Unknown NPU device\n");
|
||||
dump_stack();
|
||||
ivpu_err(vdev, "Unknown NPU IP generation\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
|
||||
{
|
||||
switch (ivpu_device_id(vdev)) {
|
||||
case PCI_DEVICE_ID_MTL:
|
||||
case PCI_DEVICE_ID_ARL:
|
||||
return IVPU_HW_BTRS_MTL;
|
||||
case PCI_DEVICE_ID_LNL:
|
||||
return IVPU_HW_BTRS_LNL;
|
||||
default:
|
||||
dump_stack();
|
||||
ivpu_err(vdev, "Unknown buttress generation\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
@ -231,4 +270,9 @@ static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
|
||||
return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
|
||||
}
|
||||
|
||||
static inline bool ivpu_is_force_snoop_enabled(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_force_snoop;
|
||||
}
|
||||
|
||||
#endif /* __IVPU_DRV_H__ */
|
||||
|
@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2020-2023 Intel Corporation
|
||||
* Copyright (C) 2020-2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
@ -44,6 +44,8 @@
|
||||
#define IVPU_FW_CHECK_API_VER_LT(vdev, fw_hdr, name, major, minor) \
|
||||
ivpu_fw_check_api_ver_lt(vdev, fw_hdr, #name, VPU_##name##_API_VER_INDEX, major, minor)
|
||||
|
||||
#define IVPU_FOCUS_PRESENT_TIMER_MS 1000
|
||||
|
||||
static char *ivpu_firmware;
|
||||
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
|
||||
MODULE_PARM_DESC(firmware, "NPU firmware binary in /lib/firmware/..");
|
||||
@ -52,10 +54,10 @@ static struct {
|
||||
int gen;
|
||||
const char *name;
|
||||
} fw_names[] = {
|
||||
{ IVPU_HW_37XX, "vpu_37xx.bin" },
|
||||
{ IVPU_HW_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
|
||||
{ IVPU_HW_40XX, "vpu_40xx.bin" },
|
||||
{ IVPU_HW_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
|
||||
{ IVPU_HW_IP_37XX, "vpu_37xx.bin" },
|
||||
{ IVPU_HW_IP_37XX, "intel/vpu/vpu_37xx_v0.0.bin" },
|
||||
{ IVPU_HW_IP_40XX, "vpu_40xx.bin" },
|
||||
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
|
||||
};
|
||||
|
||||
static int ivpu_fw_request(struct ivpu_device *vdev)
|
||||
@ -71,7 +73,7 @@ static int ivpu_fw_request(struct ivpu_device *vdev)
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
|
||||
if (fw_names[i].gen != ivpu_hw_gen(vdev))
|
||||
if (fw_names[i].gen != ivpu_hw_ip_gen(vdev))
|
||||
continue;
|
||||
|
||||
ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i].name, vdev->drm.dev);
|
||||
@ -121,6 +123,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
|
||||
{
|
||||
if (addr < range_start || addr + size > range_start + range_size)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int ivpu_fw_parse(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_fw_info *fw = vdev->fw;
|
||||
@ -200,10 +210,27 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
|
||||
|
||||
fw->dvfs_mode = 0;
|
||||
|
||||
fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
|
||||
fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
|
||||
|
||||
if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
|
||||
fw_hdr->ro_section_size,
|
||||
fw_hdr->image_load_address,
|
||||
fw_hdr->image_size)) {
|
||||
ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
|
||||
fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
fw->read_only_addr = fw_hdr->ro_section_start_address;
|
||||
fw->read_only_size = fw_hdr->ro_section_size;
|
||||
|
||||
ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
|
||||
fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
|
||||
ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
|
||||
fw->runtime_addr, image_load_addr, fw->entry_point);
|
||||
ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
|
||||
fw->read_only_addr, fw->read_only_size);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -241,7 +268,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
|
||||
ivpu_hw_range_init(&vdev->hw->ranges.global, start, size);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -265,6 +292,13 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,
|
||||
fw->read_only_size);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Failed to set firmware image read-only\n");
|
||||
goto err_free_fw_mem;
|
||||
}
|
||||
|
||||
fw->mem_log_crit = ivpu_bo_create_global(vdev, IVPU_FW_CRITICAL_BUFFER_SIZE,
|
||||
DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
|
||||
if (!fw->mem_log_crit) {
|
||||
@ -464,6 +498,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
|
||||
boot_params->punit_telemetry_sram_size);
|
||||
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
|
||||
boot_params->vpu_telemetry_enable);
|
||||
ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_scheduling_mode = 0x%x\n",
|
||||
boot_params->vpu_scheduling_mode);
|
||||
ivpu_dbg(vdev, FW_BOOT, "boot_params.dvfs_mode = %u\n",
|
||||
boot_params->dvfs_mode);
|
||||
ivpu_dbg(vdev, FW_BOOT, "boot_params.d0i3_delayed_entry = %d\n",
|
||||
@ -504,7 +540,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
|
||||
|
||||
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
|
||||
boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
|
||||
boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);
|
||||
boot_params->frequency = ivpu_hw_pll_freq_get(vdev);
|
||||
|
||||
/*
|
||||
* This param is a debug firmware feature. It switches default clock
|
||||
@ -561,9 +597,12 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
|
||||
boot_params->verbose_tracing_buff_addr = vdev->fw->mem_log_verb->vpu_addr;
|
||||
boot_params->verbose_tracing_buff_size = ivpu_bo_size(vdev->fw->mem_log_verb);
|
||||
|
||||
boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
|
||||
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
|
||||
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
|
||||
boot_params->punit_telemetry_sram_base = ivpu_hw_telemetry_offset_get(vdev);
|
||||
boot_params->punit_telemetry_sram_size = ivpu_hw_telemetry_size_get(vdev);
|
||||
boot_params->vpu_telemetry_enable = ivpu_hw_telemetry_enable_get(vdev);
|
||||
boot_params->vpu_scheduling_mode = vdev->hw->sched_mode;
|
||||
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
|
||||
boot_params->vpu_focus_present_timer_ms = IVPU_FOCUS_PRESENT_TIMER_MS;
|
||||
boot_params->dvfs_mode = vdev->fw->dvfs_mode;
|
||||
if (!IVPU_WA(disable_d0i3_msg))
|
||||
boot_params->d0i3_delayed_entry = 1;
|
||||
|
@ -28,6 +28,10 @@ struct ivpu_fw_info {
|
||||
u32 trace_destination_mask;
|
||||
u64 trace_hw_component_mask;
|
||||
u32 dvfs_mode;
|
||||
u32 primary_preempt_buf_size;
|
||||
u32 secondary_preempt_buf_size;
|
||||
u64 read_only_addr;
|
||||
u32 read_only_size;
|
||||
};
|
||||
|
||||
int ivpu_fw_init(struct ivpu_device *vdev);
|
||||
|
@ -60,16 +60,19 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
|
||||
return bo->flags & DRM_IVPU_BO_CACHE_MASK;
|
||||
}
|
||||
|
||||
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
|
||||
{
|
||||
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
|
||||
}
|
||||
|
||||
static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
|
||||
{
|
||||
return to_ivpu_device(bo->base.base.dev);
|
||||
}
|
||||
|
||||
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
|
||||
{
|
||||
if (ivpu_is_force_snoop_enabled(ivpu_bo_to_vdev(bo)))
|
||||
return true;
|
||||
|
||||
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
|
||||
}
|
||||
|
||||
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
|
||||
{
|
||||
if (vpu_addr < bo->vpu_addr)
|
||||
|
drivers/accel/ivpu/ivpu_hw.c (new file, 331 lines)
@@ -0,0 +1,331 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2020 - 2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#include "ivpu_drv.h"
|
||||
#include "ivpu_hw.h"
|
||||
#include "ivpu_hw_btrs.h"
|
||||
#include "ivpu_hw_ip.h"
|
||||
|
||||
#include <linux/dmi.h>
|
||||
|
||||
static char *platform_to_str(u32 platform)
|
||||
{
|
||||
switch (platform) {
|
||||
case IVPU_PLATFORM_SILICON:
|
||||
return "SILICON";
|
||||
case IVPU_PLATFORM_SIMICS:
|
||||
return "SIMICS";
|
||||
case IVPU_PLATFORM_FPGA:
|
||||
return "FPGA";
|
||||
default:
|
||||
return "Invalid platform";
|
||||
}
|
||||
}
|
||||
|
||||
static const struct dmi_system_id dmi_platform_simulation[] = {
|
||||
{
|
||||
.ident = "Intel Simics",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
|
||||
DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
|
||||
DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
|
||||
},
|
||||
},
|
||||
{
|
||||
.ident = "Intel Simics",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_NAME, "Simics"),
|
||||
},
|
||||
},
|
||||
{ }
|
||||
};
|
||||
|
||||
static void platform_init(struct ivpu_device *vdev)
|
||||
{
|
||||
if (dmi_check_system(dmi_platform_simulation))
|
||||
vdev->platform = IVPU_PLATFORM_SIMICS;
|
||||
else
|
||||
vdev->platform = IVPU_PLATFORM_SILICON;
|
||||
|
||||
ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
|
||||
platform_to_str(vdev->platform), vdev->platform);
|
||||
}
|
||||
|
||||
static void wa_init(struct ivpu_device *vdev)
|
||||
{
|
||||
vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
|
||||
vdev->wa.clear_runtime_mem = false;
|
||||
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);
|
||||
|
||||
if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
|
||||
ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
|
||||
vdev->wa.disable_clock_relinquish = true;
|
||||
|
||||
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
|
||||
vdev->wa.wp0_during_power_up = true;
|
||||
|
||||
IVPU_PRINT_WA(punit_disabled);
|
||||
IVPU_PRINT_WA(clear_runtime_mem);
|
||||
IVPU_PRINT_WA(interrupt_clear_with_0);
|
||||
IVPU_PRINT_WA(disable_clock_relinquish);
|
||||
IVPU_PRINT_WA(wp0_during_power_up);
|
||||
}
|
||||
|
||||
static void timeouts_init(struct ivpu_device *vdev)
|
||||
{
|
||||
if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) {
|
||||
vdev->timeout.boot = -1;
|
||||
vdev->timeout.jsm = -1;
|
||||
vdev->timeout.tdr = -1;
|
||||
vdev->timeout.autosuspend = -1;
|
||||
vdev->timeout.d0i3_entry_msg = -1;
|
||||
} else if (ivpu_is_fpga(vdev)) {
|
||||
vdev->timeout.boot = 100000;
|
||||
vdev->timeout.jsm = 50000;
|
||||
vdev->timeout.tdr = 2000000;
|
||||
vdev->timeout.autosuspend = -1;
|
||||
vdev->timeout.d0i3_entry_msg = 500;
|
||||
} else if (ivpu_is_simics(vdev)) {
|
||||
vdev->timeout.boot = 50;
|
||||
vdev->timeout.jsm = 500;
|
||||
vdev->timeout.tdr = 10000;
|
||||
vdev->timeout.autosuspend = -1;
|
||||
vdev->timeout.d0i3_entry_msg = 100;
|
||||
} else {
|
||||
vdev->timeout.boot = 1000;
|
||||
vdev->timeout.jsm = 500;
|
||||
vdev->timeout.tdr = 2000;
|
||||
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
|
||||
vdev->timeout.autosuspend = 10;
|
||||
else
|
||||
vdev->timeout.autosuspend = 100;
|
||||
vdev->timeout.d0i3_entry_msg = 5;
|
||||
}
|
||||
}

static void memory_ranges_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
		ivpu_hw_range_init(&vdev->hw->ranges.user, 0xc0000000, 255 * SZ_1M);
		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
		ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
	} else {
		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
		ivpu_hw_range_init(&vdev->hw->ranges.user, 0x80000000, SZ_256M);
		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000 + SZ_256M, SZ_2G - SZ_256M);
		ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
	}
}
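
/*
 * These are NPU virtual address windows, not host memory.  Each range is
 * effectively a [start, start + size) window (see ivpu_hw_range_init() and
 * ivpu_hw_range_size() in ivpu_hw.h), e.g. for the DMA window:
 *
 *	ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_8G);
 *	  -> start = 0x2_0000_0000, end = 0x4_0000_0000
 *
 * Generations other than 37xx pack the user and SHAVE ranges directly behind
 * the global window instead of using the dedicated 37xx layout.
 */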

static int wp_enable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, true);
}

static int wp_disable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, false);
}

int ivpu_hw_power_up(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(wp0_during_power_up)) {
		/* WP requests may fail when powering down, so issue WP 0 here */
		ret = wp_disable(vdev);
		if (ret)
			ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret);
	}

	ret = ivpu_hw_btrs_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = wp_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
		if (IVPU_WA(disable_clock_relinquish))
			ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
		ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
		ivpu_hw_btrs_ats_print_lnl(vdev);
	}

	ret = ivpu_hw_ip_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	ivpu_hw_ip_idle_gen_disable(vdev);

	ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
		return ret;
	}

	ret = ivpu_hw_ip_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
		ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);

	ret = ivpu_hw_ip_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}
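
/*
 * Power-up ordering matters: D0i3 exit and the workpoint (PLL) request come
 * first, then host subsystem configuration, and only after that the power
 * domain, AXI and TOP NOC are enabled.  The initial wp_disable() and the
 * D0i3 disable are best-effort and only warn (the device may already be in
 * the target state), while failures from wp_enable() onward abort the
 * sequence and propagate the error to the caller.
 */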

static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
}

int ivpu_hw_reset(struct ivpu_device *vdev)
{
	int ret = 0;

	if (ivpu_hw_btrs_ip_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU IP\n");
		ret = -EIO;
	}

	if (wp_disable(vdev)) {
		ivpu_err(vdev, "Failed to disable workpoint\n");
		ret = -EIO;
	}

	return ret;
}

int ivpu_hw_power_down(struct ivpu_device *vdev)
{
	int ret = 0;

	save_d0i3_entry_timestamp(vdev);

	if (!ivpu_hw_is_idle(vdev))
		ivpu_warn(vdev, "NPU not idle during power down\n");

	if (ivpu_hw_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU\n");
		ret = -EIO;
	}

	if (ivpu_hw_btrs_d0i3_enable(vdev)) {
		ivpu_err(vdev, "Failed to enter D0I3\n");
		ret = -EIO;
	}

	return ret;
}
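
/*
 * The two timestamps captured by save_d0i3_entry_timestamp() (host boottime
 * and the NPU performance counter) are taken just before reset and D0i3
 * entry, presumably so the next power-up can correlate firmware-side time
 * with the host clock when reporting boot time or D0i3 residency.
 */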

int ivpu_hw_init(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_info_init(vdev);
	ivpu_hw_btrs_freq_ratios_init(vdev);
	memory_ranges_init(vdev);
	platform_init(vdev);
	wa_init(vdev);
	timeouts_init(vdev);

	return 0;
}

int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
	int ret;

	ivpu_hw_ip_snoop_disable(vdev);
	ivpu_hw_ip_tbu_mmu_enable(vdev);
	ret = ivpu_hw_ip_soc_cpu_boot(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

	return ret;
}

void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
		return;
	}

	if (enable)
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
	else
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
}

void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
	INIT_KFIFO(vdev->hw->irq.fifo);

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
	else
		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
	else
		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
}
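
/*
 * IRQ dispatch is split along the two hardware axes: the NPU IP generation
 * (37xx vs 40xx) selects the IP-level handler and the buttress generation
 * (MTL vs LNL) selects the buttress handler.  Both are stored as function
 * pointers in vdev->hw->irq so that ivpu_hw_irq_handler() below stays
 * generation-agnostic.
 */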

void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
	kfifo_reset(&vdev->hw->irq.fifo);
	ivpu_hw_ip_irq_enable(vdev);
	ivpu_hw_btrs_irq_enable(vdev);
}

void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_irq_disable(vdev);
	ivpu_hw_ip_irq_disable(vdev);
}

irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
{
	struct ivpu_device *vdev = ptr;
	bool ip_handled, btrs_handled;

	ivpu_hw_btrs_global_int_disable(vdev);

	btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
	if (!ivpu_hw_is_idle((vdev)) || !btrs_handled)
		ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
	else
		ip_handled = false;

	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
	ivpu_hw_btrs_global_int_enable(vdev);

	if (!kfifo_is_empty(&vdev->hw->irq.fifo))
		return IRQ_WAKE_THREAD;
	if (ip_handled || btrs_handled)
		return IRQ_HANDLED;
	return IRQ_NONE;
}
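
/*
 * Top-/bottom-half split: the hard IRQ handler only acks the interrupt
 * sources and lets the generation-specific handlers push pending work items
 * (IVPU_HW_IRQ_SRC_*) into the kfifo; returning IRQ_WAKE_THREAD defers to a
 * threaded handler (assumed to be registered with request_threaded_irq()
 * elsewhere in the driver) that drains the fifo outside hard-IRQ context.
 * Global buttress interrupts are masked for the duration of the handler and
 * re-enabled at the end so a still-pending source re-triggers the MSI
 * instead of being lost.
 */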
|
@@ -1,39 +1,22 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2020-2023 Intel Corporation
|
||||
* Copyright (C) 2020-2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#ifndef __IVPU_HW_H__
|
||||
#define __IVPU_HW_H__
|
||||
|
||||
#include "ivpu_drv.h"
|
||||
#include <linux/kfifo.h>
|
||||
|
||||
struct ivpu_hw_ops {
|
||||
int (*info_init)(struct ivpu_device *vdev);
|
||||
int (*power_up)(struct ivpu_device *vdev);
|
||||
int (*boot_fw)(struct ivpu_device *vdev);
|
||||
int (*power_down)(struct ivpu_device *vdev);
|
||||
int (*reset)(struct ivpu_device *vdev);
|
||||
bool (*is_idle)(struct ivpu_device *vdev);
|
||||
int (*wait_for_idle)(struct ivpu_device *vdev);
|
||||
void (*wdt_disable)(struct ivpu_device *vdev);
|
||||
void (*diagnose_failure)(struct ivpu_device *vdev);
|
||||
u32 (*profiling_freq_get)(struct ivpu_device *vdev);
|
||||
void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
|
||||
u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
|
||||
u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
|
||||
u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
|
||||
u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
|
||||
u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
|
||||
void (*reg_db_set)(struct ivpu_device *vdev, u32 db_id);
|
||||
u32 (*reg_ipc_rx_addr_get)(struct ivpu_device *vdev);
|
||||
u32 (*reg_ipc_rx_count_get)(struct ivpu_device *vdev);
|
||||
void (*reg_ipc_tx_set)(struct ivpu_device *vdev, u32 vpu_addr);
|
||||
void (*irq_clear)(struct ivpu_device *vdev);
|
||||
void (*irq_enable)(struct ivpu_device *vdev);
|
||||
void (*irq_disable)(struct ivpu_device *vdev);
|
||||
irqreturn_t (*irq_handler)(int irq, void *ptr);
|
||||
};
|
||||
#include "ivpu_drv.h"
|
||||
#include "ivpu_hw_btrs.h"
|
||||
#include "ivpu_hw_ip.h"
|
||||
|
||||
#define IVPU_HW_IRQ_FIFO_LENGTH 1024
|
||||
|
||||
#define IVPU_HW_IRQ_SRC_IPC 1
|
||||
#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
|
||||
#define IVPU_HW_IRQ_SRC_DCT 3
|
||||
|
||||
struct ivpu_addr_range {
|
||||
resource_size_t start;
|
||||
@@ -41,7 +24,11 @@ struct ivpu_addr_range {
|
||||
};
|
||||
|
||||
struct ivpu_hw_info {
|
||||
const struct ivpu_hw_ops *ops;
|
||||
struct {
|
||||
bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
|
||||
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
|
||||
DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH);
|
||||
} irq;
|
||||
struct {
|
||||
struct ivpu_addr_range global;
|
||||
struct ivpu_addr_range user;
|
||||
@@ -59,6 +46,7 @@ struct ivpu_hw_info {
|
||||
u32 profiling_freq;
|
||||
} pll;
|
||||
u32 tile_fuse;
|
||||
u32 sched_mode;
|
||||
u32 sku;
|
||||
u16 config;
|
||||
int dma_bits;
|
||||
@@ -66,127 +54,28 @@
|
||||
u64 d0i3_entry_vpu_ts;
|
||||
};
|
||||
|
||||
extern const struct ivpu_hw_ops ivpu_hw_37xx_ops;
|
||||
extern const struct ivpu_hw_ops ivpu_hw_40xx_ops;
|
||||
int ivpu_hw_init(struct ivpu_device *vdev);
|
||||
int ivpu_hw_power_up(struct ivpu_device *vdev);
|
||||
int ivpu_hw_power_down(struct ivpu_device *vdev);
|
||||
int ivpu_hw_reset(struct ivpu_device *vdev);
|
||||
int ivpu_hw_boot_fw(struct ivpu_device *vdev);
|
||||
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable);
|
||||
void ivpu_irq_handlers_init(struct ivpu_device *vdev);
|
||||
void ivpu_hw_irq_enable(struct ivpu_device *vdev);
|
||||
void ivpu_hw_irq_disable(struct ivpu_device *vdev);
|
||||
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);
|
||||
|
||||
static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
|
||||
static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
|
||||
{
|
||||
return vdev->hw->ops->info_init(vdev);
|
||||
};
|
||||
|
||||
static inline int ivpu_hw_power_up(struct ivpu_device *vdev)
|
||||
{
|
||||
ivpu_dbg(vdev, PM, "HW power up\n");
|
||||
|
||||
return vdev->hw->ops->power_up(vdev);
|
||||
};
|
||||
|
||||
static inline int ivpu_hw_boot_fw(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->boot_fw(vdev);
|
||||
};
|
||||
|
||||
static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->is_idle(vdev);
|
||||
};
|
||||
|
||||
static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->wait_for_idle(vdev);
|
||||
};
|
||||
|
||||
static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
|
||||
{
|
||||
ivpu_dbg(vdev, PM, "HW power down\n");
|
||||
|
||||
return vdev->hw->ops->power_down(vdev);
|
||||
};
|
||||
|
||||
static inline int ivpu_hw_reset(struct ivpu_device *vdev)
|
||||
{
|
||||
ivpu_dbg(vdev, PM, "HW reset\n");
|
||||
|
||||
return vdev->hw->ops->reset(vdev);
|
||||
};
|
||||
|
||||
static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
|
||||
{
|
||||
vdev->hw->ops->wdt_disable(vdev);
|
||||
};
|
||||
|
||||
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->profiling_freq_get(vdev);
|
||||
};
|
||||
|
||||
static inline void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
|
||||
{
|
||||
return vdev->hw->ops->profiling_freq_drive(vdev, enable);
|
||||
};
|
||||
|
||||
/* Register indirect accesses */
|
||||
static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->reg_pll_freq_get(vdev);
|
||||
};
|
||||
|
||||
static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
|
||||
{
|
||||
return vdev->hw->ops->ratio_to_freq(vdev, ratio);
|
||||
return vdev->hw->irq.btrs_irq_handler(vdev, irq);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
|
||||
static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
|
||||
{
|
||||
return vdev->hw->ops->reg_telemetry_offset_get(vdev);
|
||||
};
|
||||
return vdev->hw->irq.ip_irq_handler(vdev, irq);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_reg_telemetry_size_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->reg_telemetry_size_get(vdev);
|
||||
};
|
||||
|
||||
static inline u32 ivpu_hw_reg_telemetry_enable_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->reg_telemetry_enable_get(vdev);
|
||||
};
|
||||
|
||||
static inline void ivpu_hw_reg_db_set(struct ivpu_device *vdev, u32 db_id)
|
||||
{
|
||||
vdev->hw->ops->reg_db_set(vdev, db_id);
|
||||
};
|
||||
|
||||
static inline u32 ivpu_hw_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->reg_ipc_rx_addr_get(vdev);
|
||||
};
|
||||
|
||||
static inline u32 ivpu_hw_reg_ipc_rx_count_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->ops->reg_ipc_rx_count_get(vdev);
|
||||
};
|
||||
|
||||
static inline void ivpu_hw_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
|
||||
{
|
||||
vdev->hw->ops->reg_ipc_tx_set(vdev, vpu_addr);
|
||||
};
|
||||
|
||||
static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
|
||||
{
|
||||
vdev->hw->ops->irq_clear(vdev);
|
||||
};
|
||||
|
||||
static inline void ivpu_hw_irq_enable(struct ivpu_device *vdev)
|
||||
{
|
||||
vdev->hw->ops->irq_enable(vdev);
|
||||
};
|
||||
|
||||
static inline void ivpu_hw_irq_disable(struct ivpu_device *vdev)
|
||||
{
|
||||
vdev->hw->ops->irq_disable(vdev);
|
||||
};
|
||||
|
||||
static inline void ivpu_hw_init_range(struct ivpu_addr_range *range, u64 start, u64 size)
|
||||
static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
|
||||
{
|
||||
range->start = start;
|
||||
range->end = start + size;
|
||||
@@ -197,9 +86,75 @@ static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
|
||||
return range->end - range->start;
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
|
||||
{
|
||||
return ivpu_hw_btrs_ratio_to_freq(vdev, ratio);
|
||||
}
|
||||
|
||||
static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
|
||||
{
|
||||
ivpu_hw_ip_irq_clear(vdev);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_hw_btrs_pll_freq_get(vdev);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return vdev->hw->pll.profiling_freq;
|
||||
}
|
||||
|
||||
static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev)
|
||||
{
|
||||
vdev->hw->ops->diagnose_failure(vdev);
|
||||
ivpu_hw_ip_diagnose_failure(vdev);
|
||||
ivpu_hw_btrs_diagnose_failure(vdev);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_telemetry_offset_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_hw_btrs_telemetry_offset_get(vdev);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_telemetry_size_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_hw_btrs_telemetry_size_get(vdev);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_telemetry_enable_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_hw_btrs_telemetry_enable_get(vdev);
|
||||
}
|
||||
|
||||
static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_hw_btrs_is_idle(vdev);
|
||||
}
|
||||
|
||||
static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_hw_btrs_wait_for_idle(vdev);
|
||||
}
|
||||
|
||||
static inline void ivpu_hw_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
|
||||
{
|
||||
ivpu_hw_ip_ipc_tx_set(vdev, vpu_addr);
|
||||
}
|
||||
|
||||
static inline void ivpu_hw_db_set(struct ivpu_device *vdev, u32 db_id)
|
||||
{
|
||||
ivpu_hw_ip_db_set(vdev, db_id);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_ipc_rx_addr_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_hw_ip_ipc_rx_addr_get(vdev);
|
||||
}
|
||||
|
||||
static inline u32 ivpu_hw_ipc_rx_count_get(struct ivpu_device *vdev)
|
||||
{
|
||||
return ivpu_hw_ip_ipc_rx_count_get(vdev);
|
||||
}
|
||||
|
||||
#endif /* __IVPU_HW_H__ */
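
/*
 * Net effect of the ivpu_hw.h changes above: the old struct ivpu_hw_ops
 * vtable (and its per-generation 37xx/40xx ops tables) is removed; each
 * former ops->foo() wrapper becomes a static inline that calls straight into
 * the shared ivpu_hw_btrs_* (buttress) or ivpu_hw_ip_* (NPU IP) helpers, and
 * the IRQ path keeps only two per-generation function pointers plus a kfifo
 * of pending IRQ sources.
 */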
|
||||
|
File diff suppressed because it is too large
@@ -8,78 +8,6 @@
|
||||
|
||||
#include <linux/bits.h>
|
||||
|
||||
#define VPU_37XX_BUTTRESS_INTERRUPT_TYPE 0x00000000u
|
||||
|
||||
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT 0x00000004u
|
||||
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
|
||||
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
|
||||
#define VPU_37XX_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_CMD 0x00000014u
|
||||
#define VPU_37XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_WP_DOWNLOAD 0x00000018u
|
||||
#define VPU_37XX_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_CURRENT_PLL 0x0000001cu
|
||||
#define VPU_37XX_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_PLL_ENABLE 0x00000020u
|
||||
|
||||
#define VPU_37XX_BUTTRESS_FMIN_FUSE 0x00000024u
|
||||
#define VPU_37XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
|
||||
#define VPU_37XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_FMAX_FUSE 0x00000028u
|
||||
#define VPU_37XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_TILE_FUSE 0x0000002cu
|
||||
#define VPU_37XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
|
||||
#define VPU_37XX_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_LOCAL_INT_MASK 0x00000030u
|
||||
#define VPU_37XX_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
|
||||
|
||||
#define VPU_37XX_BUTTRESS_PLL_STATUS 0x00000040u
|
||||
#define VPU_37XX_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_VPU_STATUS 0x00000044u
|
||||
#define VPU_37XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
|
||||
#define VPU_37XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
|
||||
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
|
||||
#define VPU_37XX_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_VPU_IP_RESET 0x00000050u
|
||||
#define VPU_37XX_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
|
||||
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
|
||||
#define VPU_37XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
|
||||
|
||||
#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
|
||||
#define VPU_37XX_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
|
||||
#define VPU_37XX_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
|
||||
|
||||
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG 0x000000b0u
|
||||
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
|
||||
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
|
||||
#define VPU_37XX_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
|
||||
|
||||
#define VPU_37XX_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
|
||||
|
||||
#define VPU_37XX_HOST_SS_CPR_CLK_SET 0x00000084u
|
||||
#define VPU_37XX_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
|
||||
#define VPU_37XX_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
|
||||
|
File diff suppressed because it is too large
@@ -8,91 +8,6 @@
|
||||
|
||||
#include <linux/bits.h>
|
||||
|
||||
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT 0x00000000u
|
||||
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
|
||||
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
|
||||
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
|
||||
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
|
||||
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
|
||||
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
|
||||
#define VPU_40XX_BUTTRESS_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_LOCAL_INT_MASK 0x00000004u
|
||||
#define VPU_40XX_BUTTRESS_GLOBAL_INT_MASK 0x00000008u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_HM_ATS 0x0000000cu
|
||||
|
||||
#define VPU_40XX_BUTTRESS_ATS_ERR_LOG1 0x00000010u
|
||||
#define VPU_40XX_BUTTRESS_ATS_ERR_LOG2 0x00000014u
|
||||
#define VPU_40XX_BUTTRESS_ATS_ERR_CLEAR 0x00000018u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_CFI0_ERR_LOG 0x0000001cu
|
||||
#define VPU_40XX_BUTTRESS_CFI0_ERR_CLEAR 0x00000020u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_CFI1_ERR_LOG 0x00000040u
|
||||
#define VPU_40XX_BUTTRESS_CFI1_ERR_CLEAR 0x00000044u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_LOW 0x00000048u
|
||||
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_HIGH 0x0000004cu
|
||||
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI0_CLEAR 0x00000050u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_PORT_ARBITRATION_WEIGHTS 0x00000054u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_LOW 0x00000058u
|
||||
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_HIGH 0x0000005cu
|
||||
#define VPU_40XX_BUTTRESS_IMR_ERR_CFI1_CLEAR 0x00000060u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0 0x00000130u
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1 0x00000134u
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2 0x00000138u
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_CMD 0x0000013cu
|
||||
#define VPU_40XX_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_PLL_FREQ 0x00000148u
|
||||
#define VPU_40XX_BUTTRESS_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_TILE_FUSE 0x00000150u
|
||||
#define VPU_40XX_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
|
||||
#define VPU_40XX_BUTTRESS_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_VPU_STATUS 0x00000154u
|
||||
#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
|
||||
#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
|
||||
#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
|
||||
#define VPU_40XX_BUTTRESS_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
|
||||
#define VPU_40XX_BUTTRESS_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
|
||||
#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
|
||||
#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_IP_RESET 0x00000160u
|
||||
#define VPU_40XX_BUTTRESS_IP_RESET_TRIGGER_MASK BIT_MASK(0)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_D0I3_CONTROL 0x00000164u
|
||||
#define VPU_40XX_BUTTRESS_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
|
||||
#define VPU_40XX_BUTTRESS_D0I3_CONTROL_I3_MASK BIT_MASK(2)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000168u
|
||||
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_SIZE 0x0000016cu
|
||||
#define VPU_40XX_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000170u
|
||||
|
||||
#define VPU_40XX_BUTTRESS_FMIN_FUSE 0x00000174u
|
||||
#define VPU_40XX_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
|
||||
#define VPU_40XX_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
|
||||
|
||||
#define VPU_40XX_BUTTRESS_FMAX_FUSE 0x00000178u
|
||||
#define VPU_40XX_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
|
||||
|
||||
#define VPU_40XX_HOST_SS_CPR_CLK_EN 0x00000080u
|
||||
#define VPU_40XX_HOST_SS_CPR_CLK_EN_TOP_NOC_MASK BIT_MASK(1)
|
||||
#define VPU_40XX_HOST_SS_CPR_CLK_EN_DSS_MAS_MASK BIT_MASK(10)
|
||||
@@ -198,6 +113,12 @@
|
||||
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
|
||||
#define VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0_CSS_CPU_MASK BIT_MASK(3)
|
||||
|
||||
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY 0x00030068u
|
||||
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY_POST_DLY_MASK GENMASK(7, 0)
|
||||
|
||||
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu
|
||||
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0)
|
||||
|
||||
#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
|
||||
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
|
||||
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
|
||||
@@ -205,6 +126,9 @@
|
||||
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE 0x00030204u
|
||||
#define VPU_40XX_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
|
||||
|
||||
#define VPU_50XX_HOST_SS_AON_FABRIC_REQ_OVERRIDE 0x00030210u
|
||||
#define VPU_50XX_HOST_SS_AON_FABRIC_REQ_OVERRIDE_REQ_OVERRIDE_MASK BIT_MASK(0)
|
||||
|
||||
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO 0x00040040u
|
||||
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_DONE_MASK BIT_MASK(0)
|
||||
#define VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
|
||||
|
drivers/accel/ivpu/ivpu_hw_btrs.c (new file, 905 lines)
@@ -0,0 +1,905 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2020-2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#include "ivpu_drv.h"
|
||||
#include "ivpu_hw.h"
|
||||
#include "ivpu_hw_btrs.h"
|
||||
#include "ivpu_hw_btrs_lnl_reg.h"
|
||||
#include "ivpu_hw_btrs_mtl_reg.h"
|
||||
#include "ivpu_hw_reg_io.h"
|
||||
#include "ivpu_pm.h"
|
||||
|
||||
#define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \
|
||||
(REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR)))
|
||||
|
||||
#define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \
|
||||
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \
|
||||
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \
|
||||
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \
|
||||
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \
|
||||
(REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR)))
|
||||
|
||||
#define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \
|
||||
FREQ_CHANGE)))
|
||||
|
||||
#define BTRS_IRQ_DISABLE_MASK ((u32)-1)
|
||||
|
||||
#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)
|
||||
|
||||
#define BTRS_MTL_WP_CONFIG_1_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_5_3)
|
||||
#define BTRS_MTL_WP_CONFIG_1_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_1_TILE, MTL_PLL_RATIO_4_3)
|
||||
#define BTRS_MTL_WP_CONFIG_2_TILE_5_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_5_3)
|
||||
#define BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3)
|
||||
#define BTRS_MTL_WP_CONFIG_0_TILE_PLL_OFF WP_CONFIG(0, 0)
|
||||
|
||||
#define PLL_CDYN_DEFAULT 0x80
|
||||
#define PLL_EPP_DEFAULT 0x80
|
||||
#define PLL_CONFIG_DEFAULT 0x0
|
||||
#define PLL_SIMULATION_FREQ 10000000
|
||||
#define PLL_REF_CLK_FREQ 50000000
|
||||
#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
|
||||
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
|
||||
#define TIMEOUT_US (150 * USEC_PER_MSEC)
|
||||
|
||||
/* Work point configuration values */
|
||||
#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
|
||||
#define MTL_CONFIG_1_TILE 0x01
|
||||
#define MTL_CONFIG_2_TILE 0x02
|
||||
#define MTL_PLL_RATIO_5_3 0x01
|
||||
#define MTL_PLL_RATIO_4_3 0x02
|
||||
#define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0
|
||||
#define BTRS_MTL_TILE_SKU_BOTH 0x3630
|
||||
|
||||
#define BTRS_LNL_TILE_MAX_NUM 6
|
||||
#define BTRS_LNL_TILE_MAX_MASK 0x3f
|
||||
|
||||
#define WEIGHTS_DEFAULT 0xf711f711u
|
||||
#define WEIGHTS_ATS_DEFAULT 0x0000f711u
|
||||
|
||||
#define DCT_REQ 0x2
|
||||
#define DCT_ENABLE 0x1
|
||||
#define DCT_DISABLE 0x0
|
||||
|
||||
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
|
||||
{
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
|
||||
if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) {
|
||||
/* Writing 1s does not clear the interrupt status register */
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void freq_ratios_init_mtl(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_hw_info *hw = vdev->hw;
|
||||
u32 fmin_fuse, fmax_fuse;
|
||||
|
||||
fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE);
|
||||
hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
|
||||
hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse);
|
||||
|
||||
fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE);
|
||||
hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
|
||||
}
|
||||
|
||||
static void freq_ratios_init_lnl(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_hw_info *hw = vdev->hw;
|
||||
u32 fmin_fuse, fmax_fuse;
|
||||
|
||||
fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE);
|
||||
hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
|
||||
hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse);
|
||||
|
||||
fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE);
|
||||
hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
|
||||
}
|
||||
|
||||
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_hw_info *hw = vdev->hw;
|
||||
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
freq_ratios_init_mtl(vdev);
|
||||
else
|
||||
freq_ratios_init_lnl(vdev);
|
||||
|
||||
hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
|
||||
hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
|
||||
hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
|
||||
}
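
/*
 * The fused ratio limits are narrowed by the ivpu_pll_min_ratio and
 * ivpu_pll_max_ratio knobs (assumed to be module parameters): each knob is
 * clamped into the fused [min_ratio, max_ratio] window, and pn_ratio is then
 * clamped into the resulting range, so e.g. a fused P-state ratio below the
 * requested minimum is silently raised to min_ratio.
 */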
|
||||
|
||||
static bool tile_disable_check(u32 config)
|
||||
{
|
||||
/* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
|
||||
if (config == 0)
|
||||
return true;
|
||||
|
||||
if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1))
|
||||
return false;
|
||||
|
||||
if ((config & (config - 1)) == 0)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
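
/*
 * tile_disable_check() accepts 0 (no tile fused off) or exactly one bit set
 * among bits 0..5 (one of six tiles disabled).  The (config & (config - 1))
 * expression is the usual power-of-two test, e.g. config = 0x08 gives
 * 0x08 & 0x07 = 0 and is accepted, while config = 0x0c gives
 * 0x0c & 0x0b = 0x08 != 0 and is rejected (two tiles cannot be disabled).
 */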
|
||||
|
||||
static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config)
|
||||
{
|
||||
u32 fuse;
|
||||
u32 config;
|
||||
|
||||
fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE);
|
||||
if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) {
|
||||
ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
|
||||
if (!tile_disable_check(config)) {
|
||||
ivpu_err(vdev, "Fuse: Invalid tile disable config (0x%x)\n", config);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (config)
|
||||
ivpu_dbg(vdev, MISC, "Fuse: %d tiles enabled. Tile number %d disabled\n",
|
||||
BTRS_LNL_TILE_MAX_NUM - 1, ffs(config) - 1);
|
||||
else
|
||||
ivpu_dbg(vdev, MISC, "Fuse: All %d tiles enabled\n", BTRS_LNL_TILE_MAX_NUM);
|
||||
|
||||
*tile_fuse_config = config;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int info_init_mtl(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_hw_info *hw = vdev->hw;
|
||||
|
||||
hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
|
||||
hw->sku = BTRS_MTL_TILE_SKU_BOTH;
|
||||
hw->config = BTRS_MTL_WP_CONFIG_2_TILE_4_3_RATIO;
|
||||
hw->sched_mode = ivpu_sched_mode;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int info_init_lnl(struct ivpu_device *vdev)
|
||||
{
|
||||
struct ivpu_hw_info *hw = vdev->hw;
|
||||
u32 tile_fuse_config;
|
||||
int ret;
|
||||
|
||||
ret = read_tile_config_fuse(vdev, &tile_fuse_config);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
hw->sched_mode = ivpu_sched_mode;
|
||||
hw->tile_fuse = tile_fuse_config;
|
||||
hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ivpu_hw_btrs_info_init(struct ivpu_device *vdev)
|
||||
{
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return info_init_mtl(vdev);
|
||||
else
|
||||
return info_init_lnl(vdev);
|
||||
}
|
||||
|
||||
static int wp_request_sync(struct ivpu_device *vdev)
|
||||
{
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
|
||||
else
|
||||
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
|
||||
}
|
||||
|
||||
static int wait_for_status_ready(struct ivpu_device *vdev, bool enable)
|
||||
{
|
||||
u32 exp_val = enable ? 0x1 : 0x0;
|
||||
|
||||
if (IVPU_WA(punit_disabled))
|
||||
return 0;
|
||||
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
|
||||
else
|
||||
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
|
||||
}
|
||||
|
||||
struct wp_request {
|
||||
u16 min;
|
||||
u16 max;
|
||||
u16 target;
|
||||
u16 cfg;
|
||||
u16 epp;
|
||||
u16 cdyn;
|
||||
};
|
||||
|
||||
static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val);
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val);
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val);
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val);
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD);
|
||||
val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val);
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val);
|
||||
}
|
||||
|
||||
static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val);
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val);
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val);
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val);
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val);
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD);
|
||||
val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val);
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val);
|
||||
}
|
||||
|
||||
static void wp_request(struct ivpu_device *vdev, struct wp_request *wp)
|
||||
{
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
wp_request_mtl(vdev, wp);
|
||||
else
|
||||
wp_request_lnl(vdev, wp);
|
||||
}
|
||||
|
||||
static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = wp_request_sync(vdev);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
wp_request(vdev, wp);
|
||||
|
||||
ret = wp_request_sync(vdev);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable)
|
||||
{
|
||||
struct ivpu_hw_info *hw = vdev->hw;
|
||||
|
||||
wp->min = hw->pll.min_ratio;
|
||||
wp->max = hw->pll.max_ratio;
|
||||
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
|
||||
wp->target = enable ? hw->pll.pn_ratio : 0;
|
||||
wp->cfg = enable ? hw->config : 0;
|
||||
wp->cdyn = 0;
|
||||
wp->epp = 0;
|
||||
} else {
|
||||
wp->target = hw->pll.pn_ratio;
|
||||
wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
|
||||
wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
|
||||
wp->epp = enable ? PLL_EPP_DEFAULT : 0;
|
||||
}
|
||||
|
||||
/* Simics cannot start without at least one tile */
|
||||
if (enable && ivpu_is_simics(vdev))
|
||||
wp->cfg = 1;
|
||||
}
|
||||
|
||||
static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
|
||||
{
|
||||
u32 exp_val = enable ? 0x1 : 0x0;
|
||||
|
||||
if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL)
|
||||
return 0;
|
||||
|
||||
if (IVPU_WA(punit_disabled))
|
||||
return 0;
|
||||
|
||||
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
|
||||
}
|
||||
|
||||
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
|
||||
{
|
||||
struct wp_request wp;
|
||||
int ret;
|
||||
|
||||
if (IVPU_WA(punit_disabled)) {
|
||||
ivpu_dbg(vdev, PM, "Skipping workpoint request\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
prepare_wp_request(vdev, &wp, enable);
|
||||
|
||||
ivpu_dbg(vdev, PM, "PLL workpoint request: %u Hz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
|
||||
PLL_RATIO_TO_FREQ(wp.target), wp.cfg, wp.epp, wp.cdyn);
|
||||
|
||||
ret = wp_request_send(vdev, &wp);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = wait_for_pll_lock(vdev, enable);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Timed out waiting for PLL lock\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = wait_for_status_ready(vdev, enable);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Timed out waiting for NPU ready status\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
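
/*
 * A workpoint request is the PUnit handshake that actually powers the NPU:
 * the payload registers carry min/max/target PLL ratios plus the
 * config/EPP/CDYN fields, the SEND bit submits the request, and completion
 * is confirmed by waiting for SEND to self-clear, then (on MTL only) for PLL
 * lock, then for the VPU READY status to reach the requested on/off state.
 * Disabling resubmits the request with zeroed config so the PUnit parks the
 * IP.
 */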
|
||||
|
||||
static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable)
|
||||
{
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL);
|
||||
if (enable)
|
||||
val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
|
||||
else
|
||||
val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val);
|
||||
|
||||
ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable)
|
||||
{
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL);
|
||||
if (enable)
|
||||
val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
|
||||
else
|
||||
val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val);
|
||||
|
||||
ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int d0i3_drive(struct ivpu_device *vdev, bool enable)
|
||||
{
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return d0i3_drive_mtl(vdev, enable);
|
||||
else
|
||||
return d0i3_drive_lnl(vdev, enable);
|
||||
}
|
||||
|
||||
int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (IVPU_WA(punit_disabled))
|
||||
return 0;
|
||||
|
||||
ret = d0i3_drive(vdev, true);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
|
||||
|
||||
udelay(5); /* VPU requires 5 us to complete the transition */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (IVPU_WA(punit_disabled))
|
||||
return 0;
|
||||
|
||||
ret = d0i3_drive(vdev, false);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
|
||||
{
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return 0;
|
||||
|
||||
if (ivpu_is_simics(vdev))
|
||||
return 0;
|
||||
|
||||
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
|
||||
}
|
||||
|
||||
void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev)
|
||||
{
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
|
||||
}
|
||||
|
||||
static int ip_reset_mtl(struct ivpu_device *vdev)
|
||||
{
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET);
|
||||
val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val);
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val);
|
||||
|
||||
ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ip_reset_lnl(struct ivpu_device *vdev)
|
||||
{
|
||||
int ret;
|
||||
u32 val;
|
||||
|
||||
ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
|
||||
|
||||
ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
|
||||
if (ret) {
|
||||
ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET);
|
||||
val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val);
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val);
|
||||
|
||||
ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
|
||||
if (ret)
|
||||
ivpu_err(vdev, "Timed out waiting for RESET completion\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev)
|
||||
{
|
||||
if (IVPU_WA(punit_disabled))
|
||||
return 0;
|
||||
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return ip_reset_mtl(vdev);
|
||||
else
|
||||
return ip_reset_lnl(vdev);
|
||||
}
|
||||
|
||||
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev)
|
||||
{
|
||||
u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
|
||||
|
||||
if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
|
||||
val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
|
||||
else
|
||||
val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
|
||||
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
|
||||
}
|
||||
|
||||
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev)
|
||||
{
|
||||
ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
|
||||
REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enable" : "Disable");
|
||||
}
|
||||
|
||||
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev)
|
||||
{
|
||||
u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
|
||||
|
||||
val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
|
||||
}
|
||||
|
||||
bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
if (IVPU_WA(punit_disabled))
|
||||
return true;
|
||||
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
|
||||
val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS);
|
||||
|
||||
return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) &&
|
||||
REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val);
|
||||
} else {
|
||||
val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);
|
||||
|
||||
return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) &&
|
||||
REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val);
|
||||
}
|
||||
}
|
||||
|
||||
int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
|
||||
{
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
|
||||
else
|
||||
return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
|
||||
}
|
||||
|
||||
/* Handler for IRQs from Buttress core (irqB) */
|
||||
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
|
||||
{
|
||||
u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
|
||||
bool schedule_recovery = false;
|
||||
|
||||
if (!status)
|
||||
return false;
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status))
|
||||
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x",
|
||||
REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL));
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
|
||||
ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1);
|
||||
schedule_recovery = true;
|
||||
}
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) {
|
||||
u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);
|
||||
|
||||
ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
|
||||
ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log),
|
||||
REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log),
|
||||
REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log));
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1);
|
||||
schedule_recovery = true;
|
||||
}
|
||||
|
||||
/* This must be done after interrupts are cleared at the source. */
|
||||
if (IVPU_WA(interrupt_clear_with_0))
|
||||
/*
|
||||
* Writing 1 triggers an interrupt, so we can't perform read update write.
|
||||
* Clear local interrupt status by writing 0 to all bits.
|
||||
*/
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
|
||||
else
|
||||
REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status);
|
||||
|
||||
if (schedule_recovery)
|
||||
ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Handler for IRQs from Buttress core (irqB) */
|
||||
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
|
||||
{
|
||||
u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
|
||||
bool schedule_recovery = false;
|
||||
|
||||
if (!status)
|
||||
return false;
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
|
||||
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
|
||||
if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_DCT))
|
||||
ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
|
||||
}
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status))
|
||||
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ));
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
|
||||
ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
|
||||
REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
|
||||
REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1);
|
||||
schedule_recovery = true;
|
||||
}
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) {
|
||||
ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1);
|
||||
schedule_recovery = true;
|
||||
}
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) {
|
||||
ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1);
|
||||
schedule_recovery = true;
|
||||
}
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) {
|
||||
ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
|
||||
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
|
||||
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1);
|
||||
schedule_recovery = true;
|
||||
}
|
||||
|
||||
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) {
|
||||
ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
|
||||
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
|
||||
REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1);
|
||||
schedule_recovery = true;
|
||||
}
|
||||
|
||||
/* This must be done after interrupts are cleared at the source. */
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status);
|
||||
|
||||
if (schedule_recovery)
|
||||
ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
|
||||
{
|
||||
u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW);
|
||||
u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val);
|
||||
u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val);
|
||||
|
||||
if (cmd != DCT_REQ) {
|
||||
ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd);
|
||||
return -EBADR;
|
||||
}
|
||||
|
||||
switch (param1) {
|
||||
case DCT_ENABLE:
|
||||
*enable = true;
|
||||
return 0;
|
||||
case DCT_DISABLE:
|
||||
*enable = false;
|
||||
return 0;
|
||||
default:
|
||||
ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
|
||||
{
|
||||
u32 val = 0;
|
||||
u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;
|
||||
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val);
|
||||
val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val);
|
||||
|
||||
REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
|
||||
}
|
||||
|
||||
static u32 pll_ratio_to_freq_mtl(u32 ratio, u32 config)
|
||||
{
|
||||
u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
|
||||
u32 cpu_clock;
|
||||
|
||||
if ((config & 0xff) == MTL_PLL_RATIO_4_3)
|
||||
cpu_clock = pll_clock * 2 / 4;
|
||||
else
|
||||
cpu_clock = pll_clock * 2 / 5;
|
||||
|
||||
return cpu_clock;
|
||||
}
|
||||
|
||||
u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
|
||||
{
|
||||
struct ivpu_hw_info *hw = vdev->hw;
|
||||
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return pll_ratio_to_freq_mtl(ratio, hw->config);
|
||||
else
|
||||
return PLL_RATIO_TO_FREQ(ratio);
|
||||
}
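
/*
 * Worked example of the ratio-to-frequency conversion: with the 50 MHz
 * reference clock (PLL_REF_CLK_FREQ), an MTL part at ratio 32 in the 4/3
 * workpoint config yields pll_clock = 50 MHz * 32 = 1600 MHz and
 * cpu_clock = 1600 MHz * 2 / 4 = 800 MHz; the same ratio in the 5/3 config
 * gives 1600 MHz * 2 / 5 = 640 MHz.  On LNL the raw PLL_RATIO_TO_FREQ()
 * value (ratio * 50 MHz) is returned directly.
 */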
|
||||
|
||||
static u32 pll_freq_get_mtl(struct ivpu_device *vdev)
|
||||
{
|
||||
u32 pll_curr_ratio;
|
||||
|
||||
pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
|
||||
pll_curr_ratio &= VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK;
|
||||
|
||||
if (!ivpu_is_silicon(vdev))
|
||||
return PLL_SIMULATION_FREQ;
|
||||
|
||||
return pll_ratio_to_freq_mtl(pll_curr_ratio, vdev->hw->config);
|
||||
}
|
||||
|
||||
static u32 pll_freq_get_lnl(struct ivpu_device *vdev)
|
||||
{
|
||||
u32 pll_curr_ratio;
|
||||
|
||||
pll_curr_ratio = REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
|
||||
pll_curr_ratio &= VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK;
|
||||
|
||||
return PLL_RATIO_TO_FREQ(pll_curr_ratio);
|
||||
}
|
||||
|
||||
u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev)
|
||||
{
|
||||
if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
|
||||
return pll_freq_get_mtl(vdev);
|
||||
else
|
||||
return pll_freq_get_lnl(vdev);
|
||||
}
|
||||
|
||||
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET);
        else
                return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET);
}

u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE);
        else
                return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE);
}

u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE);
        else
                return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE);
}

void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
        else
                REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
}

void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
        else
                REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
}

void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
                REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK));
                REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
        } else {
                REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK));
                REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
        }
}

void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
                REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
                REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
        } else {
                REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
                REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
        }
}

static void diagnose_failure_mtl(struct ivpu_device *vdev)
{
        u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;

        if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg))
                ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));

        if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) {
                u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

                ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
                         log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log),
                         REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log),
                         REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log));
        }
}

static void diagnose_failure_lnl(struct ivpu_device *vdev)
{
        u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) {
                ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
                         REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
                         REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
        }

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg))
                ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg))
                ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg))
                ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg))
                ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
                         REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));

        if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg))
                ivpu_err(vdev, "Survivability IRQ\n");
}

void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
{
        if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
                return diagnose_failure_mtl(vdev);
        else
                return diagnose_failure_lnl(vdev);
}
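The three telemetry getters above only report where the firmware telemetry window lives (offset, size, enable flag). A minimal sketch of how a caller might surface them, for example through a debugfs show callback; the function name and the debugfs wiring are assumptions for illustration, not part of this patch:

/* Illustrative sketch only; requires <linux/seq_file.h>. */
static int fw_telemetry_show(struct seq_file *s, void *v)
{
        struct ivpu_device *vdev = s->private;

        /* Dump the buttress-reported telemetry window parameters */
        seq_printf(s, "offset: 0x%08x\n", ivpu_hw_btrs_telemetry_offset_get(vdev));
        seq_printf(s, "size:   0x%08x\n", ivpu_hw_btrs_telemetry_size_get(vdev));
        seq_printf(s, "enable: 0x%08x\n", ivpu_hw_btrs_telemetry_enable_get(vdev));

        return 0;
}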
drivers/accel/ivpu/ivpu_hw_btrs.h (new file, 50 lines)
@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_HW_BTRS_H__
#define __IVPU_HW_BTRS_H__

#include "ivpu_drv.h"
#include "ivpu_hw_37xx_reg.h"
#include "ivpu_hw_40xx_reg.h"
#include "ivpu_hw_reg_io.h"

#define PLL_PROFILING_FREQ_DEFAULT 38400000
#define PLL_PROFILING_FREQ_HIGH 400000000
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)

#define DCT_DEFAULT_ACTIVE_PERCENT 15u
#define DCT_PERIOD_US 35300u

int ivpu_hw_btrs_info_init(struct ivpu_device *vdev);
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev);
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev);
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable);
int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev);
int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev);
int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev);
bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev);
int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev);
int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev);
void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev);
void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
u32 ivpu_hw_btrs_pll_freq_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_ratio_to_freq(struct ivpu_device *vdev, u32 ratio);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev);
void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev);
void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev);

#endif /* __IVPU_HW_BTRS_H__ */
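DCT_DEFAULT_ACTIVE_PERCENT and DCT_PERIOD_US above hint at how a duty-cycle throttling (DCT) request is turned into the active/inactive times that ivpu_jsm_dct_enable() takes later in this series: with the default 15% of a 35300 us period, that is roughly 5295 us active and 30005 us inactive. A hedged sketch of that conversion; the helper name is hypothetical and the driver may do this elsewhere:

/* Illustrative sketch, assuming percent-of-period semantics for DCT. */
static void ivpu_dct_percent_to_us(u32 active_percent, u32 *active_us, u32 *inactive_us)
{
        *active_us = DCT_PERIOD_US * active_percent / 100;
        *inactive_us = DCT_PERIOD_US - *active_us;
}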
drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h (new file, 108 lines)
@@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_HW_BTRS_LNL_REG_H__
#define __IVPU_HW_BTRS_LNL_REG_H__

#include <linux/bits.h>

#define VPU_HW_BTRS_LNL_INTERRUPT_STAT 0x00000000u
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_CFI0_ERR_MASK BIT_MASK(2)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_CFI1_ERR_MASK BIT_MASK(3)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_IMR0_ERR_MASK BIT_MASK(4)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_IMR1_ERR_MASK BIT_MASK(5)
#define VPU_HW_BTRS_LNL_INTERRUPT_STAT_SURV_ERR_MASK BIT_MASK(6)

#define VPU_HW_BTRS_LNL_LOCAL_INT_MASK 0x00000004u
#define VPU_HW_BTRS_LNL_GLOBAL_INT_MASK 0x00000008u

#define VPU_HW_BTRS_LNL_HM_ATS 0x0000000cu

#define VPU_HW_BTRS_LNL_ATS_ERR_LOG1 0x00000010u
#define VPU_HW_BTRS_LNL_ATS_ERR_LOG2 0x00000014u
#define VPU_HW_BTRS_LNL_ATS_ERR_CLEAR 0x00000018u

#define VPU_HW_BTRS_LNL_CFI0_ERR_LOG 0x0000001cu
#define VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR 0x00000020u

#define VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS 0x00000024u

#define VPU_HW_BTRS_LNL_CFI1_ERR_LOG 0x00000040u
#define VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR 0x00000044u

#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW 0x00000048u
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH 0x0000004cu
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR 0x00000050u

#define VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS 0x00000054u

#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW 0x00000058u
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH 0x0000005cu
#define VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR 0x00000060u

#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS 0x00000070u
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_CMD_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM1_MASK GENMASK(15, 8)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM2_MASK GENMASK(23, 16)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS_PARAM3_MASK GENMASK(31, 24)

#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW 0x00000074u
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_CMD_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM1_MASK GENMASK(15, 8)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM2_MASK GENMASK(23, 16)
#define VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW_PARAM3_MASK GENMASK(31, 24)

#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0 0x00000130u
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)

#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1 0x00000134u
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)

#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2 0x00000138u
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2_CDYN_MASK GENMASK(31, 16)

#define VPU_HW_BTRS_LNL_WP_REQ_CMD 0x0000013cu
#define VPU_HW_BTRS_LNL_WP_REQ_CMD_SEND_MASK BIT_MASK(0)

#define VPU_HW_BTRS_LNL_PLL_FREQ 0x00000148u
#define VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK GENMASK(15, 0)

#define VPU_HW_BTRS_LNL_TILE_FUSE 0x00000150u
#define VPU_HW_BTRS_LNL_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)

#define VPU_HW_BTRS_LNL_VPU_STATUS 0x00000154u
#define VPU_HW_BTRS_LNL_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_HW_BTRS_LNL_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
#define VPU_HW_BTRS_LNL_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
#define VPU_HW_BTRS_LNL_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_HW_BTRS_LNL_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_HW_BTRS_LNL_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)

#define VPU_HW_BTRS_LNL_IP_RESET 0x00000160u
#define VPU_HW_BTRS_LNL_IP_RESET_TRIGGER_MASK BIT_MASK(0)

#define VPU_HW_BTRS_LNL_D0I3_CONTROL 0x00000164u
#define VPU_HW_BTRS_LNL_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_D0I3_CONTROL_I3_MASK BIT_MASK(2)

#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET 0x00000168u
#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE 0x0000016cu
#define VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE 0x00000170u

#define VPU_HW_BTRS_LNL_FMIN_FUSE 0x00000174u
#define VPU_HW_BTRS_LNL_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_LNL_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)

#define VPU_HW_BTRS_LNL_FMAX_FUSE 0x00000178u
#define VPU_HW_BTRS_LNL_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)

#endif /* __IVPU_HW_BTRS_LNL_REG_H__ */
drivers/accel/ivpu/ivpu_hw_btrs_mtl_reg.h (new file, 83 lines)
@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __IVPU_HW_BTRS_MTL_REG_H__
#define __IVPU_HW_BTRS_MTL_REG_H__

#include <linux/bits.h>

#define VPU_HW_BTRS_MTL_INTERRUPT_TYPE 0x00000000u

#define VPU_HW_BTRS_MTL_INTERRUPT_STAT 0x00000004u
#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
#define VPU_HW_BTRS_MTL_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)

#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0 0x00000008u
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)

#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1 0x0000000cu
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)

#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2 0x00000010u
#define VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)

#define VPU_HW_BTRS_MTL_WP_REQ_CMD 0x00000014u
#define VPU_HW_BTRS_MTL_WP_REQ_CMD_SEND_MASK BIT_MASK(0)

#define VPU_HW_BTRS_MTL_WP_DOWNLOAD 0x00000018u
#define VPU_HW_BTRS_MTL_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)

#define VPU_HW_BTRS_MTL_CURRENT_PLL 0x0000001cu
#define VPU_HW_BTRS_MTL_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)

#define VPU_HW_BTRS_MTL_PLL_ENABLE 0x00000020u

#define VPU_HW_BTRS_MTL_FMIN_FUSE 0x00000024u
#define VPU_HW_BTRS_MTL_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
#define VPU_HW_BTRS_MTL_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)

#define VPU_HW_BTRS_MTL_FMAX_FUSE 0x00000028u
#define VPU_HW_BTRS_MTL_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)

#define VPU_HW_BTRS_MTL_TILE_FUSE 0x0000002cu
#define VPU_HW_BTRS_MTL_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_TILE_FUSE_SKU_MASK GENMASK(3, 2)

#define VPU_HW_BTRS_MTL_LOCAL_INT_MASK 0x00000030u
#define VPU_HW_BTRS_MTL_GLOBAL_INT_MASK 0x00000034u

#define VPU_HW_BTRS_MTL_PLL_STATUS 0x00000040u
#define VPU_HW_BTRS_MTL_PLL_STATUS_LOCK_MASK BIT_MASK(1)

#define VPU_HW_BTRS_MTL_VPU_STATUS 0x00000044u
#define VPU_HW_BTRS_MTL_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_VPU_STATUS_IDLE_MASK BIT_MASK(1)

#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL 0x00000060u
#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
#define VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)

#define VPU_HW_BTRS_MTL_VPU_IP_RESET 0x00000050u
#define VPU_HW_BTRS_MTL_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)

#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET 0x00000080u
#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE 0x00000084u
#define VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE 0x00000088u

#define VPU_HW_BTRS_MTL_ATS_ERR_LOG_0 0x000000a0u
#define VPU_HW_BTRS_MTL_ATS_ERR_LOG_1 0x000000a4u
#define VPU_HW_BTRS_MTL_ATS_ERR_CLEAR 0x000000a8u

#define VPU_HW_BTRS_MTL_UFI_ERR_LOG 0x000000b0u
#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
#define VPU_HW_BTRS_MTL_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)

#define VPU_HW_BTRS_MTL_UFI_ERR_CLEAR 0x000000b4u

#endif /* __IVPU_HW_BTRS_MTL_REG_H__ */
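Both register headers above encode fields as BIT_MASK()/GENMASK() constants named <REG>_<FIELD>_MASK. The REG_GET_FLD()/REG_TEST_FLD() helpers used in ivpu_hw_btrs.c come from ivpu_hw_reg_io.h, which is not part of this diff; under the usual <linux/bitfield.h> conventions they reduce to something like the sketch below. This is an assumption for illustration, not the driver's actual macro expansion:

/* Illustrative sketch only; requires <linux/bitfield.h>. */
static inline unsigned long ufi_err_opcode(u32 log)
{
        /* roughly what REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log) yields */
        return FIELD_GET(VPU_HW_BTRS_MTL_UFI_ERR_LOG_OPCODE_MASK, log);
}

static inline bool stat_has_ats_err(u32 stat)
{
        /* roughly what REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, stat) tests */
        return FIELD_GET(VPU_HW_BTRS_MTL_INTERRUPT_STAT_ATS_ERR_MASK, stat) != 0;
}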
drivers/accel/ivpu/ivpu_hw_ip.c (new file, 1174 lines)
File diff suppressed because it is too large.
drivers/accel/ivpu/ivpu_hw_ip.h (new file, 36 lines)
@@ -0,0 +1,36 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_HW_IP_H__
#define __IVPU_HW_IP_H__

#include "ivpu_drv.h"

int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev);
void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev);
void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev);
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev);
int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev);
int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev);
u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev);
void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev);
void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev);
int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev);
void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev);
void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev);
u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev);
void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev);
bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq);
bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq);
void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id);
u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev);
void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr);
void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev);
void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev);
void ivpu_hw_ip_fabric_req_override_enable_50xx(struct ivpu_device *vdev);
void ivpu_hw_ip_fabric_req_override_disable_50xx(struct ivpu_device *vdev);

#endif /* __IVPU_HW_IP_H__ */
@@ -129,7 +129,7 @@ static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)

static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
        ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
        ivpu_hw_ipc_tx_set(vdev, vpu_addr);
}

static void
@@ -210,8 +210,7 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c
        ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}

static int
ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        int ret;
@@ -378,7 +377,7 @@ ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons
        return false;
}

void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_consumer *cons;
@@ -392,8 +391,8 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
         * Driver needs to purge all messages from IPC FIFO to clear IPC interrupt.
         * Without purge IPC FIFO to 0 next IPC interrupts won't be generated.
         */
        while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
                vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
        while (ivpu_hw_ipc_rx_count_get(vdev)) {
                vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev);
                if (vpu_addr == REG_IO_ERROR) {
                        ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
                        return;
@@ -442,11 +441,12 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
                }
        }

        if (wake_thread)
                *wake_thread = !list_empty(&ipc->cb_msg_list);
        if (!list_empty(&ipc->cb_msg_list))
                if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
                        ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}

irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
{
        struct ivpu_ipc_info *ipc = vdev->ipc;
        struct ivpu_ipc_rx_msg *rx_msg, *r;
@@ -462,8 +462,6 @@ irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
                rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
                ivpu_ipc_rx_msg_del(vdev, rx_msg);
        }

        return IRQ_HANDLED;
}

int ivpu_ipc_init(struct ivpu_device *vdev)
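The handler above now queues IVPU_HW_IRQ_SRC_IPC into vdev->hw->irq.fifo instead of returning a wake_thread flag. A sketch of the kind of threaded dispatcher this implies; the actual dispatcher lives in the hw code not shown in this diff, and the kfifo element type is assumed here:

/* Illustrative sketch only; the real fan-out and FIFO element type may differ. */
static irqreturn_t ivpu_irq_thread_handler_sketch(int irq, void *ptr)
{
        struct ivpu_device *vdev = ptr;
        u8 irq_src; /* assumed element type of vdev->hw->irq.fifo */

        /* Drain every IRQ source queued by the hard-IRQ handlers */
        while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
                switch (irq_src) {
                case IVPU_HW_IRQ_SRC_IPC:
                        ivpu_ipc_irq_thread_handler(vdev);
                        break;
                default:
                        ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
                        break;
                }
        }

        return IRQ_HANDLED;
}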
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_IPC_H__
@@ -89,13 +89,15 @@ void ivpu_ipc_enable(struct ivpu_device *vdev);
void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);

void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread);
irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);
void ivpu_ipc_irq_handler(struct ivpu_device *vdev);
void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);

void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                           u32 channel, ivpu_ipc_rx_callback_t callback);
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);

int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                  struct vpu_jsm_msg *req);
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
                     struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
                     unsigned long timeout_ms);
@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2020-2023 Intel Corporation
|
||||
* Copyright (C) 2020-2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <drm/drm_file.h>
|
||||
@ -12,11 +12,13 @@
|
||||
#include <uapi/drm/ivpu_accel.h>
|
||||
|
||||
#include "ivpu_drv.h"
|
||||
#include "ivpu_fw.h"
|
||||
#include "ivpu_hw.h"
|
||||
#include "ivpu_ipc.h"
|
||||
#include "ivpu_job.h"
|
||||
#include "ivpu_jsm_msg.h"
|
||||
#include "ivpu_pm.h"
|
||||
#include "vpu_boot_api.h"
|
||||
|
||||
#define CMD_BUF_IDX 0
|
||||
#define JOB_ID_JOB_MASK GENMASK(7, 0)
|
||||
@ -25,14 +27,60 @@
|
||||
|
||||
static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
|
||||
{
|
||||
ivpu_hw_reg_db_set(vdev, cmdq->db_id);
|
||||
ivpu_hw_db_set(vdev, cmdq->db_id);
|
||||
}
|
||||
|
||||
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
|
||||
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
|
||||
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
|
||||
{
|
||||
u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
|
||||
u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
|
||||
struct ivpu_addr_range range;
|
||||
|
||||
if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
|
||||
return 0;
|
||||
|
||||
range.start = vdev->hw->ranges.user.end - (primary_size * IVPU_NUM_CMDQS_PER_CTX);
|
||||
range.end = vdev->hw->ranges.user.end;
|
||||
cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, primary_size,
|
||||
DRM_IVPU_BO_WC);
|
||||
if (!cmdq->primary_preempt_buf) {
|
||||
ivpu_err(vdev, "Failed to create primary preemption buffer\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
range.start = vdev->hw->ranges.shave.end - (secondary_size * IVPU_NUM_CMDQS_PER_CTX);
|
||||
range.end = vdev->hw->ranges.shave.end;
|
||||
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &range, secondary_size,
|
||||
DRM_IVPU_BO_WC);
|
||||
if (!cmdq->secondary_preempt_buf) {
|
||||
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
|
||||
goto err_free_primary;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_primary:
|
||||
ivpu_bo_free(cmdq->primary_preempt_buf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
|
||||
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
|
||||
{
|
||||
if (vdev->hw->sched_mode != VPU_SCHEDULING_MODE_HW)
|
||||
return;
|
||||
|
||||
drm_WARN_ON(&vdev->drm, !cmdq->primary_preempt_buf);
|
||||
drm_WARN_ON(&vdev->drm, !cmdq->secondary_preempt_buf);
|
||||
ivpu_bo_free(cmdq->primary_preempt_buf);
|
||||
ivpu_bo_free(cmdq->secondary_preempt_buf);
|
||||
}
|
||||
|
||||
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
|
||||
{
|
||||
struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
|
||||
struct ivpu_device *vdev = file_priv->vdev;
|
||||
struct vpu_job_queue_header *jobq_header;
|
||||
struct ivpu_cmdq *cmdq;
|
||||
int ret;
|
||||
|
||||
@ -50,18 +98,14 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 e
|
||||
if (!cmdq->mem)
|
||||
goto err_erase_xa;
|
||||
|
||||
cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
|
||||
sizeof(struct vpu_job_queue_entry));
|
||||
|
||||
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
|
||||
jobq_header = &cmdq->jobq->header;
|
||||
jobq_header->engine_idx = engine;
|
||||
jobq_header->head = 0;
|
||||
jobq_header->tail = 0;
|
||||
wmb(); /* Flush WC buffer for jobq->header */
|
||||
ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
|
||||
if (ret)
|
||||
goto err_free_cmdq_mem;
|
||||
|
||||
return cmdq;
|
||||
|
||||
err_free_cmdq_mem:
|
||||
ivpu_bo_free(cmdq->mem);
|
||||
err_erase_xa:
|
||||
xa_erase(&vdev->db_xa, cmdq->db_id);
|
||||
err_free_cmdq:
|
||||
@ -74,92 +118,183 @@ static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *c
|
||||
if (!cmdq)
|
||||
return;
|
||||
|
||||
ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
|
||||
ivpu_bo_free(cmdq->mem);
|
||||
xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
|
||||
kfree(cmdq);
|
||||
}
|
||||
|
||||
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
|
||||
static int ivpu_hws_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine,
|
||||
u8 priority)
|
||||
{
|
||||
struct ivpu_device *vdev = file_priv->vdev;
|
||||
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
|
||||
int ret;
|
||||
|
||||
ret = ivpu_jsm_hws_create_cmdq(vdev, file_priv->ctx.id, file_priv->ctx.id, cmdq->db_id,
|
||||
task_pid_nr(current), engine,
|
||||
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ivpu_jsm_hws_set_context_sched_properties(vdev, file_priv->ctx.id, cmdq->db_id,
|
||||
priority);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
|
||||
{
|
||||
struct ivpu_device *vdev = file_priv->vdev;
|
||||
int ret;
|
||||
|
||||
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW)
|
||||
ret = ivpu_jsm_hws_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->db_id,
|
||||
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
|
||||
else
|
||||
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
|
||||
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
|
||||
|
||||
if (!ret)
|
||||
ivpu_dbg(vdev, JOB, "DB %d registered to ctx %d\n", cmdq->db_id, file_priv->ctx.id);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
ivpu_cmdq_init(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq, u16 engine, u8 priority)
|
||||
{
|
||||
struct ivpu_device *vdev = file_priv->vdev;
|
||||
struct vpu_job_queue_header *jobq_header;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&file_priv->lock);
|
||||
|
||||
if (cmdq->db_registered)
|
||||
return 0;
|
||||
|
||||
cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
|
||||
sizeof(struct vpu_job_queue_entry));
|
||||
|
||||
cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
|
||||
jobq_header = &cmdq->jobq->header;
|
||||
jobq_header->engine_idx = engine;
|
||||
jobq_header->head = 0;
|
||||
jobq_header->tail = 0;
|
||||
wmb(); /* Flush WC buffer for jobq->header */
|
||||
|
||||
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
|
||||
ret = ivpu_hws_cmdq_init(file_priv, cmdq, engine, priority);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ivpu_register_db(file_priv, cmdq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cmdq->db_registered = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ivpu_cmdq_fini(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
|
||||
{
|
||||
struct ivpu_device *vdev = file_priv->vdev;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&file_priv->lock);
|
||||
|
||||
if (!cmdq->db_registered)
|
||||
return 0;
|
||||
|
||||
cmdq->db_registered = false;
|
||||
|
||||
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
|
||||
ret = ivpu_jsm_hws_destroy_cmdq(vdev, file_priv->ctx.id, cmdq->db_id);
|
||||
if (!ret)
|
||||
ivpu_dbg(vdev, JOB, "Command queue %d destroyed\n", cmdq->db_id);
|
||||
}
|
||||
|
||||
ret = ivpu_jsm_unregister_db(vdev, cmdq->db_id);
|
||||
if (!ret)
|
||||
ivpu_dbg(vdev, JOB, "DB %d unregistered\n", cmdq->db_id);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine,
|
||||
u8 priority)
|
||||
{
|
||||
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
|
||||
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&file_priv->lock);
|
||||
|
||||
if (!cmdq) {
|
||||
cmdq = ivpu_cmdq_alloc(file_priv, engine);
|
||||
cmdq = ivpu_cmdq_alloc(file_priv);
|
||||
if (!cmdq)
|
||||
return NULL;
|
||||
file_priv->cmdq[engine] = cmdq;
|
||||
file_priv->cmdq[cmdq_idx] = cmdq;
|
||||
}
|
||||
|
||||
if (cmdq->db_registered)
|
||||
return cmdq;
|
||||
|
||||
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
|
||||
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
|
||||
ret = ivpu_cmdq_init(file_priv, cmdq, engine, priority);
|
||||
if (ret)
|
||||
return NULL;
|
||||
|
||||
cmdq->db_registered = true;
|
||||
|
||||
return cmdq;
|
||||
}
|
||||
|
||||
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
|
||||
static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine, u8 priority)
|
||||
{
|
||||
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
|
||||
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
|
||||
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
|
||||
|
||||
lockdep_assert_held(&file_priv->lock);
|
||||
|
||||
if (cmdq) {
|
||||
file_priv->cmdq[engine] = NULL;
|
||||
if (cmdq->db_registered)
|
||||
ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);
|
||||
|
||||
file_priv->cmdq[cmdq_idx] = NULL;
|
||||
ivpu_cmdq_fini(file_priv, cmdq);
|
||||
ivpu_cmdq_free(file_priv, cmdq);
|
||||
}
|
||||
}
|
||||
|
||||
void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
|
||||
{
|
||||
int i;
|
||||
u16 engine;
|
||||
u8 priority;
|
||||
|
||||
lockdep_assert_held(&file_priv->lock);
|
||||
|
||||
for (i = 0; i < IVPU_NUM_ENGINES; i++)
|
||||
ivpu_cmdq_release_locked(file_priv, i);
|
||||
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++)
|
||||
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++)
|
||||
ivpu_cmdq_release_locked(file_priv, engine, priority);
|
||||
}
|
||||
|
||||
/*
|
||||
* Mark the doorbell as unregistered and reset job queue pointers.
|
||||
* Mark the doorbell as unregistered
|
||||
* This function needs to be called when the VPU hardware is restarted
|
||||
* and FW loses job queue state. The next time job queue is used it
|
||||
* will be registered again.
|
||||
*/
|
||||
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
|
||||
static void ivpu_cmdq_reset(struct ivpu_file_priv *file_priv)
|
||||
{
|
||||
struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
|
||||
|
||||
lockdep_assert_held(&file_priv->lock);
|
||||
|
||||
if (cmdq) {
|
||||
cmdq->db_registered = false;
|
||||
cmdq->jobq->header.head = 0;
|
||||
cmdq->jobq->header.tail = 0;
|
||||
wmb(); /* Flush WC buffer for jobq header */
|
||||
}
|
||||
}
|
||||
|
||||
static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
|
||||
{
|
||||
int i;
|
||||
u16 engine;
|
||||
u8 priority;
|
||||
|
||||
mutex_lock(&file_priv->lock);
|
||||
|
||||
for (i = 0; i < IVPU_NUM_ENGINES; i++)
|
||||
ivpu_cmdq_reset_locked(file_priv, i);
|
||||
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
|
||||
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
|
||||
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
|
||||
struct ivpu_cmdq *cmdq = file_priv->cmdq[cmdq_idx];
|
||||
|
||||
if (cmdq)
|
||||
cmdq->db_registered = false;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&file_priv->lock);
|
||||
}
|
||||
@ -172,10 +307,36 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
|
||||
mutex_lock(&vdev->context_list_lock);
|
||||
|
||||
xa_for_each(&vdev->context_xa, ctx_id, file_priv)
|
||||
ivpu_cmdq_reset_all(file_priv);
|
||||
ivpu_cmdq_reset(file_priv);
|
||||
|
||||
mutex_unlock(&vdev->context_list_lock);
|
||||
}
|
||||
|
||||
static void ivpu_cmdq_fini_all(struct ivpu_file_priv *file_priv)
|
||||
{
|
||||
u16 engine;
|
||||
u8 priority;
|
||||
|
||||
for (engine = 0; engine < IVPU_NUM_ENGINES; engine++) {
|
||||
for (priority = 0; priority < IVPU_NUM_PRIORITIES; priority++) {
|
||||
int cmdq_idx = IVPU_CMDQ_INDEX(engine, priority);
|
||||
|
||||
if (file_priv->cmdq[cmdq_idx])
|
||||
ivpu_cmdq_fini(file_priv, file_priv->cmdq[cmdq_idx]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv)
|
||||
{
|
||||
struct ivpu_device *vdev = file_priv->vdev;
|
||||
|
||||
lockdep_assert_held(&file_priv->lock);
|
||||
|
||||
ivpu_cmdq_fini_all(file_priv);
|
||||
|
||||
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_OS)
|
||||
ivpu_jsm_context_release(vdev, file_priv->ctx.id);
|
||||
}
|
||||
|
||||
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
|
||||
@ -199,6 +360,15 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
|
||||
entry->flags = 0;
|
||||
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
|
||||
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
|
||||
|
||||
if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW &&
|
||||
(unlikely(!(ivpu_test_mode & IVPU_TEST_MODE_PREEMPTION_DISABLE)))) {
|
||||
entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
|
||||
entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
|
||||
entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
|
||||
entry->secondary_preempt_buf_size = ivpu_bo_size(cmdq->secondary_preempt_buf);
|
||||
}
|
||||
|
||||
wmb(); /* Ensure that tail is updated after filling entry */
|
||||
header->tail = next_entry;
|
||||
wmb(); /* Flush WC buffer for jobq header */
|
||||
@ -295,11 +465,28 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *vdev, u32 job_id)
|
||||
{
|
||||
struct ivpu_job *job;
|
||||
|
||||
xa_lock(&vdev->submitted_jobs_xa);
|
||||
job = __xa_erase(&vdev->submitted_jobs_xa, job_id);
|
||||
|
||||
if (xa_empty(&vdev->submitted_jobs_xa) && job) {
|
||||
vdev->busy_time = ktime_add(ktime_sub(ktime_get(), vdev->busy_start_ts),
|
||||
vdev->busy_time);
|
||||
}
|
||||
|
||||
xa_unlock(&vdev->submitted_jobs_xa);
|
||||
|
||||
return job;
|
||||
}
|
||||
|
||||
static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
|
||||
{
|
||||
struct ivpu_job *job;
|
||||
|
||||
job = xa_erase(&vdev->submitted_jobs_xa, job_id);
|
||||
job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
|
||||
if (!job)
|
||||
return -ENOENT;
|
||||
|
||||
@ -328,12 +515,13 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
|
||||
ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
|
||||
}
|
||||
|
||||
static int ivpu_job_submit(struct ivpu_job *job)
|
||||
static int ivpu_job_submit(struct ivpu_job *job, u8 priority)
|
||||
{
|
||||
struct ivpu_file_priv *file_priv = job->file_priv;
|
||||
struct ivpu_device *vdev = job->vdev;
|
||||
struct xa_limit job_id_range;
|
||||
struct ivpu_cmdq *cmdq;
|
||||
bool is_first_job;
|
||||
int ret;
|
||||
|
||||
ret = ivpu_rpm_get(vdev);
|
||||
@ -342,10 +530,10 @@ static int ivpu_job_submit(struct ivpu_job *job)
|
||||
|
||||
mutex_lock(&file_priv->lock);
|
||||
|
||||
cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
|
||||
cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx, priority);
|
||||
if (!cmdq) {
|
||||
ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
|
||||
file_priv->ctx.id, job->engine_idx);
|
||||
ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d prio %d\n",
|
||||
file_priv->ctx.id, job->engine_idx, priority);
|
||||
ret = -EINVAL;
|
||||
goto err_unlock_file_priv;
|
||||
}
|
||||
@ -354,6 +542,7 @@ static int ivpu_job_submit(struct ivpu_job *job)
|
||||
job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
|
||||
|
||||
xa_lock(&vdev->submitted_jobs_xa);
|
||||
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
|
||||
ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
|
||||
if (ret) {
|
||||
ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
|
||||
@ -373,10 +562,12 @@ static int ivpu_job_submit(struct ivpu_job *job)
|
||||
wmb(); /* Flush WC buffer for jobq header */
|
||||
} else {
|
||||
ivpu_cmdq_ring_db(vdev, cmdq);
|
||||
if (is_first_job)
|
||||
vdev->busy_start_ts = ktime_get();
|
||||
}
|
||||
|
||||
ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
|
||||
job->job_id, file_priv->ctx.id, job->engine_idx,
|
||||
ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d prio %d addr 0x%llx next %d\n",
|
||||
job->job_id, file_priv->ctx.id, job->engine_idx, priority,
|
||||
job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
|
||||
|
||||
xa_unlock(&vdev->submitted_jobs_xa);
|
||||
@ -464,6 +655,14 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline u8 ivpu_job_to_hws_priority(struct ivpu_file_priv *file_priv, u8 priority)
|
||||
{
|
||||
if (priority == DRM_IVPU_JOB_PRIORITY_DEFAULT)
|
||||
return DRM_IVPU_JOB_PRIORITY_NORMAL;
|
||||
|
||||
return priority - 1;
|
||||
}
|
||||
|
||||
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
{
|
||||
struct ivpu_file_priv *file_priv = file->driver_priv;
|
||||
@ -472,6 +671,7 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
struct ivpu_job *job;
|
||||
u32 *buf_handles;
|
||||
int idx, ret;
|
||||
u8 priority;
|
||||
|
||||
if (params->engine > DRM_IVPU_ENGINE_COPY)
|
||||
return -EINVAL;
|
||||
@ -525,8 +725,10 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
goto err_destroy_job;
|
||||
}
|
||||
|
||||
priority = ivpu_job_to_hws_priority(file_priv, params->priority);
|
||||
|
||||
down_read(&vdev->pm->reset_lock);
|
||||
ret = ivpu_job_submit(job);
|
||||
ret = ivpu_job_submit(job, priority);
|
||||
up_read(&vdev->pm->reset_lock);
|
||||
if (ret)
|
||||
goto err_signal_fence;
|
||||
|
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_JOB_H__
@@ -24,6 +24,8 @@ struct ivpu_file_priv;
 */
struct ivpu_cmdq {
        struct vpu_job_queue *jobq;
        struct ivpu_bo *primary_preempt_buf;
        struct ivpu_bo *secondary_preempt_buf;
        struct ivpu_bo *mem;
        u32 entry_count;
        u32 db_id;
@@ -55,6 +57,8 @@ struct ivpu_job {

int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);

void ivpu_context_abort_locked(struct ivpu_file_priv *file_priv);

void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);

@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2020-2023 Intel Corporation
|
||||
* Copyright (C) 2020-2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#include "ivpu_drv.h"
|
||||
@ -103,14 +103,10 @@ int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret) {
|
||||
ivpu_err_ratelimited(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
|
||||
|
||||
ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
|
||||
@ -123,14 +119,10 @@ int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret) {
|
||||
ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
ivpu_warn_ratelimited(vdev, "Failed to unregister doorbell %u: %d\n", db_id, ret);
|
||||
|
||||
ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
|
||||
@ -255,11 +247,16 @@ int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.ssid_release.host_ssid = host_ssid;
|
||||
|
||||
return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_warn_ratelimited(vdev, "Failed to release context: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
|
||||
@ -281,3 +278,283 @@ int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev)
|
||||
|
||||
return ivpu_hw_wait_for_idle(vdev);
|
||||
}
|
||||
|
||||
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
|
||||
u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_CREATE_CMD_QUEUE };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.hws_create_cmdq.host_ssid = ctx_id;
|
||||
req.payload.hws_create_cmdq.process_id = pid;
|
||||
req.payload.hws_create_cmdq.engine_idx = engine;
|
||||
req.payload.hws_create_cmdq.cmdq_group = cmdq_group;
|
||||
req.payload.hws_create_cmdq.cmdq_id = cmdq_id;
|
||||
req.payload.hws_create_cmdq.cmdq_base = cmdq_base;
|
||||
req.payload.hws_create_cmdq.cmdq_size = cmdq_size;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_warn_ratelimited(vdev, "Failed to create command queue: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DESTROY_CMD_QUEUE };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.hws_destroy_cmdq.host_ssid = ctx_id;
|
||||
req.payload.hws_destroy_cmdq.cmdq_id = cmdq_id;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_warn_ratelimited(vdev, "Failed to destroy command queue: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
|
||||
u64 cmdq_base, u32 cmdq_size)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_REGISTER_DB };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret = 0;
|
||||
|
||||
req.payload.hws_register_db.db_id = db_id;
|
||||
req.payload.hws_register_db.host_ssid = ctx_id;
|
||||
req.payload.hws_register_db.cmdq_id = cmdq_id;
|
||||
req.payload.hws_register_db.cmdq_base = cmdq_base;
|
||||
req.payload.hws_register_db.cmdq_size = cmdq_size;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_err_ratelimited(vdev, "Failed to register doorbell %u: %d\n", db_id, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_ENGINE_RESUME };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
if (engine >= VPU_ENGINE_NB)
|
||||
return -EINVAL;
|
||||
|
||||
req.payload.hws_resume_engine.engine_idx = engine;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_err_ratelimited(vdev, "Failed to resume engine %d: %d\n", engine, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
|
||||
u32 priority)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.hws_set_context_sched_properties.host_ssid = ctx_id;
|
||||
req.payload.hws_set_context_sched_properties.cmdq_id = cmdq_id;
|
||||
req.payload.hws_set_context_sched_properties.priority_band = priority;
|
||||
req.payload.hws_set_context_sched_properties.realtime_priority_level = 0;
|
||||
req.payload.hws_set_context_sched_properties.in_process_priority = 0;
|
||||
req.payload.hws_set_context_sched_properties.context_quantum = 20000;
|
||||
req.payload.hws_set_context_sched_properties.grace_period_same_priority = 10000;
|
||||
req.payload.hws_set_context_sched_properties.grace_period_lower_priority = 0;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_warn_ratelimited(vdev, "Failed to set context sched properties: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
|
||||
u64 vpu_log_buffer_va)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.hws_set_scheduling_log.engine_idx = engine_idx;
|
||||
req.payload.hws_set_scheduling_log.host_ssid = host_ssid;
|
||||
req.payload.hws_set_scheduling_log.vpu_log_buffer_va = vpu_log_buffer_va;
|
||||
req.payload.hws_set_scheduling_log.notify_index = 0;
|
||||
req.payload.hws_set_scheduling_log.enable_extra_events =
|
||||
ivpu_test_mode & IVPU_TEST_MODE_HWS_EXTRA_EVENTS;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_warn_ratelimited(vdev, "Failed to set scheduling log: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
/* Idle */
|
||||
req.payload.hws_priority_band_setup.grace_period[0] = 0;
|
||||
req.payload.hws_priority_band_setup.process_grace_period[0] = 50000;
|
||||
req.payload.hws_priority_band_setup.process_quantum[0] = 160000;
|
||||
/* Normal */
|
||||
req.payload.hws_priority_band_setup.grace_period[1] = 50000;
|
||||
req.payload.hws_priority_band_setup.process_grace_period[1] = 50000;
|
||||
req.payload.hws_priority_band_setup.process_quantum[1] = 300000;
|
||||
/* Focus */
|
||||
req.payload.hws_priority_band_setup.grace_period[2] = 50000;
|
||||
req.payload.hws_priority_band_setup.process_grace_period[2] = 50000;
|
||||
req.payload.hws_priority_band_setup.process_quantum[2] = 200000;
|
||||
/* Realtime */
|
||||
req.payload.hws_priority_band_setup.grace_period[3] = 0;
|
||||
req.payload.hws_priority_band_setup.process_grace_period[3] = 50000;
|
||||
req.payload.hws_priority_band_setup.process_quantum[3] = 200000;
|
||||
|
||||
req.payload.hws_priority_band_setup.normal_band_percentage = 10;
|
||||
|
||||
ret = ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP,
|
||||
&resp, VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_warn_ratelimited(vdev, "Failed to set priority bands: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
|
||||
u64 sampling_rate, u64 buffer_addr, u64 buffer_size)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_START };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
|
||||
req.payload.metric_streamer_start.sampling_rate = sampling_rate;
|
||||
req.payload.metric_streamer_start.buffer_addr = buffer_addr;
|
||||
req.payload.metric_streamer_start.buffer_size = buffer_size;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_START_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret) {
|
||||
ivpu_warn_ratelimited(vdev, "Failed to start metric streamer: ret %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_STOP };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.metric_streamer_stop.metric_group_mask = metric_group_mask;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret)
|
||||
ivpu_warn_ratelimited(vdev, "Failed to stop metric streamer: ret %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
|
||||
u64 buffer_addr, u64 buffer_size, u64 *bytes_written)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_UPDATE };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.metric_streamer_update.metric_group_mask = metric_group_mask;
|
||||
req.payload.metric_streamer_update.buffer_addr = buffer_addr;
|
||||
req.payload.metric_streamer_update.buffer_size = buffer_size;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret) {
|
||||
ivpu_warn_ratelimited(vdev, "Failed to update metric streamer: ret %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (buffer_size && resp.payload.metric_streamer_done.bytes_written > buffer_size) {
|
||||
ivpu_warn_ratelimited(vdev, "MS buffer overflow: bytes_written %#llx > buffer_size %#llx\n",
|
||||
resp.payload.metric_streamer_done.bytes_written, buffer_size);
|
||||
return -EOVERFLOW;
|
||||
}
|
||||
|
||||
*bytes_written = resp.payload.metric_streamer_done.bytes_written;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
|
||||
u64 buffer_size, u32 *sample_size, u64 *info_size)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_METRIC_STREAMER_INFO };
|
||||
struct vpu_jsm_msg resp;
|
||||
int ret;
|
||||
|
||||
req.payload.metric_streamer_start.metric_group_mask = metric_group_mask;
|
||||
req.payload.metric_streamer_start.buffer_addr = buffer_addr;
|
||||
req.payload.metric_streamer_start.buffer_size = buffer_size;
|
||||
|
||||
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE, &resp,
|
||||
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
|
||||
if (ret) {
|
||||
ivpu_warn_ratelimited(vdev, "Failed to get metric streamer info: ret %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (!resp.payload.metric_streamer_done.sample_size) {
|
||||
ivpu_warn_ratelimited(vdev, "Invalid sample size\n");
|
||||
return -EBADMSG;
|
||||
}
|
||||
|
||||
if (sample_size)
|
||||
*sample_size = resp.payload.metric_streamer_done.sample_size;
|
||||
if (info_size)
|
||||
*info_size = resp.payload.metric_streamer_done.bytes_written;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_ENABLE };
|
||||
struct vpu_jsm_msg resp;
|
||||
|
||||
req.payload.pwr_dct_control.dct_active_us = active_us;
|
||||
req.payload.pwr_dct_control.dct_inactive_us = inactive_us;
|
||||
|
||||
return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_ENABLE_DONE,
|
||||
&resp, VPU_IPC_CHAN_ASYNC_CMD,
|
||||
vdev->timeout.jsm);
|
||||
}
|
||||
|
||||
int ivpu_jsm_dct_disable(struct ivpu_device *vdev)
|
||||
{
|
||||
struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DCT_DISABLE };
|
||||
struct vpu_jsm_msg resp;
|
||||
|
||||
return ivpu_ipc_send_receive_active(vdev, &req, VPU_JSM_MSG_DCT_DISABLE_DONE,
|
||||
&resp, VPU_IPC_CHAN_ASYNC_CMD,
|
||||
vdev->timeout.jsm);
|
||||
}
|
||||
|
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_JSM_MSG_H__
@@ -23,4 +23,24 @@ int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 tra
                              u64 trace_hw_component_mask);
int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid);
int ivpu_jsm_pwr_d0i3_enter(struct ivpu_device *vdev);
int ivpu_jsm_hws_create_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_group, u32 cmdq_id,
                             u32 pid, u32 engine, u64 cmdq_base, u32 cmdq_size);
int ivpu_jsm_hws_destroy_cmdq(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id);
int ivpu_jsm_hws_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id, u32 db_id,
                             u64 cmdq_base, u32 cmdq_size);
int ivpu_jsm_hws_resume_engine(struct ivpu_device *vdev, u32 engine);
int ivpu_jsm_hws_set_context_sched_properties(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id,
                                              u32 priority);
int ivpu_jsm_hws_set_scheduling_log(struct ivpu_device *vdev, u32 engine_idx, u32 host_ssid,
                                    u64 vpu_log_buffer_va);
int ivpu_jsm_hws_setup_priority_bands(struct ivpu_device *vdev);
int ivpu_jsm_metric_streamer_start(struct ivpu_device *vdev, u64 metric_group_mask,
                                   u64 sampling_rate, u64 buffer_addr, u64 buffer_size);
int ivpu_jsm_metric_streamer_stop(struct ivpu_device *vdev, u64 metric_group_mask);
int ivpu_jsm_metric_streamer_update(struct ivpu_device *vdev, u64 metric_group_mask,
                                    u64 buffer_addr, u64 buffer_size, u64 *bytes_written);
int ivpu_jsm_metric_streamer_info(struct ivpu_device *vdev, u64 metric_group_mask, u64 buffer_addr,
                                  u64 buffer_size, u32 *sample_size, u64 *info_size);
int ivpu_jsm_dct_enable(struct ivpu_device *vdev, u32 active_us, u32 inactive_us);
int ivpu_jsm_dct_disable(struct ivpu_device *vdev);
#endif
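The metric streamer declarations above are consumed as a start/update/stop sequence by the observability code. A hedged usage sketch; the function name and the zero sampling-rate placeholder are illustrative, not taken from this patch:

/* Illustrative sketch of one capture cycle over a caller-provided VPU buffer. */
static int ivpu_ms_capture_once_sketch(struct ivpu_device *vdev, u64 mask,
                                       u64 buf_addr, u64 buf_size, u64 *written)
{
        int ret;

        ret = ivpu_jsm_metric_streamer_start(vdev, mask, 0 /* sampling_rate */,
                                             buf_addr, buf_size);
        if (ret)
                return ret;

        ret = ivpu_jsm_metric_streamer_update(vdev, mask, buf_addr, buf_size, written);

        /* Always stop the stream, even if the update failed */
        ivpu_jsm_metric_streamer_stop(vdev, mask);

        return ret;
}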
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/circ_buf.h>
@@ -519,7 +519,8 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
        if (ret)
                return ret;

        clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
        if (!ivpu_is_force_snoop_enabled(vdev))
                clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
        REGV_WR32(IVPU_MMU_REG_CMDQ_PROD, q->prod);

        ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
@@ -567,7 +568,8 @@ static int ivpu_mmu_reset(struct ivpu_device *vdev)
        int ret;

        memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
        clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
        if (!ivpu_is_force_snoop_enabled(vdev))
                clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
        mmu->cmdq.prod = 0;
        mmu->cmdq.cons = 0;

@@ -661,7 +663,8 @@ static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
        WRITE_ONCE(entry[1], str[1]);
        WRITE_ONCE(entry[0], str[0]);

        clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);
        if (!ivpu_is_force_snoop_enabled(vdev))
                clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);

        ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
}
@@ -735,7 +738,8 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
        WRITE_ONCE(entry[3], cd[3]);
        WRITE_ONCE(entry[0], cd[0]);

        clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
        if (!ivpu_is_force_snoop_enabled(vdev))
                clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);

        ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
                 cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
@@ -874,8 +878,9 @@ static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
        u64 in_addr = ((u64)event[5]) << 32 | event[4];
        u32 sid = event[1];

        ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
                 op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr);
        ivpu_err_ratelimited(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
                             op, ivpu_mmu_event_to_str(op), ssid, sid,
                             event[2], event[3], in_addr, fetch_addr);
}

static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
@@ -911,6 +916,9 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
                ivpu_mmu_user_context_mark_invalid(vdev, ssid);
                REGV_WR32(IVPU_MMU_REG_EVTQ_CONS_SEC, vdev->mmu->evtq.cons);
        }

        if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_MMU_EVTQ))
                ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}

void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
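Each hunk above wraps clflush_cache_range() in the same force-snoop check: when the device snoops host caches, explicit cache maintenance on the MMU structures is unnecessary. A sketch of the pattern captured as a single helper; the helper name is hypothetical and the patch itself keeps the checks open-coded:

/* Illustrative sketch only; relies on clflush_cache_range() from <asm/cacheflush.h>. */
static inline void ivpu_mmu_flush_range_sketch(struct ivpu_device *vdev,
                                               void *addr, size_t size)
{
        /* Skip explicit flushes when force snooping keeps the tables coherent */
        if (!ivpu_is_force_snoop_enabled(vdev))
                clflush_cache_range(addr, size);
}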
Some files were not shown because too many files have changed in this diff.