Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2024-12-29 09:13:38 +00:00
amd-drm-next-6.13-2024-11-15:

amdgpu:
- Partition fixes
- GFX 12 fixes
- SR-IOV fixes
- MES fixes
- RAS fixes
- GC queue handling fixes
- VCN fixes
- Add sysfs reset masks
- Better error messages for P2P failures
- SMU fixes
- Documentation updates
- GFX11 enforce isolation updates
- Display HPD fixes
- PSR fixes
- Panel replay fixes
- DP MST fixes
- USB4 fixes
- Misc display fixes and cleanups
- VRAM handling fix for APUs
- NBIO fix

amdkfd:
- INIT_WORK fix
- Refcount fix
- KFD MES scheduling fixes

drm/fourcc:
- Add missing tiling mode

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQQgO5Idg2tXNTSZAr293/aFa7yZ2AUCZzd6agAKCRC93/aFa7yZ
2PxAAP9PADzrN23cgBoRn/KJKCxr4xIdckmgHQe85UeHbfGiggD/VOSp0UqjK4IQ
KyEqYueVyo9tn7906edE6K0GRI52TA8=
=6BB4
-----END PGP SIGNATURE-----

Merge tag 'amd-drm-next-6.13-2024-11-15' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241115165012.573465-1-alexander.deucher@amd.com
commit ade5add00d
@ -16,4 +16,5 @@ Next (GCN), Radeon DNA (RDNA), and Compute DNA (CDNA) architectures.

    thermal
    driver-misc
    debugging
+   process-isolation
    amdgpu-glossary

Documentation/gpu/amdgpu/process-isolation.rst (new file, 59 lines)

@ -0,0 +1,59 @@
.. SPDX-License-Identifier: GPL-2.0

=========================
 AMDGPU Process Isolation
=========================

The AMDGPU driver includes a feature that enables automatic process isolation on the graphics engine. This feature serializes access to the graphics engine and adds a cleaner shader which clears the Local Data Store (LDS) and General Purpose Registers (GPRs) between jobs. All processes using the GPU, including both graphics and compute workloads, are serialized when this feature is enabled. On GPUs that support partitionable graphics engines, this feature can be enabled on a per-partition basis.

In addition, there is an interface to manually run the cleaner shader when the use of the GPU is complete. This may be preferable in some use cases, such as a single-user system where the login manager triggers the cleaner shader when the user logs out.
Process Isolation
=================

The `run_cleaner_shader` and `enforce_isolation` sysfs interfaces allow users to manually execute the cleaner shader and control the process isolation feature, respectively.

Partition Handling
------------------

The `enforce_isolation` file in sysfs can be used to enable process isolation and automatic shader cleanup between processes. On GPUs that support graphics engine partitioning, this can be enabled per partition. The current setting of each partition (0 = disabled, 1 = enabled) can be read back from sysfs. On GPUs that do not support graphics engine partitioning, only a single partition will be present. Writing 1 to a partition position enables enforce isolation; writing 0 disables it.

Example of enabling enforce isolation on a GPU with multiple partitions:

.. code-block:: console

    $ echo 1 0 1 0 > /sys/class/drm/card0/device/enforce_isolation
    $ cat /sys/class/drm/card0/device/enforce_isolation
    1 0 1 0

The output indicates that enforce isolation is enabled on the zeroth and second partitions and disabled on the first and third partitions.

For devices with a single partition or those that do not support partitions, there will be only one element:

.. code-block:: console

    $ echo 1 > /sys/class/drm/card0/device/enforce_isolation
    $ cat /sys/class/drm/card0/device/enforce_isolation
    1
Cleaner Shader Execution
========================

The driver can trigger a cleaner shader to clean up the LDS and GPR state on the graphics engine. When process isolation is enabled, this happens automatically between processes. In addition, there is a sysfs file to manually trigger cleaner shader execution.

To manually trigger the execution of the cleaner shader, write `0` to the `run_cleaner_shader` sysfs file:

.. code-block:: console

    $ echo 0 > /sys/class/drm/card0/device/run_cleaner_shader

For multi-partition devices, you can specify the partition index when triggering the cleaner shader:

.. code-block:: console

    $ echo 0 > /sys/class/drm/card0/device/run_cleaner_shader # For partition 0
    $ echo 1 > /sys/class/drm/card0/device/run_cleaner_shader # For partition 1
    $ echo 2 > /sys/class/drm/card0/device/run_cleaner_shader # For partition 2
    # ... and so on for each partition

This command initiates the cleaner shader, which will run and complete before any new tasks are scheduled on the GPU.
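A possible userspace companion to the interface above, for the logout use case mentioned earlier, is sketched below. This helper is not part of the kernel patch; the `card0` path and the single-partition assumption are illustrative only.

.. code-block:: c

    /*
     * Hypothetical helper, not part of this patch: a login manager could
     * call something like this at logout to scrub GPU state. The card
     * index and the single-partition assumption are for illustration.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int run_cleaner_shader(const char *card, int partition)
    {
        char path[128], val[16];
        int fd, ret = 0;

        snprintf(path, sizeof(path),
                 "/sys/class/drm/%s/device/run_cleaner_shader", card);
        snprintf(val, sizeof(val), "%d", partition);

        fd = open(path, O_WRONLY);
        if (fd < 0)
            return -1;

        /* Writing the partition index runs the cleaner shader on it. */
        if (write(fd, val, strlen(val)) < 0)
            ret = -1;

        close(fd);
        return ret;
    }

    int main(void)
    {
        /* Single-partition (or non-partitioned) device: index 0 only. */
        if (run_cleaner_shader("card0", 0))
            perror("run_cleaner_shader");
        return 0;
    }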
@ -299,6 +299,12 @@ extern int amdgpu_wbrf;
|
||||
#define AMDGPU_RESET_VCE (1 << 13)
|
||||
#define AMDGPU_RESET_VCE1 (1 << 14)
|
||||
|
||||
/* reset mask */
|
||||
#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
|
||||
#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
|
||||
#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
|
||||
#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
|
||||
|
||||
/* max cursor sizes (in pixels) */
|
||||
#define CIK_CURSOR_WIDTH 128
|
||||
#define CIK_CURSOR_HEIGHT 128
|
||||
@ -1464,6 +1470,8 @@ struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
|
||||
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
|
||||
struct dma_fence *gang);
|
||||
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
|
||||
ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
|
||||
ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
|
||||
|
||||
/* atpx handler */
|
||||
#if defined(CONFIG_VGA_SWITCHEROO)
|
||||
|
@ -158,7 +158,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (start + count >= max_count)
|
||||
if (start + count > max_count)
|
||||
return -EINVAL;
|
||||
|
||||
count = min_t(int, count, max_count);
|
||||
|
@ -834,6 +834,9 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
|
||||
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
|
||||
return -EINVAL;
|
||||
|
||||
if (!kiq_ring->sched.ready || adev->job_hang)
|
||||
return 0;
|
||||
|
||||
ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
|
||||
if (!ring_funcs)
|
||||
return -ENOMEM;
|
||||
@ -858,8 +861,14 @@ int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
|
||||
|
||||
kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
|
||||
|
||||
if (kiq_ring->sched.ready && !adev->job_hang)
|
||||
r = amdgpu_ring_test_helper(kiq_ring);
|
||||
/* Submit unmap queue packet */
|
||||
amdgpu_ring_commit(kiq_ring);
|
||||
/*
 * Ring test will do a basic scratch register change check. Just run
 * this to ensure that the unmap queues packet submitted above was
 * processed successfully before returning.
 */
|
||||
r = amdgpu_ring_test_helper(kiq_ring);
|
||||
|
||||
spin_unlock(&kiq->ring_lock);
|
||||
|
||||
|
@ -4236,7 +4236,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
||||
* for throttling interrupt) = 60 seconds.
|
||||
*/
|
||||
ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
|
||||
ratelimit_state_init(&adev->virt.ras_telemetry_rs, 5 * HZ, 1);
|
||||
|
||||
ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
|
||||
ratelimit_set_flags(&adev->virt.ras_telemetry_rs, RATELIMIT_MSG_ON_RELEASE);
|
||||
|
||||
/* Registers mapping */
|
||||
/* TODO: block userspace mapping of io register */
|
||||
@ -5186,6 +5189,9 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
||||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
|
||||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
|
||||
amdgpu_ras_resume(adev);
|
||||
|
||||
amdgpu_virt_ras_telemetry_post_reset(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -6200,6 +6206,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
|
||||
bool p2p_access =
|
||||
!adev->gmc.xgmi.connected_to_cpu &&
|
||||
!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
|
||||
if (!p2p_access)
|
||||
dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
|
||||
pci_name(peer_adev->pdev));
|
||||
|
||||
bool is_large_bar = adev->gmc.visible_vram_size &&
|
||||
adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
|
||||
@ -6715,3 +6724,47 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
|
||||
{
|
||||
ssize_t size = 0;
|
||||
|
||||
if (!ring || !ring->adev)
|
||||
return size;
|
||||
|
||||
if (amdgpu_device_should_recover_gpu(ring->adev))
|
||||
size |= AMDGPU_RESET_TYPE_FULL;
|
||||
|
||||
if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
|
||||
!amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
|
||||
size |= AMDGPU_RESET_TYPE_SOFT_RESET;
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
|
||||
{
|
||||
ssize_t size = 0;
|
||||
|
||||
if (supported_reset == 0) {
|
||||
size += sysfs_emit_at(buf, size, "unsupported");
|
||||
size += sysfs_emit_at(buf, size, "\n");
|
||||
return size;
|
||||
|
||||
}
|
||||
|
||||
if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
|
||||
size += sysfs_emit_at(buf, size, "soft ");
|
||||
|
||||
if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
|
||||
size += sysfs_emit_at(buf, size, "queue ");
|
||||
|
||||
if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
|
||||
size += sysfs_emit_at(buf, size, "pipe ");
|
||||
|
||||
if (supported_reset & AMDGPU_RESET_TYPE_FULL)
|
||||
size += sysfs_emit_at(buf, size, "full ");
|
||||
|
||||
size += sysfs_emit_at(buf, size, "\n");
|
||||
return size;
|
||||
}
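The helpers above back the new `*_reset_mask` sysfs files, which print space-separated tokens such as `soft queue pipe full`, or `unsupported` when the mask is empty. A minimal userspace reader is sketched below; the `card0` device path is an assumption for illustration.

/*
 * Hypothetical userspace reader for one of the reset-mask sysfs files
 * added in this series (gfx_reset_mask is used here); the card0 path
 * is assumed for illustration.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[128], *tok, *save;
    FILE *f = fopen("/sys/class/drm/card0/device/gfx_reset_mask", "r");

    if (!f) {
        perror("gfx_reset_mask");
        return 1;
    }
    if (!fgets(buf, sizeof(buf), f)) {
        fclose(f);
        return 1;
    }
    fclose(f);

    buf[strcspn(buf, "\n")] = '\0';

    /* Each token corresponds to one bit set in the supported_reset mask. */
    for (tok = strtok_r(buf, " ", &save); tok;
         tok = strtok_r(NULL, " ", &save))
        printf("supported reset type: %s\n", tok);

    return 0;
}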
|
||||
|
@ -515,6 +515,9 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
|
||||
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
|
||||
return -EINVAL;
|
||||
|
||||
if (!kiq_ring->sched.ready || adev->job_hang || amdgpu_in_reset(adev))
|
||||
return 0;
|
||||
|
||||
spin_lock(&kiq->ring_lock);
|
||||
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
|
||||
adev->gfx.num_compute_rings)) {
|
||||
@ -528,20 +531,15 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
|
||||
&adev->gfx.compute_ring[j],
|
||||
RESET_QUEUES, 0, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* This is workaround: only skip kiq_ring test
|
||||
* during ras recovery in suspend stage for gfx9.4.3
|
||||
/* Submit unmap queue packet */
|
||||
amdgpu_ring_commit(kiq_ring);
|
||||
/*
 * Ring test will do a basic scratch register change check. Just run
 * this to ensure that the unmap queues packet submitted above was
 * processed successfully before returning.
 */
|
||||
if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
|
||||
amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
|
||||
amdgpu_ras_in_recovery(adev)) {
|
||||
spin_unlock(&kiq->ring_lock);
|
||||
return 0;
|
||||
}
|
||||
r = amdgpu_ring_test_helper(kiq_ring);
|
||||
|
||||
if (kiq_ring->sched.ready && !adev->job_hang)
|
||||
r = amdgpu_ring_test_helper(kiq_ring);
|
||||
spin_unlock(&kiq->ring_lock);
|
||||
|
||||
return r;
|
||||
@ -569,8 +567,11 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
|
||||
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock(&kiq->ring_lock);
|
||||
if (!adev->gfx.kiq[0].ring.sched.ready || adev->job_hang)
|
||||
return 0;
|
||||
|
||||
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
|
||||
spin_lock(&kiq->ring_lock);
|
||||
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
|
||||
adev->gfx.num_gfx_rings)) {
|
||||
spin_unlock(&kiq->ring_lock);
|
||||
@ -583,11 +584,17 @@ int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
|
||||
&adev->gfx.gfx_ring[j],
|
||||
PREEMPT_QUEUES, 0, 0);
|
||||
}
|
||||
}
|
||||
/* Submit unmap queue packet */
|
||||
amdgpu_ring_commit(kiq_ring);
|
||||
|
||||
if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
|
||||
/*
 * Ring test will do a basic scratch register change check.
 * Just run this to ensure that the unmap queues packet submitted
 * above was processed successfully before returning.
 */
|
||||
r = amdgpu_ring_test_helper(kiq_ring);
|
||||
spin_unlock(&kiq->ring_lock);
|
||||
spin_unlock(&kiq->ring_lock);
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
@ -692,7 +699,13 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
|
||||
kiq->pmf->kiq_map_queues(kiq_ring,
|
||||
&adev->gfx.compute_ring[j]);
|
||||
}
|
||||
|
||||
/* Submit map queue packet */
|
||||
amdgpu_ring_commit(kiq_ring);
|
||||
/*
 * Ring test will do a basic scratch register change check. Just run
 * this to ensure that the map queues packet submitted above was
 * processed successfully before returning.
 */
|
||||
r = amdgpu_ring_test_helper(kiq_ring);
|
||||
spin_unlock(&kiq->ring_lock);
|
||||
if (r)
|
||||
@ -743,7 +756,13 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
|
||||
&adev->gfx.gfx_ring[j]);
|
||||
}
|
||||
}
|
||||
|
||||
/* Submit map queue packet */
|
||||
amdgpu_ring_commit(kiq_ring);
|
||||
/*
 * Ring test will do a basic scratch register change check. Just run
 * this to ensure that the map queues packet submitted above was
 * processed successfully before returning.
 */
|
||||
r = amdgpu_ring_test_helper(kiq_ring);
|
||||
spin_unlock(&kiq->ring_lock);
|
||||
if (r)
|
||||
@ -885,6 +904,9 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return r;
|
||||
|
||||
if (adev->gfx.cp_ecc_error_irq.funcs) {
|
||||
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
|
||||
if (r)
|
||||
@ -1576,9 +1598,11 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
|
||||
if (adev->enforce_isolation[i] && !partition_values[i]) {
|
||||
/* Going from enabled to disabled */
|
||||
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
|
||||
amdgpu_mes_set_enforce_isolation(adev, i, false);
|
||||
} else if (!adev->enforce_isolation[i] && partition_values[i]) {
|
||||
/* Going from disabled to enabled */
|
||||
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
|
||||
amdgpu_mes_set_enforce_isolation(adev, i, true);
|
||||
}
|
||||
adev->enforce_isolation[i] = partition_values[i];
|
||||
}
|
||||
@ -1588,6 +1612,32 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||
|
||||
if (!adev)
|
||||
return -ENODEV;
|
||||
|
||||
return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||
|
||||
if (!adev)
|
||||
return -ENODEV;
|
||||
|
||||
return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(run_cleaner_shader, 0200,
|
||||
NULL, amdgpu_gfx_set_run_cleaner_shader);
|
||||
|
||||
@ -1601,6 +1651,11 @@ static DEVICE_ATTR(current_compute_partition, 0644,
|
||||
|
||||
static DEVICE_ATTR(available_compute_partition, 0444,
|
||||
amdgpu_gfx_get_available_compute_partition, NULL);
|
||||
static DEVICE_ATTR(gfx_reset_mask, 0444,
|
||||
amdgpu_gfx_get_gfx_reset_mask, NULL);
|
||||
|
||||
static DEVICE_ATTR(compute_reset_mask, 0444,
|
||||
amdgpu_gfx_get_compute_reset_mask, NULL);
|
||||
|
||||
static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
|
||||
{
|
||||
@ -1666,6 +1721,40 @@ static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
|
||||
device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
|
||||
}
|
||||
|
||||
static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
if (!amdgpu_gpu_recovery)
|
||||
return r;
|
||||
|
||||
if (adev->gfx.num_gfx_rings) {
|
||||
r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
if (adev->gfx.num_compute_rings) {
|
||||
r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (!amdgpu_gpu_recovery)
|
||||
return;
|
||||
|
||||
if (adev->gfx.num_gfx_rings)
|
||||
device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
|
||||
|
||||
if (adev->gfx.num_compute_rings)
|
||||
device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
|
||||
}
|
||||
|
||||
int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r;
|
||||
@ -1680,6 +1769,10 @@ int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
|
||||
if (r)
|
||||
dev_err(adev->dev, "failed to create isolation sysfs files");
|
||||
|
||||
r = amdgpu_gfx_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
dev_err(adev->dev, "failed to create reset mask sysfs files");
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -1687,6 +1780,7 @@ void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
amdgpu_gfx_sysfs_xcp_fini(adev);
|
||||
amdgpu_gfx_sysfs_isolation_shader_fini(adev);
|
||||
amdgpu_gfx_sysfs_reset_mask_fini(adev);
|
||||
}
|
||||
|
||||
int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
|
||||
|
@ -424,6 +424,8 @@ struct amdgpu_gfx {
|
||||
/* reset mask */
|
||||
uint32_t grbm_soft_reset;
|
||||
uint32_t srbm_soft_reset;
|
||||
uint32_t gfx_supported_reset;
|
||||
uint32_t compute_supported_reset;
|
||||
|
||||
/* gfx off */
|
||||
bool gfx_off_state; /* true: enabled, false: disabled */
|
||||
|
@ -47,7 +47,7 @@ int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
|
||||
adev->jpeg.indirect_sram = true;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
if (adev->jpeg.harvest_config & (1U << i))
|
||||
continue;
|
||||
|
||||
if (adev->jpeg.indirect_sram) {
|
||||
@ -73,7 +73,7 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
if (adev->jpeg.harvest_config & (1U << i))
|
||||
continue;
|
||||
|
||||
amdgpu_bo_free_kernel(
|
||||
@ -110,7 +110,7 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
|
||||
unsigned int i, j;
|
||||
|
||||
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
|
||||
if (adev->jpeg.harvest_config & (1 << i))
|
||||
if (adev->jpeg.harvest_config & (1U << i))
|
||||
continue;
|
||||
|
||||
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
|
||||
@ -357,7 +357,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
|
||||
if (!adev)
|
||||
return -ENODEV;
|
||||
|
||||
mask = (1 << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1;
|
||||
mask = (1ULL << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1;
|
||||
if ((val & mask) == 0)
|
||||
return -EINVAL;
|
||||
|
||||
@ -388,7 +388,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val)
|
||||
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
|
||||
ring = &adev->jpeg.inst[i].ring_dec[j];
|
||||
if (ring->sched.ready)
|
||||
mask |= 1 << ((i * adev->jpeg.num_jpeg_rings) + j);
|
||||
mask |= 1ULL << ((i * adev->jpeg.num_jpeg_rings) + j);
|
||||
}
|
||||
}
|
||||
*val = mask;
|
||||
@ -415,3 +415,38 @@ void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev)
|
||||
&amdgpu_debugfs_jpeg_sched_mask_fops);
|
||||
#endif
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_get_jpeg_reset_mask(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||
|
||||
if (!adev)
|
||||
return -ENODEV;
|
||||
|
||||
return amdgpu_show_reset_mask(buf, adev->jpeg.supported_reset);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(jpeg_reset_mask, 0444,
|
||||
amdgpu_get_jpeg_reset_mask, NULL);
|
||||
|
||||
int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
if (adev->jpeg.num_jpeg_inst) {
|
||||
r = device_create_file(adev->dev, &dev_attr_jpeg_reset_mask);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->jpeg.num_jpeg_inst)
|
||||
device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask);
|
||||
}
|
||||
|
@ -128,6 +128,7 @@ struct amdgpu_jpeg {
|
||||
uint16_t inst_mask;
|
||||
uint8_t num_inst_per_aid;
|
||||
bool indirect_sram;
|
||||
uint32_t supported_reset;
|
||||
};
|
||||
|
||||
int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
|
||||
@ -150,5 +151,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
|
||||
int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
|
||||
enum AMDGPU_UCODE_ID ucode_id);
|
||||
void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev);
|
||||
int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev);
|
||||
void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev);
|
||||
|
||||
#endif /*__AMDGPU_JPEG_H__*/
|
||||
|
@ -104,7 +104,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
|
||||
return 0;
|
||||
|
||||
r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
|
||||
AMDGPU_GEM_DOMAIN_GTT,
|
||||
AMDGPU_GEM_DOMAIN_VRAM,
|
||||
&adev->mes.event_log_gpu_obj,
|
||||
&adev->mes.event_log_gpu_addr,
|
||||
&adev->mes.event_log_cpu_addr);
|
||||
@ -192,17 +192,6 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
|
||||
(uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
|
||||
}
|
||||
|
||||
r = amdgpu_device_wb_get(adev, &adev->mes.read_val_offs);
|
||||
if (r) {
|
||||
dev_err(adev->dev,
|
||||
"(%d) read_val_offs alloc failed\n", r);
|
||||
goto error;
|
||||
}
|
||||
adev->mes.read_val_gpu_addr =
|
||||
adev->wb.gpu_addr + (adev->mes.read_val_offs * 4);
|
||||
adev->mes.read_val_ptr =
|
||||
(uint32_t *)&adev->wb.wb[adev->mes.read_val_offs];
|
||||
|
||||
r = amdgpu_mes_doorbell_init(adev);
|
||||
if (r)
|
||||
goto error;
|
||||
@ -223,8 +212,6 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
|
||||
amdgpu_device_wb_free(adev,
|
||||
adev->mes.query_status_fence_offs[i]);
|
||||
}
|
||||
if (adev->mes.read_val_ptr)
|
||||
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
|
||||
|
||||
idr_destroy(&adev->mes.pasid_idr);
|
||||
idr_destroy(&adev->mes.gang_id_idr);
|
||||
@ -249,8 +236,6 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
|
||||
amdgpu_device_wb_free(adev,
|
||||
adev->mes.query_status_fence_offs[i]);
|
||||
}
|
||||
if (adev->mes.read_val_ptr)
|
||||
amdgpu_device_wb_free(adev, adev->mes.read_val_offs);
|
||||
|
||||
amdgpu_mes_doorbell_free(adev);
|
||||
|
||||
@ -921,10 +906,19 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
|
||||
{
|
||||
struct mes_misc_op_input op_input;
|
||||
int r, val = 0;
|
||||
uint32_t addr_offset = 0;
|
||||
uint64_t read_val_gpu_addr;
|
||||
uint32_t *read_val_ptr;
|
||||
|
||||
if (amdgpu_device_wb_get(adev, &addr_offset)) {
|
||||
DRM_ERROR("critical bug! too many mes readers\n");
|
||||
goto error;
|
||||
}
|
||||
read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
|
||||
read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
|
||||
op_input.op = MES_MISC_OP_READ_REG;
|
||||
op_input.read_reg.reg_offset = reg;
|
||||
op_input.read_reg.buffer_addr = adev->mes.read_val_gpu_addr;
|
||||
op_input.read_reg.buffer_addr = read_val_gpu_addr;
|
||||
|
||||
if (!adev->mes.funcs->misc_op) {
|
||||
DRM_ERROR("mes rreg is not supported!\n");
|
||||
@ -935,9 +929,11 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
|
||||
if (r)
|
||||
DRM_ERROR("failed to read reg (0x%x)\n", reg);
|
||||
else
|
||||
val = *(adev->mes.read_val_ptr);
|
||||
val = *(read_val_ptr);
|
||||
|
||||
error:
|
||||
if (addr_offset)
|
||||
amdgpu_device_wb_free(adev, addr_offset);
|
||||
return val;
|
||||
}
|
||||
|
||||
@ -1682,6 +1678,29 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
|
||||
return is_supported;
|
||||
}
|
||||
|
||||
/* Fix me -- node_id is used to identify the correct MES instances in the future */
|
||||
int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
|
||||
{
|
||||
struct mes_misc_op_input op_input = {0};
|
||||
int r;
|
||||
|
||||
op_input.op = MES_MISC_OP_CHANGE_CONFIG;
|
||||
op_input.change_config.option.limit_single_process = enable ? 1 : 0;
|
||||
|
||||
if (!adev->mes.funcs->misc_op) {
|
||||
dev_err(adev->dev, "mes change config is not supported!\n");
|
||||
r = -EINVAL;
|
||||
goto error;
|
||||
}
|
||||
|
||||
r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
|
||||
if (r)
|
||||
dev_err(adev->dev, "failed to change_config.\n");
|
||||
|
||||
error:
|
||||
return r;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_DEBUG_FS)
|
||||
|
||||
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
|
||||
|
@ -40,6 +40,7 @@
|
||||
#define AMDGPU_MES_VERSION_MASK 0x00000fff
|
||||
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
|
||||
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
|
||||
#define AMDGPU_MES_MSCRATCH_SIZE 0x8000
|
||||
|
||||
enum amdgpu_mes_priority_level {
|
||||
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
|
||||
@ -120,9 +121,6 @@ struct amdgpu_mes {
|
||||
uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
|
||||
uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
|
||||
uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
|
||||
uint32_t read_val_offs;
|
||||
uint64_t read_val_gpu_addr;
|
||||
uint32_t *read_val_ptr;
|
||||
|
||||
uint32_t saved_flags;
|
||||
|
||||
@ -311,6 +309,7 @@ enum mes_misc_opcode {
|
||||
MES_MISC_OP_WRM_REG_WAIT,
|
||||
MES_MISC_OP_WRM_REG_WR_WAIT,
|
||||
MES_MISC_OP_SET_SHADER_DEBUGGER,
|
||||
MES_MISC_OP_CHANGE_CONFIG,
|
||||
};
|
||||
|
||||
struct mes_misc_op_input {
|
||||
@ -349,6 +348,21 @@ struct mes_misc_op_input {
|
||||
uint32_t tcp_watch_cntl[4];
|
||||
uint32_t trap_en;
|
||||
} set_shader_debugger;
|
||||
|
||||
struct {
|
||||
union {
|
||||
struct {
|
||||
uint32_t limit_single_process : 1;
|
||||
uint32_t enable_hws_logging_buffer : 1;
|
||||
uint32_t reserved : 30;
|
||||
};
|
||||
uint32_t all;
|
||||
} option;
|
||||
struct {
|
||||
uint32_t tdr_level;
|
||||
uint32_t tdr_delay;
|
||||
} tdr_config;
|
||||
} change_config;
|
||||
};
|
||||
};
|
||||
|
||||
@ -519,4 +533,7 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
|
||||
}
|
||||
|
||||
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
|
||||
|
||||
int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
|
||||
|
||||
#endif /* __AMDGPU_MES_H__ */
|
||||
|
@ -162,7 +162,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
|
||||
* When GTT is just an alternative to VRAM make sure that we
|
||||
* only use it as fallback and still try to fill up VRAM first.
|
||||
*/
|
||||
if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
|
||||
if (domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
|
||||
!(adev->flags & AMD_IS_APU))
|
||||
places[c].flags |= TTM_PL_FLAG_FALLBACK;
|
||||
c++;
|
||||
}
|
||||
|
@ -1214,6 +1214,42 @@ static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
|
||||
}
|
||||
}
|
||||
|
||||
static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
|
||||
struct ras_query_if *query_if,
|
||||
struct ras_err_data *err_data,
|
||||
struct ras_query_context *qctx)
|
||||
{
|
||||
unsigned long new_ue, new_ce, new_de;
|
||||
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
|
||||
const char *blk_name = get_ras_block_str(&query_if->head);
|
||||
u64 event_id = qctx->evid.event_id;
|
||||
|
||||
new_ce = err_data->ce_count - obj->err_data.ce_count;
|
||||
new_ue = err_data->ue_count - obj->err_data.ue_count;
|
||||
new_de = err_data->de_count - obj->err_data.de_count;
|
||||
|
||||
if (new_ce) {
|
||||
RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
|
||||
"detected in %s block\n",
|
||||
new_ce,
|
||||
blk_name);
|
||||
}
|
||||
|
||||
if (new_ue) {
|
||||
RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
|
||||
"detected in %s block\n",
|
||||
new_ue,
|
||||
blk_name);
|
||||
}
|
||||
|
||||
if (new_de) {
|
||||
RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
|
||||
"detected in %s block\n",
|
||||
new_de,
|
||||
blk_name);
|
||||
}
|
||||
}
|
||||
|
||||
static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
|
||||
{
|
||||
struct ras_err_node *err_node;
|
||||
@ -1237,6 +1273,15 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
|
||||
}
|
||||
}
|
||||
|
||||
static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
|
||||
struct ras_err_data *err_data)
|
||||
{
|
||||
/* Host reports absolute counts */
|
||||
obj->err_data.ue_count = err_data->ue_count;
|
||||
obj->err_data.ce_count = err_data->ce_count;
|
||||
obj->err_data.de_count = err_data->de_count;
|
||||
}
|
||||
|
||||
static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
|
||||
{
|
||||
struct ras_common_if head;
|
||||
@ -1323,7 +1368,9 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
|
||||
if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
|
||||
return -EINVAL;
|
||||
|
||||
if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
|
||||
if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
|
||||
return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
|
||||
} else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
|
||||
if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
|
||||
amdgpu_ras_get_ecc_info(adev, err_data);
|
||||
} else {
|
||||
@ -1405,14 +1452,22 @@ static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
|
||||
if (ret)
|
||||
goto out_fini_err_data;
|
||||
|
||||
amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
|
||||
if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
|
||||
amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
|
||||
amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
|
||||
} else {
|
||||
/* Host provides absolute error counts. First generate the report
 * using the previous VF internal count against the new host count,
 * then update the VF internal count.
 */
|
||||
amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
|
||||
amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
|
||||
}
|
||||
|
||||
info->ue_count = obj->err_data.ue_count;
|
||||
info->ce_count = obj->err_data.ce_count;
|
||||
info->de_count = obj->err_data.de_count;
|
||||
|
||||
amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
|
||||
|
||||
out_fini_err_data:
|
||||
amdgpu_ras_error_data_fini(&err_data);
|
||||
|
||||
@ -3453,6 +3508,11 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
|
||||
if (!amdgpu_ras_asic_supported(adev))
|
||||
return;
|
||||
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
if (amdgpu_virt_get_ras_capability(adev))
|
||||
goto init_ras_enabled_flag;
|
||||
}
|
||||
|
||||
/* query ras capability from psp */
|
||||
if (amdgpu_psp_get_ras_capability(&adev->psp))
|
||||
goto init_ras_enabled_flag;
|
||||
@ -3925,7 +3985,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev)
|
||||
}
|
||||
|
||||
/* Guest side doesn't need init ras feature */
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
|
||||
return 0;
|
||||
|
||||
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
|
||||
@ -4392,11 +4452,14 @@ bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
|
||||
return false;
|
||||
}
|
||||
|
||||
if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode))
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
*error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
|
||||
} else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
|
||||
*error_query_mode =
|
||||
(con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
|
||||
else
|
||||
} else {
|
||||
*error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -365,6 +365,7 @@ enum amdgpu_ras_error_query_mode {
|
||||
AMDGPU_RAS_INVALID_ERROR_QUERY = 0,
|
||||
AMDGPU_RAS_DIRECT_ERROR_QUERY = 1,
|
||||
AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2,
|
||||
AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY = 3,
|
||||
};
|
||||
|
||||
/* ras error status register fields */
|
||||
|
@ -413,3 +413,44 @@ void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev)
|
||||
&amdgpu_debugfs_sdma_sched_mask_fops);
|
||||
#endif
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||
|
||||
if (!adev)
|
||||
return -ENODEV;
|
||||
|
||||
return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(sdma_reset_mask, 0444,
|
||||
amdgpu_get_sdma_reset_mask, NULL);
|
||||
|
||||
int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
if (!amdgpu_gpu_recovery)
|
||||
return r;
|
||||
|
||||
if (adev->sdma.num_instances) {
|
||||
r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (!amdgpu_gpu_recovery)
|
||||
return;
|
||||
|
||||
if (adev->sdma.num_instances)
|
||||
device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
|
||||
}
|
||||
|
@ -116,6 +116,7 @@ struct amdgpu_sdma {
|
||||
struct ras_common_if *ras_if;
|
||||
struct amdgpu_sdma_ras *ras;
|
||||
uint32_t *ip_dump;
|
||||
uint32_t supported_reset;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -176,4 +177,6 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
|
||||
bool duplicate);
|
||||
int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
|
||||
void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev);
|
||||
int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev);
|
||||
void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev);
|
||||
#endif
|
||||
|
@ -318,6 +318,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *r
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
return r;
|
||||
|
||||
if (amdgpu_ras_is_supported(adev, ras_block->block)) {
|
||||
r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
|
||||
if (r)
|
||||
|
@ -523,6 +523,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
|
||||
|
||||
adev->unique_id =
|
||||
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
|
||||
adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all;
|
||||
adev->virt.ras_telemetry_en_caps.all =
|
||||
((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all;
|
||||
break;
|
||||
default:
|
||||
dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
|
||||
@ -703,6 +706,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
|
||||
adev->virt.fw_reserve.p_vf2pf =
|
||||
(struct amd_sriov_msg_vf2pf_info_header *)
|
||||
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
|
||||
adev->virt.fw_reserve.ras_telemetry =
|
||||
(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
|
||||
} else if (adev->mman.drv_vram_usage_va) {
|
||||
adev->virt.fw_reserve.p_pf2vf =
|
||||
(struct amd_sriov_msg_pf2vf_info_header *)
|
||||
@ -710,6 +715,8 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
|
||||
adev->virt.fw_reserve.p_vf2pf =
|
||||
(struct amd_sriov_msg_vf2pf_info_header *)
|
||||
(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
|
||||
adev->virt.fw_reserve.ras_telemetry =
|
||||
(adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
|
||||
}
|
||||
|
||||
amdgpu_virt_read_pf2vf_data(adev);
|
||||
@ -1144,3 +1151,185 @@ bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
|
||||
|
||||
return xnack_mode;
|
||||
}
|
||||
|
||||
bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev)
|
||||
{
|
||||
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
|
||||
|
||||
if (!amdgpu_sriov_ras_caps_en(adev))
|
||||
return false;
|
||||
|
||||
if (adev->virt.ras_en_caps.bits.block_umc)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC);
|
||||
if (adev->virt.ras_en_caps.bits.block_sdma)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA);
|
||||
if (adev->virt.ras_en_caps.bits.block_gfx)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX);
|
||||
if (adev->virt.ras_en_caps.bits.block_mmhub)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB);
|
||||
if (adev->virt.ras_en_caps.bits.block_athub)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB);
|
||||
if (adev->virt.ras_en_caps.bits.block_pcie_bif)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF);
|
||||
if (adev->virt.ras_en_caps.bits.block_hdp)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP);
|
||||
if (adev->virt.ras_en_caps.bits.block_xgmi_wafl)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL);
|
||||
if (adev->virt.ras_en_caps.bits.block_df)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF);
|
||||
if (adev->virt.ras_en_caps.bits.block_smn)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN);
|
||||
if (adev->virt.ras_en_caps.bits.block_sem)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM);
|
||||
if (adev->virt.ras_en_caps.bits.block_mp0)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0);
|
||||
if (adev->virt.ras_en_caps.bits.block_mp1)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1);
|
||||
if (adev->virt.ras_en_caps.bits.block_fuse)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE);
|
||||
if (adev->virt.ras_en_caps.bits.block_mca)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA);
|
||||
if (adev->virt.ras_en_caps.bits.block_vcn)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN);
|
||||
if (adev->virt.ras_en_caps.bits.block_jpeg)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG);
|
||||
if (adev->virt.ras_en_caps.bits.block_ih)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH);
|
||||
if (adev->virt.ras_en_caps.bits.block_mpio)
|
||||
adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO);
|
||||
|
||||
if (adev->virt.ras_en_caps.bits.poison_propogation_mode)
|
||||
con->poison_supported = true; /* Poison is handled by host */
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline enum amd_sriov_ras_telemetry_gpu_block
|
||||
amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block) {
|
||||
switch (block) {
|
||||
case AMDGPU_RAS_BLOCK__UMC:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_UMC;
|
||||
case AMDGPU_RAS_BLOCK__SDMA:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_SDMA;
|
||||
case AMDGPU_RAS_BLOCK__GFX:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_GFX;
|
||||
case AMDGPU_RAS_BLOCK__MMHUB:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_MMHUB;
|
||||
case AMDGPU_RAS_BLOCK__ATHUB:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_ATHUB;
|
||||
case AMDGPU_RAS_BLOCK__PCIE_BIF:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF;
|
||||
case AMDGPU_RAS_BLOCK__HDP:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_HDP;
|
||||
case AMDGPU_RAS_BLOCK__XGMI_WAFL:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL;
|
||||
case AMDGPU_RAS_BLOCK__DF:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_DF;
|
||||
case AMDGPU_RAS_BLOCK__SMN:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_SMN;
|
||||
case AMDGPU_RAS_BLOCK__SEM:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_SEM;
|
||||
case AMDGPU_RAS_BLOCK__MP0:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_MP0;
|
||||
case AMDGPU_RAS_BLOCK__MP1:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_MP1;
|
||||
case AMDGPU_RAS_BLOCK__FUSE:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_FUSE;
|
||||
case AMDGPU_RAS_BLOCK__MCA:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_MCA;
|
||||
case AMDGPU_RAS_BLOCK__VCN:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_VCN;
|
||||
case AMDGPU_RAS_BLOCK__JPEG:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_JPEG;
|
||||
case AMDGPU_RAS_BLOCK__IH:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_IH;
|
||||
case AMDGPU_RAS_BLOCK__MPIO:
|
||||
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
|
||||
default:
|
||||
dev_err(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n", block);
|
||||
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
|
||||
}
|
||||
}
|
||||
|
||||
static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
|
||||
struct amdsriov_ras_telemetry *host_telemetry)
|
||||
{
|
||||
struct amd_sriov_ras_telemetry_error_count *tmp = NULL;
|
||||
uint32_t checksum, used_size;
|
||||
|
||||
checksum = host_telemetry->header.checksum;
|
||||
used_size = host_telemetry->header.used_size;
|
||||
|
||||
if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
|
||||
return 0;
|
||||
|
||||
tmp = kmalloc(used_size, GFP_KERNEL);
|
||||
if (!tmp)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(tmp, &host_telemetry->body.error_count, used_size);
|
||||
|
||||
if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
|
||||
goto out;
|
||||
|
||||
memcpy(&adev->virt.count_cache, tmp,
|
||||
min(used_size, sizeof(adev->virt.count_cache)));
|
||||
out:
|
||||
kfree(tmp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update)
|
||||
{
|
||||
struct amdgpu_virt *virt = &adev->virt;
|
||||
|
||||
/* Host allows 15 ras telemetry requests per 60 seconds. Afterwards, the Host
|
||||
* will ignore incoming guest messages. Ratelimit the guest messages to
|
||||
* prevent guest self DOS.
|
||||
*/
|
||||
if (__ratelimit(&adev->virt.ras_telemetry_rs) || force_update) {
|
||||
if (!virt->ops->req_ras_err_count(adev))
|
||||
amdgpu_virt_cache_host_error_counts(adev,
|
||||
adev->virt.fw_reserve.ras_telemetry);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Bypass ACA interface and query ECC counts directly from host */
|
||||
int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
|
||||
struct ras_err_data *err_data)
|
||||
{
|
||||
enum amd_sriov_ras_telemetry_gpu_block sriov_block;
|
||||
|
||||
sriov_block = amdgpu_ras_block_to_sriov(adev, block);
|
||||
|
||||
if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
|
||||
!amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Host Access may be lost during reset, just return last cached data. */
|
||||
if (down_read_trylock(&adev->reset_domain->sem)) {
|
||||
amdgpu_virt_req_ras_err_count_internal(adev, false);
|
||||
up_read(&adev->reset_domain->sem);
|
||||
}
|
||||
|
||||
err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count;
|
||||
err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count;
|
||||
err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
|
||||
{
|
||||
unsigned long ue_count, ce_count;
|
||||
|
||||
if (amdgpu_sriov_ras_telemetry_en(adev)) {
|
||||
amdgpu_virt_req_ras_err_count_internal(adev, true);
|
||||
amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -95,6 +95,7 @@ struct amdgpu_virt_ops {
|
||||
void (*ras_poison_handler)(struct amdgpu_device *adev,
|
||||
enum amdgpu_ras_block block);
|
||||
bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
|
||||
int (*req_ras_err_count)(struct amdgpu_device *adev);
|
||||
};
|
||||
|
||||
/*
|
||||
@ -103,6 +104,7 @@ struct amdgpu_virt_ops {
|
||||
struct amdgpu_virt_fw_reserve {
|
||||
struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
|
||||
struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
|
||||
void *ras_telemetry;
|
||||
unsigned int checksum_key;
|
||||
};
|
||||
|
||||
@ -136,6 +138,8 @@ enum AMDGIM_FEATURE_FLAG {
|
||||
AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
|
||||
/* MES info */
|
||||
AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
|
||||
AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
|
||||
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
|
||||
};
|
||||
|
||||
enum AMDGIM_REG_ACCESS_FLAG {
|
||||
@ -276,6 +280,12 @@ struct amdgpu_virt {
|
||||
uint32_t autoload_ucode_id;
|
||||
|
||||
struct mutex rlcg_reg_lock;
|
||||
|
||||
union amd_sriov_ras_caps ras_en_caps;
|
||||
union amd_sriov_ras_caps ras_telemetry_en_caps;
|
||||
|
||||
struct ratelimit_state ras_telemetry_rs;
|
||||
struct amd_sriov_ras_telemetry_error_count count_cache;
|
||||
};
|
||||
|
||||
struct amdgpu_video_codec_info;
|
||||
@ -320,6 +330,15 @@ struct amdgpu_video_codec_info;
|
||||
#define amdgpu_sriov_vf_mmio_access_protection(adev) \
|
||||
((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
|
||||
|
||||
#define amdgpu_sriov_ras_caps_en(adev) \
|
||||
((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CAPS)
|
||||
|
||||
#define amdgpu_sriov_ras_telemetry_en(adev) \
|
||||
(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry)
|
||||
|
||||
#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
|
||||
(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))
|
||||
|
||||
static inline bool is_virtual_machine(void)
|
||||
{
|
||||
#if defined(CONFIG_X86)
|
||||
@ -383,4 +402,8 @@ bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
|
||||
u32 acc_flags, u32 hwip,
|
||||
bool write, u32 *rlcg_flag);
|
||||
u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id);
|
||||
bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
|
||||
int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
|
||||
struct ras_err_data *err_data);
|
||||
int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
|
||||
#endif
|
||||
|
@ -377,6 +377,13 @@ static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
ret = vpe_init_microcode(vpe);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
/* TODO: Add queue reset mask when FW fully supports it */
|
||||
adev->vpe.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
|
||||
ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
|
||||
if (ret)
|
||||
goto out;
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
@ -389,6 +396,7 @@ static int vpe_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
release_firmware(vpe->fw);
|
||||
vpe->fw = NULL;
|
||||
|
||||
amdgpu_vpe_sysfs_reset_mask_fini(adev);
|
||||
vpe_ring_fini(vpe);
|
||||
|
||||
amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
|
||||
@ -865,6 +873,41 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring)
|
||||
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
|
||||
}
|
||||
|
||||
static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||
|
||||
if (!adev)
|
||||
return -ENODEV;
|
||||
|
||||
return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(vpe_reset_mask, 0444,
|
||||
amdgpu_get_vpe_reset_mask, NULL);
|
||||
|
||||
int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev)
|
||||
{
|
||||
int r = 0;
|
||||
|
||||
if (adev->vpe.num_instances) {
|
||||
r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask);
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->vpe.num_instances)
|
||||
device_remove_file(adev->dev, &dev_attr_vpe_reset_mask);
|
||||
}
|
||||
|
||||
static const struct amdgpu_ring_funcs vpe_ring_funcs = {
|
||||
.type = AMDGPU_RING_TYPE_VPE,
|
||||
.align_mask = 0xf,
|
||||
|
@ -79,6 +79,7 @@ struct amdgpu_vpe {
|
||||
|
||||
uint32_t num_instances;
|
||||
bool collaborate_mode;
|
||||
uint32_t supported_reset;
|
||||
};
|
||||
|
||||
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev);
|
||||
@ -86,6 +87,8 @@ int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe);
|
||||
int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe);
|
||||
int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe);
|
||||
int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe);
|
||||
void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev);
|
||||
int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev);
|
||||
|
||||
#define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0)
|
||||
#define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? (vpe)->funcs->ring_start((vpe)) : 0)
|
||||
|
@ -471,6 +471,16 @@ static const char *xcp_desc[] = {
|
||||
[AMDGPU_CPX_PARTITION_MODE] = "CPX",
|
||||
};
|
||||
|
||||
static const char *nps_desc[] = {
|
||||
[UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
|
||||
[AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
|
||||
[AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
|
||||
[AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
|
||||
[AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
|
||||
[AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
|
||||
[AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
|
||||
};
|
||||
|
||||
ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);
|
||||
|
||||
#define to_xcp_attr(x) \
|
||||
@ -540,6 +550,26 @@ static ssize_t supported_xcp_configs_show(struct kobject *kobj,
|
||||
return size;
|
||||
}
|
||||
|
||||
static ssize_t supported_nps_configs_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
|
||||
int size = 0, mode;
|
||||
char *sep = "";
|
||||
|
||||
if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
|
||||
return sysfs_emit(buf, "Not supported\n");
|
||||
|
||||
for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
|
||||
size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
|
||||
sep = ", ";
|
||||
}
|
||||
|
||||
size += sysfs_emit_at(buf, size, "\n");
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
static ssize_t xcp_config_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
@ -596,6 +626,9 @@ static const struct kobj_type xcp_cfg_sysfs_ktype = {
|
||||
static struct kobj_attribute supp_part_sysfs_mode =
|
||||
__ATTR_RO(supported_xcp_configs);
|
||||
|
||||
static struct kobj_attribute supp_nps_sysfs_mode =
|
||||
__ATTR_RO(supported_nps_configs);
|
||||
|
||||
static const struct attribute *xcp_attrs[] = {
|
||||
&supp_part_sysfs_mode.attr,
|
||||
&xcp_cfg_sysfs_mode.attr,
|
||||
@ -625,13 +658,24 @@ void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
|
||||
if (r)
|
||||
goto err1;
|
||||
|
||||
if (adev->gmc.supported_nps_modes != 0) {
|
||||
r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
|
||||
if (r) {
|
||||
sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
|
||||
goto err1;
|
||||
}
|
||||
}
|
||||
|
||||
mode = (xcp_cfg->xcp_mgr->mode ==
|
||||
AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
|
||||
AMDGPU_SPX_PARTITION_MODE :
|
||||
xcp_cfg->xcp_mgr->mode;
|
||||
r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
|
||||
if (r)
|
||||
if (r) {
|
||||
sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
|
||||
sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
|
||||
goto err1;
|
||||
}
|
||||
|
||||
xcp_cfg->mode = mode;
|
||||
for (i = 0; i < xcp_cfg->num_res; i++) {
|
||||
@ -653,6 +697,7 @@ void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
|
||||
kobject_put(&xcp_res->kobj);
|
||||
}
|
||||
|
||||
sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
|
||||
sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
|
||||
err1:
|
||||
kobject_put(&xcp_cfg->kobj);
|
||||
@ -673,6 +718,7 @@ void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
|
||||
kobject_put(&xcp_res->kobj);
|
||||
}
|
||||
|
||||
sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
|
||||
sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
|
||||
kobject_put(&xcp_cfg->kobj);
|
||||
}
|
||||
|
@ -28,17 +28,21 @@
|
||||
#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
|
||||
#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
|
||||
#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
|
||||
|
||||
#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
|
||||
#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2
|
||||
#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64
|
||||
/*
|
||||
* layout
|
||||
* 0 64KB 65KB 66KB
|
||||
* | VBIOS | PF2VF | VF2PF | Bad Page | ...
|
||||
* | 64KB | 1KB | 1KB |
|
||||
* 0 64KB 65KB 66KB 68KB 132KB
|
||||
* | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
|
||||
* | 64KB | 1KB | 1KB | 2KB | 64KB | ...
|
||||
*/
|
||||
|
||||
#define AMD_SRIOV_MSG_SIZE_KB 1
|
||||
#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
|
||||
#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
|
||||
#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
|
||||
#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)
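As a cross-check of the reserved-VRAM layout comment above, the offsets these macros expand to can be evaluated directly. The following standalone sketch simply re-evaluates the macro arithmetic (values copied from this header); it is not driver code.

/*
 * Standalone check of the reserved-VRAM layout described above.
 * The macro values are copied from this header; the program itself is
 * only an illustration and is not part of the driver.
 */
#include <stdio.h>

#define AMD_SRIOV_MSG_VBIOS_SIZE_KB          64
#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
#define AMD_SRIOV_MSG_SIZE_KB                1
#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB       2
#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB      64

#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB         AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB         (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB      (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)

int main(void)
{
    /* Matches the layout comment: 0 | 64KB | 65KB | 66KB | 68KB | 132KB */
    printf("PF2VF   @ %d KB\n", AMD_SRIOV_MSG_PF2VF_OFFSET_KB);      /* 64 */
    printf("VF2PF   @ %d KB\n", AMD_SRIOV_MSG_VF2PF_OFFSET_KB);      /* 65 */
    printf("BadPage @ %d KB\n", AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB);   /* 66 */
    printf("RAS tlm @ %d KB (ends at %d KB)\n",
           AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB,
           AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB +
           AMD_SRIOV_RAS_TELEMETRY_SIZE_KB);                         /* 68, 132 */
    return 0;
}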
|
||||
|
||||
/*
|
||||
* PF2VF history log:
|
||||
@ -86,30 +90,59 @@ enum amd_sriov_ucode_engine_id {
|
||||
|
||||
union amd_sriov_msg_feature_flags {
|
||||
struct {
|
||||
uint32_t error_log_collect : 1;
|
||||
uint32_t host_load_ucodes : 1;
|
||||
uint32_t host_flr_vramlost : 1;
|
||||
uint32_t mm_bw_management : 1;
|
||||
uint32_t pp_one_vf_mode : 1;
|
||||
uint32_t reg_indirect_acc : 1;
|
||||
uint32_t av1_support : 1;
|
||||
uint32_t vcn_rb_decouple : 1;
|
||||
uint32_t mes_info_enable : 1;
|
||||
uint32_t reserved : 23;
|
||||
uint32_t error_log_collect : 1;
|
||||
uint32_t host_load_ucodes : 1;
|
||||
uint32_t host_flr_vramlost : 1;
|
||||
uint32_t mm_bw_management : 1;
|
||||
uint32_t pp_one_vf_mode : 1;
|
||||
uint32_t reg_indirect_acc : 1;
|
||||
uint32_t av1_support : 1;
|
||||
uint32_t vcn_rb_decouple : 1;
|
||||
uint32_t mes_info_dump_enable : 1;
|
||||
uint32_t ras_caps : 1;
|
||||
uint32_t ras_telemetry : 1;
|
||||
uint32_t reserved : 21;
|
||||
} flags;
|
||||
uint32_t all;
|
||||
};
|
||||
|
||||
union amd_sriov_reg_access_flags {
|
||||
struct {
|
||||
uint32_t vf_reg_access_ih : 1;
|
||||
uint32_t vf_reg_access_mmhub : 1;
|
||||
uint32_t vf_reg_access_gc : 1;
|
||||
uint32_t reserved : 29;
|
||||
uint32_t vf_reg_access_ih : 1;
|
||||
uint32_t vf_reg_access_mmhub : 1;
|
||||
uint32_t vf_reg_access_gc : 1;
|
||||
uint32_t reserved : 29;
|
||||
} flags;
|
||||
uint32_t all;
|
||||
};
|
||||
|
||||
union amd_sriov_ras_caps {
|
||||
struct {
|
||||
uint64_t block_umc : 1;
|
||||
uint64_t block_sdma : 1;
|
||||
uint64_t block_gfx : 1;
|
||||
uint64_t block_mmhub : 1;
|
||||
uint64_t block_athub : 1;
|
||||
uint64_t block_pcie_bif : 1;
|
||||
uint64_t block_hdp : 1;
|
||||
uint64_t block_xgmi_wafl : 1;
|
||||
uint64_t block_df : 1;
|
||||
uint64_t block_smn : 1;
|
||||
uint64_t block_sem : 1;
|
||||
uint64_t block_mp0 : 1;
|
||||
uint64_t block_mp1 : 1;
|
||||
uint64_t block_fuse : 1;
|
||||
uint64_t block_mca : 1;
|
||||
uint64_t block_vcn : 1;
|
||||
uint64_t block_jpeg : 1;
|
||||
uint64_t block_ih : 1;
|
||||
uint64_t block_mpio : 1;
|
||||
uint64_t poison_propogation_mode : 1;
|
||||
uint64_t reserved : 44;
|
||||
} bits;
|
||||
uint64_t all;
|
||||
};
|
||||
|
||||
union amd_sriov_msg_os_info {
|
||||
struct {
|
||||
uint32_t windows : 1;
|
||||
@ -158,7 +191,7 @@ struct amd_sriov_msg_pf2vf_info_header {
|
||||
uint32_t reserved[2];
|
||||
};
|
||||
|
||||
#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49)
|
||||
#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (55)
|
||||
struct amd_sriov_msg_pf2vf_info {
|
||||
/* header contains size and version */
|
||||
struct amd_sriov_msg_pf2vf_info_header header;
|
||||
@ -211,6 +244,12 @@ struct amd_sriov_msg_pf2vf_info {
|
||||
uint32_t pcie_atomic_ops_support_flags;
|
||||
/* Portion of GPU memory occupied by VF. MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */
|
||||
uint32_t gpu_capacity;
|
||||
/* vf bdf on host pci tree for debug only */
|
||||
uint32_t bdf_on_host;
|
||||
uint32_t more_bp; //Reserved for future use.
|
||||
union amd_sriov_ras_caps ras_en_caps;
|
||||
union amd_sriov_ras_caps ras_telemetry_en_caps;
|
||||
|
||||
/* reserved */
|
||||
uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
|
||||
} __packed;
|
||||
@ -283,8 +322,12 @@ enum amd_sriov_mailbox_request_message {
|
||||
MB_REQ_MSG_REL_GPU_FINI_ACCESS,
|
||||
MB_REQ_MSG_REQ_GPU_RESET_ACCESS,
|
||||
MB_REQ_MSG_REQ_GPU_INIT_DATA,
|
||||
MB_REQ_MSG_PSP_VF_CMD_RELAY,
|
||||
|
||||
MB_REQ_MSG_LOG_VF_ERROR = 200,
|
||||
MB_REQ_MSG_READY_TO_RESET = 201,
|
||||
MB_REQ_MSG_RAS_POISON = 202,
|
||||
MB_REQ_RAS_ERROR_COUNT = 203,
|
||||
};
|
||||
|
||||
/* mailbox message send from host to guest */
|
||||
@ -297,10 +340,60 @@ enum amd_sriov_mailbox_response_message {
|
||||
MB_RES_MSG_FAIL,
|
||||
MB_RES_MSG_QUERY_ALIVE,
|
||||
MB_RES_MSG_GPU_INIT_DATA_READY,
|
||||
MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
|
||||
|
||||
MB_RES_MSG_TEXT_MESSAGE = 255
|
||||
};
|
||||
|
||||
enum amd_sriov_ras_telemetry_gpu_block {
|
||||
RAS_TELEMETRY_GPU_BLOCK_UMC = 0,
|
||||
RAS_TELEMETRY_GPU_BLOCK_SDMA = 1,
|
||||
RAS_TELEMETRY_GPU_BLOCK_GFX = 2,
|
||||
RAS_TELEMETRY_GPU_BLOCK_MMHUB = 3,
|
||||
RAS_TELEMETRY_GPU_BLOCK_ATHUB = 4,
|
||||
RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF = 5,
|
||||
RAS_TELEMETRY_GPU_BLOCK_HDP = 6,
|
||||
RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL = 7,
|
||||
RAS_TELEMETRY_GPU_BLOCK_DF = 8,
|
||||
RAS_TELEMETRY_GPU_BLOCK_SMN = 9,
|
||||
RAS_TELEMETRY_GPU_BLOCK_SEM = 10,
|
||||
RAS_TELEMETRY_GPU_BLOCK_MP0 = 11,
|
||||
RAS_TELEMETRY_GPU_BLOCK_MP1 = 12,
|
||||
RAS_TELEMETRY_GPU_BLOCK_FUSE = 13,
|
||||
RAS_TELEMETRY_GPU_BLOCK_MCA = 14,
|
||||
RAS_TELEMETRY_GPU_BLOCK_VCN = 15,
|
||||
RAS_TELEMETRY_GPU_BLOCK_JPEG = 16,
|
||||
RAS_TELEMETRY_GPU_BLOCK_IH = 17,
|
||||
RAS_TELEMETRY_GPU_BLOCK_MPIO = 18,
|
||||
RAS_TELEMETRY_GPU_BLOCK_COUNT = 19,
|
||||
};
|
||||
|
||||
struct amd_sriov_ras_telemetry_header {
|
||||
uint32_t checksum;
|
||||
uint32_t used_size;
|
||||
uint32_t reserved[2];
|
||||
};
|
||||
|
||||
struct amd_sriov_ras_telemetry_error_count {
|
||||
struct {
|
||||
uint32_t ce_count;
|
||||
uint32_t ue_count;
|
||||
uint32_t de_count;
|
||||
uint32_t ce_overflow_count;
|
||||
uint32_t ue_overflow_count;
|
||||
uint32_t de_overflow_count;
|
||||
uint32_t reserved[6];
|
||||
} block[RAS_TELEMETRY_GPU_BLOCK_COUNT];
|
||||
};
|
||||
|
||||
struct amdsriov_ras_telemetry {
|
||||
struct amd_sriov_ras_telemetry_header header;
|
||||
|
||||
union {
|
||||
struct amd_sriov_ras_telemetry_error_count error_count;
|
||||
} body;
|
||||
};
|
||||
|
||||
/* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */
|
||||
enum amd_sriov_gpu_init_data_version {
|
||||
GPU_INIT_DATA_READY_V1 = 1,
|
||||
|
@@ -4825,6 +4825,11 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
}
/* TODO: Add queue reset mask when FW fully supports it */
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);

r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0);
if (r) {
@@ -1580,6 +1580,8 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}

switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
@@ -1691,6 +1693,24 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}

adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 2280) &&
(adev->gfx.mec_fw_version >= 2410)) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
break;
default:
break;
}

if (!adev->enable_mes_kiq) {
r = amdgpu_gfx_kiq_init(adev, GFX11_MEC_HPD_SIZE, 0);
if (r) {
@@ -1437,6 +1437,12 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}

/* TODO: Add queue reset mask when FW fully supports it */
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);

if (!adev->enable_mes_kiq) {
r = amdgpu_gfx_kiq_init(adev, GFX12_MEC_HPD_SIZE, 0);
if (r) {
@@ -4823,6 +4823,13 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
/* Submit unmap queue packet */
amdgpu_ring_commit(kiq_ring);
/*
* Ring test will do a basic scratch register change check. Just run
* this to ensure that unmap queues that is submitted before got
* processed successfully before returning.
*/
r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
@@ -2374,6 +2374,12 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}

/* TODO: Add queue reset mask when FW fully supports it */
adev->gfx.gfx_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);

r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
if (r) {
DRM_ERROR("Failed to init KIQ BOs!\n");
@@ -1157,6 +1157,19 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}

adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
if (adev->gfx.mec_fw_version >= 155) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
default:
break;
}
r = gfx_v9_4_3_gpu_early_init(adev);
if (r)
return r;
@@ -1130,8 +1130,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT | AMDGPU_GEM_CREATE_EXT_COHERENT);
bool is_vram = bo->tbo.resource &&
bo->tbo.resource->mem_type == TTM_PL_VRAM;
bool coherent = bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
AMDGPU_GEM_CREATE_EXT_COHERENT);
bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT;
bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
struct amdgpu_vm *vm = mapping->bo_va->base.vm;
@@ -1139,6 +1141,8 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
bool snoop = false;
bool is_local;

dma_resv_assert_held(bo->tbo.base.resv);

switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 1):
case IP_VERSION(9, 4, 2):
@@ -1257,9 +1261,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}

if (bo && bo->tbo.resource)
gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.bo,
mapping, flags);
if ((*flags & AMDGPU_PTE_VALID) && bo)
gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
}

static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -121,6 +121,12 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);

r = amdgpu_jpeg_ras_sw_init(adev);
if (r)
return r;
/* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;

@@ -143,6 +149,7 @@ static int jpeg_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;

amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);

return r;
@@ -159,6 +159,13 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}

/* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;

return 0;
}

@@ -178,6 +185,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;

amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);

return r;
@@ -153,6 +153,13 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH);
}

/* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;

return 0;
}

@@ -172,6 +179,7 @@ static int jpeg_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;

amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);

return r;
@@ -100,6 +100,12 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);

/* TODO: Add queue reset mask when FW fully supports it */
adev->jpeg.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
if (r)
return r;
return 0;
}

@@ -119,6 +125,7 @@ static int jpeg_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;

amdgpu_jpeg_sysfs_reset_mask_fini(adev);
r = amdgpu_jpeg_sw_fini(adev);

return r;
@@ -644,6 +644,18 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes,
sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
break;
case MES_MISC_OP_CHANGE_CONFIG:
if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) < 0x63) {
dev_err(mes->adev->dev, "MES FW version must be larger than 0x63 to support limit single process feature.\n");
return -EINVAL;
}
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
misc_pkt.change_config.opcode =
MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
misc_pkt.change_config.option.bits.limit_single_process =
input->change_config.option.limit_single_process;
break;

default:
DRM_ERROR("unsupported misc op (%d) \n", input->op);
return -EINVAL;
@@ -708,6 +720,9 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes->event_log_gpu_addr;
}

if (enforce_isolation)
mes_set_hw_res_pkt.limit_single_process = 1;

return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
@@ -908,6 +923,16 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
uint32_t pipe, data = 0;

if (enable) {
if (amdgpu_mes_log_enable) {
WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
lower_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE));
WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
upper_32_bits(adev->mes.event_log_gpu_addr + AMDGPU_MES_LOG_BUFFER_SIZE));
dev_info(adev->dev, "Setup CP MES MSCRATCH address : 0x%x. 0x%x\n",
RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
}

data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
data = REG_SET_FIELD(data, CP_MES_CNTL,
@@ -1370,7 +1395,7 @@ static int mes_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;

adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;

r = amdgpu_mes_init(adev);
if (r)
@@ -531,6 +531,14 @@ static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
break;
case MES_MISC_OP_CHANGE_CONFIG:
misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
misc_pkt.change_config.opcode =
MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
misc_pkt.change_config.option.bits.limit_single_process =
input->change_config.option.limit_single_process;
break;

default:
DRM_ERROR("unsupported misc op (%d) \n", input->op);
return -EINVAL;
@@ -550,7 +558,7 @@ static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 100;
mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;

return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
@@ -624,6 +632,9 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
}

if (enforce_isolation)
mes_set_hw_res_pkt.limit_single_process = 1;

return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
@@ -61,15 +61,18 @@ static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
enum idh_event event)
{
int r = 0;
u32 reg;

reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event)
if (reg == IDH_FAIL)
r = -EINVAL;
else if (reg != event)
return -ENOENT;

xgpu_nv_mailbox_send_ack(adev);

return 0;
return r;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
@@ -178,6 +181,9 @@ static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
if (data1 != 0)
event = IDH_RAS_POISON_READY;
break;
case IDH_REQ_RAS_ERROR_COUNT:
event = IDH_RAS_ERROR_COUNT_READY;
break;
default:
break;
}
@@ -456,6 +462,11 @@ static bool xgpu_nv_rcvd_ras_intr(struct amdgpu_device *adev)
return (msg == IDH_RAS_ERROR_DETECTED || msg == 0xFFFFFFFF);
}

static int xgpu_nv_req_ras_err_count(struct amdgpu_device *adev)
{
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_ERROR_COUNT);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -466,4 +477,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.trans_msg = xgpu_nv_mailbox_trans_msg,
.ras_poison_handler = xgpu_nv_ras_poison_handler,
.rcvd_ras_intr = xgpu_nv_rcvd_ras_intr,
.req_ras_err_count = xgpu_nv_req_ras_err_count,
};
@@ -40,6 +40,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
IDH_RAS_POISON = 202,
IDH_REQ_RAS_ERROR_COUNT = 203,
};

enum idh_event {
@@ -54,6 +55,8 @@ enum idh_event {
IDH_RAS_POISON_READY,
IDH_PF_SOFT_FLR_NOTIFICATION,
IDH_RAS_ERROR_DETECTED,
IDH_RAS_ERROR_COUNT_READY = 11,

IDH_TEXT_MESSAGE = 255,
};
@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev)
|
||||
if (def != data)
|
||||
WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data);
|
||||
|
||||
switch (adev->ip_versions[NBIO_HWIP][0]) {
|
||||
case IP_VERSION(7, 7, 0):
|
||||
data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23);
|
||||
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
|
||||
|
@ -67,8 +67,8 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
|
||||
|
||||
/* Navi */
|
||||
static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] = {
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs nv_video_codecs_encode = {
|
||||
@ -94,8 +94,8 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode = {
|
||||
|
||||
/* Sienna Cichlid */
|
||||
static const struct amdgpu_video_codec_info sc_video_codecs_encode_array[] = {
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs sc_video_codecs_encode = {
|
||||
@ -136,8 +136,8 @@ static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 = {
|
||||
|
||||
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
|
||||
static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] = {
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2160, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 7680, 4352, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] = {
|
||||
|
@ -1430,6 +1430,10 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
}
|
||||
}
|
||||
|
||||
/* TODO: Add queue reset mask when FW fully supports it */
|
||||
adev->sdma.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
|
||||
|
||||
if (amdgpu_sdma_ras_sw_init(adev)) {
|
||||
dev_err(adev->dev, "fail to initialize sdma ras block\n");
|
||||
return -EINVAL;
|
||||
@ -1442,6 +1446,10 @@ static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
else
|
||||
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
|
||||
|
||||
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -1456,6 +1464,7 @@ static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
amdgpu_ring_fini(&adev->sdma.instance[i].page);
|
||||
}
|
||||
|
||||
amdgpu_sdma_sysfs_reset_mask_fini(adev);
|
||||
if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
|
||||
amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5))
|
||||
amdgpu_sdma_destroy_inst_ctx(adev, true);
|
||||
|
@ -1452,6 +1452,19 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
return r;
|
||||
}
|
||||
|
||||
adev->sdma.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
|
||||
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
|
||||
case IP_VERSION(5, 0, 0):
|
||||
case IP_VERSION(5, 0, 2):
|
||||
case IP_VERSION(5, 0, 5):
|
||||
if (adev->sdma.instance[0].fw_version >= 35)
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Allocate memory for SDMA IP Dump buffer */
|
||||
ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
|
||||
if (ptr)
|
||||
@ -1459,6 +1472,10 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
else
|
||||
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
|
||||
|
||||
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -1470,6 +1487,7 @@ static int sdma_v5_0_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
for (i = 0; i < adev->sdma.num_instances; i++)
|
||||
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
|
||||
|
||||
amdgpu_sdma_sysfs_reset_mask_fini(adev);
|
||||
amdgpu_sdma_destroy_inst_ctx(adev, false);
|
||||
|
||||
kfree(adev->sdma.ip_dump);
|
||||
|
@ -1357,6 +1357,24 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
return r;
|
||||
}
|
||||
|
||||
adev->sdma.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
|
||||
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
|
||||
case IP_VERSION(5, 2, 0):
|
||||
case IP_VERSION(5, 2, 2):
|
||||
case IP_VERSION(5, 2, 3):
|
||||
case IP_VERSION(5, 2, 4):
|
||||
if (adev->sdma.instance[0].fw_version >= 76)
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
case IP_VERSION(5, 2, 5):
|
||||
if (adev->sdma.instance[0].fw_version >= 34)
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Allocate memory for SDMA IP Dump buffer */
|
||||
ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
|
||||
if (ptr)
|
||||
@ -1364,6 +1382,10 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
else
|
||||
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
|
||||
|
||||
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -1375,6 +1397,7 @@ static int sdma_v5_2_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
for (i = 0; i < adev->sdma.num_instances; i++)
|
||||
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
|
||||
|
||||
amdgpu_sdma_sysfs_reset_mask_fini(adev);
|
||||
amdgpu_sdma_destroy_inst_ctx(adev, true);
|
||||
|
||||
kfree(adev->sdma.ip_dump);
|
||||
|
@ -1350,6 +1350,19 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
return r;
|
||||
}
|
||||
|
||||
adev->sdma.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
|
||||
switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
|
||||
case IP_VERSION(6, 0, 0):
|
||||
case IP_VERSION(6, 0, 2):
|
||||
case IP_VERSION(6, 0, 3):
|
||||
if (adev->sdma.instance[0].fw_version >= 21)
|
||||
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (amdgpu_sdma_ras_sw_init(adev)) {
|
||||
dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
|
||||
return -EINVAL;
|
||||
@ -1362,6 +1375,10 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
|
||||
else
|
||||
DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n");
|
||||
|
||||
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -1373,6 +1390,7 @@ static int sdma_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
|
||||
for (i = 0; i < adev->sdma.num_instances; i++)
|
||||
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
|
||||
|
||||
amdgpu_sdma_sysfs_reset_mask_fini(adev);
|
||||
amdgpu_sdma_destroy_inst_ctx(adev, true);
|
||||
|
||||
kfree(adev->sdma.ip_dump);
|
||||
|
@ -90,8 +90,8 @@ static const struct amd_ip_funcs soc15_common_ip_funcs;
|
||||
/* Vega, Raven, Arcturus */
|
||||
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
|
||||
{
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codecs vega_video_codecs_encode =
|
||||
|
@ -49,13 +49,13 @@ static const struct amd_ip_funcs soc21_common_ip_funcs;
|
||||
|
||||
/* SOC21 */
|
||||
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
@ -96,14 +96,14 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = {
|
||||
|
||||
/* SRIOV SOC21, not const since data is controlled by host */
|
||||
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = {
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = {
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
|
||||
};
|
||||
|
||||
static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = {
|
||||
|
@ -48,7 +48,7 @@
|
||||
static const struct amd_ip_funcs soc24_common_ip_funcs;
|
||||
|
||||
static const struct amdgpu_video_codec_info vcn_5_0_0_video_codecs_encode_array_vcn0[] = {
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 0)},
|
||||
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
|
||||
};
|
||||
|
@ -95,6 +95,13 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring);
|
||||
static void vcn_v4_0_3_set_ras_funcs(struct amdgpu_device *adev);
|
||||
static void vcn_v4_0_3_enable_ras(struct amdgpu_device *adev,
|
||||
int inst_idx, bool indirect);
|
||||
|
||||
static inline bool vcn_v4_0_3_normalizn_reqd(struct amdgpu_device *adev)
|
||||
{
|
||||
return (amdgpu_sriov_vf(adev) ||
|
||||
(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)));
|
||||
}
|
||||
|
||||
/**
|
||||
* vcn_v4_0_3_early_init - set function pointers
|
||||
*
|
||||
@ -1428,8 +1435,8 @@ static uint64_t vcn_v4_0_3_unified_ring_get_wptr(struct amdgpu_ring *ring)
|
||||
static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
|
||||
uint32_t val, uint32_t mask)
|
||||
{
|
||||
/* For VF, only local offsets should be used */
|
||||
if (amdgpu_sriov_vf(ring->adev))
|
||||
/* Use normalized offsets when required */
|
||||
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
|
||||
reg = NORMALIZE_VCN_REG_OFFSET(reg);
|
||||
|
||||
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
|
||||
@ -1440,8 +1447,8 @@ static void vcn_v4_0_3_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t
|
||||
|
||||
static void vcn_v4_0_3_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
|
||||
{
|
||||
/* For VF, only local offsets should be used */
|
||||
if (amdgpu_sriov_vf(ring->adev))
|
||||
/* Use normalized offsets when required */
|
||||
if (vcn_v4_0_3_normalizn_reqd(ring->adev))
|
||||
reg = NORMALIZE_VCN_REG_OFFSET(reg);
|
||||
|
||||
amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
|
||||
|
@ -136,15 +136,15 @@ static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[]
|
||||
{
|
||||
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
|
||||
.max_width = 4096,
|
||||
.max_height = 2304,
|
||||
.max_pixels_per_frame = 4096 * 2304,
|
||||
.max_height = 4096,
|
||||
.max_pixels_per_frame = 4096 * 4096,
|
||||
.max_level = 0,
|
||||
},
|
||||
{
|
||||
.codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
|
||||
.max_width = 4096,
|
||||
.max_height = 2304,
|
||||
.max_pixels_per_frame = 4096 * 2304,
|
||||
.max_height = 4096,
|
||||
.max_pixels_per_frame = 4096 * 4096,
|
||||
.max_level = 0,
|
||||
},
|
||||
};
|
||||
|
@ -534,7 +534,8 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
|
||||
kfd->cwsr_isa = cwsr_trap_gfx11_hex;
|
||||
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
|
||||
} else {
|
||||
BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex) > PAGE_SIZE);
|
||||
BUILD_BUG_ON(sizeof(cwsr_trap_gfx12_hex)
|
||||
> KFD_CWSR_TMA_OFFSET);
|
||||
kfd->cwsr_isa = cwsr_trap_gfx12_hex;
|
||||
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx12_hex);
|
||||
}
|
||||
|
@ -202,6 +202,8 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
|
||||
int r, queue_type;
|
||||
uint64_t wptr_addr_off;
|
||||
|
||||
if (!dqm->sched_running || dqm->sched_halt)
|
||||
return 0;
|
||||
if (!down_read_trylock(&adev->reset_domain->sem))
|
||||
return -EIO;
|
||||
|
||||
@ -270,6 +272,8 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
|
||||
int r;
|
||||
struct mes_remove_queue_input queue_input;
|
||||
|
||||
if (!dqm->sched_running || dqm->sched_halt)
|
||||
return 0;
|
||||
if (!down_read_trylock(&adev->reset_domain->sem))
|
||||
return -EIO;
|
||||
|
||||
@ -292,7 +296,7 @@ static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
|
||||
return r;
|
||||
}
|
||||
|
||||
static int remove_all_queues_mes(struct device_queue_manager *dqm)
|
||||
static int remove_all_kfd_queues_mes(struct device_queue_manager *dqm)
|
||||
{
|
||||
struct device_process_node *cur;
|
||||
struct device *dev = dqm->dev->adev->dev;
|
||||
@ -319,6 +323,33 @@ static int remove_all_queues_mes(struct device_queue_manager *dqm)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int add_all_kfd_queues_mes(struct device_queue_manager *dqm)
|
||||
{
|
||||
struct device_process_node *cur;
|
||||
struct device *dev = dqm->dev->adev->dev;
|
||||
struct qcm_process_device *qpd;
|
||||
struct queue *q;
|
||||
int retval = 0;
|
||||
|
||||
list_for_each_entry(cur, &dqm->queues, list) {
|
||||
qpd = cur->qpd;
|
||||
list_for_each_entry(q, &qpd->queues_list, list) {
|
||||
if (!q->properties.is_active)
|
||||
continue;
|
||||
retval = add_queue_mes(dqm, q, qpd);
|
||||
if (retval) {
|
||||
dev_err(dev, "%s: Failed to add queue %d for dev %d",
|
||||
__func__,
|
||||
q->properties.queue_id,
|
||||
dqm->dev->id);
|
||||
return retval;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
static int suspend_all_queues_mes(struct device_queue_manager *dqm)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
|
||||
@ -1742,7 +1773,7 @@ static int halt_cpsch(struct device_queue_manager *dqm)
|
||||
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
|
||||
USE_DEFAULT_GRACE_PERIOD, false);
|
||||
else
|
||||
ret = remove_all_queues_mes(dqm);
|
||||
ret = remove_all_kfd_queues_mes(dqm);
|
||||
}
|
||||
dqm->sched_halt = true;
|
||||
dqm_unlock(dqm);
|
||||
@ -1768,6 +1799,9 @@ static int unhalt_cpsch(struct device_queue_manager *dqm)
|
||||
ret = execute_queues_cpsch(dqm,
|
||||
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
|
||||
0, USE_DEFAULT_GRACE_PERIOD);
|
||||
else
|
||||
ret = add_all_kfd_queues_mes(dqm);
|
||||
|
||||
dqm_unlock(dqm);
|
||||
|
||||
return ret;
|
||||
@ -1867,7 +1901,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
|
||||
if (!dqm->dev->kfd->shared_resources.enable_mes)
|
||||
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
|
||||
else
|
||||
remove_all_queues_mes(dqm);
|
||||
remove_all_kfd_queues_mes(dqm);
|
||||
|
||||
dqm->sched_running = false;
|
||||
|
||||
|
@ -341,8 +341,8 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
|
||||
attr_sdma);
|
||||
struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler;
|
||||
|
||||
INIT_WORK(&sdma_activity_work_handler.sdma_activity_work,
|
||||
kfd_sdma_activity_worker);
|
||||
INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work,
|
||||
kfd_sdma_activity_worker);
|
||||
|
||||
sdma_activity_work_handler.pdd = pdd;
|
||||
sdma_activity_work_handler.sdma_activity_counter = 0;
|
||||
@ -350,6 +350,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
|
||||
schedule_work(&sdma_activity_work_handler.sdma_activity_work);
|
||||
|
||||
flush_work(&sdma_activity_work_handler.sdma_activity_work);
|
||||
destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work);
|
||||
|
||||
return snprintf(buffer, PAGE_SIZE, "%llu\n",
|
||||
(sdma_activity_work_handler.sdma_activity_counter)/
|
||||
@ -853,8 +854,10 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* A prior open of /dev/kfd could have already created the process. */
|
||||
process = find_process(thread, false);
|
||||
/* A prior open of /dev/kfd could have already created the process.
|
||||
* find_process will increase process kref in this case
|
||||
*/
|
||||
process = find_process(thread, true);
|
||||
if (process) {
|
||||
pr_debug("Process already found\n");
|
||||
} else {
|
||||
@ -902,8 +905,6 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
|
||||
init_waitqueue_head(&process->wait_irq_drain);
|
||||
}
|
||||
out:
|
||||
if (!IS_ERR(process))
|
||||
kref_get(&process->ref);
|
||||
mutex_unlock(&kfd_processes_mutex);
|
||||
mmput(thread->mm);
|
||||
|
||||
@ -1189,10 +1190,8 @@ static void kfd_process_ref_release(struct kref *ref)
|
||||
|
||||
static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
|
||||
{
|
||||
int idx = srcu_read_lock(&kfd_processes_srcu);
|
||||
struct kfd_process *p = find_process_by_mm(mm);
|
||||
|
||||
srcu_read_unlock(&kfd_processes_srcu, idx);
|
||||
/* This increments p->ref counter if kfd process p exists */
|
||||
struct kfd_process *p = kfd_lookup_process_by_mm(mm);
|
||||
|
||||
return p ? &p->mmu_notifier : ERR_PTR(-ESRCH);
|
||||
}
|
||||
|
@ -1307,6 +1307,29 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
|
||||
DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
|
||||
adev->dm.dmcub_fw_version);
|
||||
|
||||
/* Keeping sanity checks off if
|
||||
* DCN31 >= 4.0.59.0
|
||||
* DCN314 >= 8.0.16.0
|
||||
* Otherwise, turn on sanity checks
|
||||
*/
|
||||
switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
|
||||
case IP_VERSION(3, 1, 2):
|
||||
case IP_VERSION(3, 1, 3):
|
||||
if (adev->dm.dmcub_fw_version &&
|
||||
adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
|
||||
adev->dm.dmcub_fw_version < DMUB_FW_VERSION(4, 0, 59))
|
||||
adev->dm.dc->debug.sanity_checks = true;
|
||||
break;
|
||||
case IP_VERSION(3, 1, 4):
|
||||
if (adev->dm.dmcub_fw_version &&
|
||||
adev->dm.dmcub_fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
|
||||
adev->dm.dmcub_fw_version < DMUB_FW_VERSION(8, 0, 16))
|
||||
adev->dm.dc->debug.sanity_checks = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -6790,7 +6813,7 @@ create_stream_for_sink(struct drm_connector *connector,
|
||||
if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
|
||||
tf = TRANSFER_FUNC_GAMMA_22;
|
||||
mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
|
||||
aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
|
||||
aconnector->sr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
|
||||
|
||||
}
|
||||
finish:
|
||||
@ -8894,6 +8917,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
|
||||
}
|
||||
}
|
||||
|
||||
static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
|
||||
const struct dm_crtc_state *acrtc_state,
|
||||
const u64 current_ts)
|
||||
{
|
||||
struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
|
||||
struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
|
||||
struct amdgpu_dm_connector *aconn =
|
||||
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
|
||||
|
||||
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
|
||||
if (pr->config.replay_supported && !pr->replay_feature_enabled)
|
||||
amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
|
||||
else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
|
||||
!psr->psr_feature_enabled)
|
||||
if (!aconn->disallow_edp_enter_psr)
|
||||
amdgpu_dm_link_setup_psr(acrtc_state->stream);
|
||||
}
|
||||
|
||||
/* Decrement skip count when SR is enabled and we're doing fast updates. */
|
||||
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
|
||||
(psr->psr_feature_enabled || pr->config.replay_supported)) {
|
||||
if (aconn->sr_skip_count > 0)
|
||||
aconn->sr_skip_count--;
|
||||
|
||||
/* Allow SR when skip count is 0. */
|
||||
acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;
|
||||
|
||||
/*
|
||||
* If sink supports PSR SU/Panel Replay, there is no need to rely on
|
||||
* a vblank event disable request to enable PSR/RP. PSR SU/RP
|
||||
* can be enabled immediately once OS demonstrates an
|
||||
* adequate number of fast atomic commits to notify KMD
|
||||
* of update events. See `vblank_control_worker()`.
|
||||
*/
|
||||
if (acrtc_attach->dm_irq_params.allow_sr_entry &&
|
||||
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
|
||||
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
|
||||
#endif
|
||||
(current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
|
||||
if (pr->replay_feature_enabled && !pr->replay_allow_active)
|
||||
amdgpu_dm_replay_enable(acrtc_state->stream, true);
|
||||
if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
|
||||
!psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
|
||||
amdgpu_dm_psr_enable(acrtc_state->stream);
|
||||
}
|
||||
} else {
|
||||
acrtc_attach->dm_irq_params.allow_sr_entry = false;
|
||||
}
|
||||
}
|
||||
|
||||
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
struct drm_device *dev,
|
||||
struct amdgpu_display_manager *dm,
|
||||
@ -9047,7 +9120,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
* during the PSR-SU was disabled.
|
||||
*/
|
||||
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
|
||||
acrtc_attach->dm_irq_params.allow_psr_entry &&
|
||||
acrtc_attach->dm_irq_params.allow_sr_entry &&
|
||||
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
|
||||
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
|
||||
#endif
|
||||
@ -9222,9 +9295,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
bundle->stream_update.abm_level = &acrtc_state->abm_level;
|
||||
|
||||
mutex_lock(&dm->dc_lock);
|
||||
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
|
||||
acrtc_state->stream->link->psr_settings.psr_allow_active)
|
||||
amdgpu_dm_psr_disable(acrtc_state->stream);
|
||||
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
|
||||
if (acrtc_state->stream->link->replay_settings.replay_allow_active)
|
||||
amdgpu_dm_replay_disable(acrtc_state->stream);
|
||||
if (acrtc_state->stream->link->psr_settings.psr_allow_active)
|
||||
amdgpu_dm_psr_disable(acrtc_state->stream);
|
||||
}
|
||||
mutex_unlock(&dm->dc_lock);
|
||||
|
||||
/*
|
||||
@ -9265,57 +9341,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
|
||||
dm_update_pflip_irq_state(drm_to_adev(dev),
|
||||
acrtc_attach);
|
||||
|
||||
if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
|
||||
if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
|
||||
!acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
|
||||
struct amdgpu_dm_connector *aconn =
|
||||
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
|
||||
amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
|
||||
} else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
|
||||
!acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
|
||||
|
||||
struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
|
||||
acrtc_state->stream->dm_stream_context;
|
||||
|
||||
if (!aconn->disallow_edp_enter_psr)
|
||||
amdgpu_dm_link_setup_psr(acrtc_state->stream);
|
||||
}
|
||||
}
|
||||
|
||||
/* Decrement skip count when PSR is enabled and we're doing fast updates. */
|
||||
if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
|
||||
acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
|
||||
struct amdgpu_dm_connector *aconn =
|
||||
(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
|
||||
|
||||
if (aconn->psr_skip_count > 0)
|
||||
aconn->psr_skip_count--;
|
||||
|
||||
/* Allow PSR when skip count is 0. */
|
||||
acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
|
||||
|
||||
/*
|
||||
* If sink supports PSR SU, there is no need to rely on
|
||||
* a vblank event disable request to enable PSR. PSR SU
|
||||
* can be enabled immediately once OS demonstrates an
|
||||
* adequate number of fast atomic commits to notify KMD
|
||||
* of update events. See `vblank_control_worker()`.
|
||||
*/
|
||||
if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
|
||||
acrtc_attach->dm_irq_params.allow_psr_entry &&
|
||||
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
|
||||
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
|
||||
#endif
|
||||
!acrtc_state->stream->link->psr_settings.psr_allow_active &&
|
||||
!aconn->disallow_edp_enter_psr &&
|
||||
(timestamp_ns -
|
||||
acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
|
||||
500000000)
|
||||
amdgpu_dm_psr_enable(acrtc_state->stream);
|
||||
} else {
|
||||
acrtc_attach->dm_irq_params.allow_psr_entry = false;
|
||||
}
|
||||
|
||||
amdgpu_dm_enable_self_refresh(acrtc_attach, acrtc_state, timestamp_ns);
|
||||
mutex_unlock(&dm->dc_lock);
|
||||
}
|
||||
|
||||
@ -12101,7 +12127,7 @@ static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
|
||||
break;
|
||||
}
|
||||
|
||||
while (j < EDID_LENGTH) {
|
||||
while (j < EDID_LENGTH - sizeof(struct amd_vsdb_block)) {
|
||||
struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
|
||||
unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
|
||||
|
||||
|
@ -727,7 +727,7 @@ struct amdgpu_dm_connector {
|
||||
/* Cached display modes */
|
||||
struct drm_display_mode freesync_vid_base;
|
||||
|
||||
int psr_skip_count;
|
||||
int sr_skip_count;
|
||||
bool disallow_edp_enter_psr;
|
||||
|
||||
/* Record progress status of mst*/
|
||||
|
@ -265,11 +265,10 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
|
||||
* where the SU region is the full hactive*vactive region. See
|
||||
* fill_dc_dirty_rects().
|
||||
*/
|
||||
if (vblank_work->stream && vblank_work->stream->link) {
|
||||
if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) {
|
||||
amdgpu_dm_crtc_set_panel_sr_feature(
|
||||
vblank_work, vblank_work->enable,
|
||||
vblank_work->acrtc->dm_irq_params.allow_psr_entry ||
|
||||
vblank_work->stream->link->replay_settings.replay_feature_enabled);
|
||||
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
|
||||
}
|
||||
|
||||
if (dm->active_vblank_irq_count == 0)
|
||||
|
@ -33,7 +33,7 @@ struct dm_irq_params {
|
||||
struct mod_vrr_params vrr_params;
|
||||
struct dc_stream_state *stream;
|
||||
int active_planes;
|
||||
bool allow_psr_entry;
|
||||
bool allow_sr_entry;
|
||||
struct mod_freesync_config freesync_config;
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
@ -3122,14 +3122,12 @@ static enum bp_result bios_parser_get_vram_info(
|
||||
struct dc_vram_info *info)
|
||||
{
|
||||
struct bios_parser *bp = BP_FROM_DCB(dcb);
|
||||
static enum bp_result result = BP_RESULT_BADBIOSTABLE;
|
||||
enum bp_result result = BP_RESULT_BADBIOSTABLE;
|
||||
struct atom_common_table_header *header;
|
||||
struct atom_data_revision revision;
|
||||
|
||||
// vram info moved to umc_info for DCN4x
|
||||
if (dcb->ctx->dce_version >= DCN_VERSION_4_01 &&
|
||||
dcb->ctx->dce_version < DCN_VERSION_MAX &&
|
||||
info && DATA_TABLES(umc_info)) {
|
||||
if (info && DATA_TABLES(umc_info)) {
|
||||
header = GET_IMAGE(struct atom_common_table_header,
|
||||
DATA_TABLES(umc_info));
|
||||
|
||||
|
@ -265,6 +265,9 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state)
|
||||
dc_state_copy_internal(new_state, src_state);
|
||||
|
||||
#ifdef CONFIG_DRM_AMD_DC_FP
|
||||
new_state->bw_ctx.dml2 = NULL;
|
||||
new_state->bw_ctx.dml2_dc_power_source = NULL;
|
||||
|
||||
if (src_state->bw_ctx.dml2 &&
|
||||
!dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
|
||||
dc_state_release(new_state);
|
||||
|
@ -55,7 +55,7 @@ struct aux_payload;
|
||||
struct set_config_cmd_payload;
|
||||
struct dmub_notification;
|
||||
|
||||
#define DC_VER "3.2.308"
|
||||
#define DC_VER "3.2.309"
|
||||
|
||||
#define MAX_SURFACES 3
|
||||
#define MAX_PLANES 6
|
||||
|
@ -181,6 +181,7 @@ struct dc_panel_patch {
|
||||
unsigned int disable_colorimetry;
|
||||
uint8_t blankstream_before_otg_off;
|
||||
bool oled_optimize_display_on;
|
||||
unsigned int force_mst_blocked_discovery;
|
||||
};
|
||||
|
||||
struct dc_edid_caps {
|
||||
|
@ -46,7 +46,7 @@
|
||||
#include "clk_mgr.h"
|
||||
|
||||
__printf(3, 4)
|
||||
unsigned int snprintf_count(char *pbuf, unsigned int bufsize, char *fmt, ...)
|
||||
unsigned int snprintf_count(char *pbuf, unsigned int bufsize, const char *fmt, ...)
|
||||
{
|
||||
int ret_vsnprintf;
|
||||
unsigned int chars_printed;
|
||||
|
@ -143,7 +143,7 @@ void generic_reg_wait(const struct dc_context *ctx,
|
||||
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
|
||||
const char *func_name, int line);
|
||||
|
||||
unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...);
|
||||
unsigned int snprintf_count(char *pBuf, unsigned int bufSize, const char *fmt, ...);
|
||||
|
||||
/* These macros need to be used with soc15 registers in order to retrieve
|
||||
* the actual offset.
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "dml2_pmo_dcn4_fams2.h"
|
||||
|
||||
static const double MIN_VACTIVE_MARGIN_PCT = 0.25; // We need more than non-zero margin because DET buffer granularity can alter vactive latency hiding
|
||||
static const double MIN_BLANK_STUTTER_FACTOR = 3.0;
|
||||
|
||||
static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
|
||||
// VActive Preferred
|
||||
@ -2140,6 +2141,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
|
||||
struct dml2_pmo_instance *pmo = in_out->instance;
|
||||
bool stutter_period_meets_z8_eco = true;
|
||||
bool z8_stutter_optimization_too_expensive = false;
|
||||
bool stutter_optimization_too_expensive = false;
|
||||
double line_time_us, vblank_nom_time_us;
|
||||
|
||||
unsigned int i;
|
||||
@ -2161,10 +2163,15 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
|
||||
line_time_us = (double)in_out->base_display_config->display_config.stream_descriptors[i].timing.h_total / (in_out->base_display_config->display_config.stream_descriptors[i].timing.pixel_clock_khz * 1000) * 1000000;
|
||||
vblank_nom_time_us = line_time_us * in_out->base_display_config->display_config.stream_descriptors[i].timing.vblank_nom;
|
||||
|
||||
if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us) {
|
||||
if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.z8_stutter_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
|
||||
z8_stutter_optimization_too_expensive = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if (vblank_nom_time_us < pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us * MIN_BLANK_STUTTER_FACTOR) {
|
||||
stutter_optimization_too_expensive = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
pmo->scratch.pmo_dcn4.num_stutter_candidates = 0;
|
||||
@ -2180,7 +2187,7 @@ bool pmo_dcn4_fams2_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in
|
||||
pmo->scratch.pmo_dcn4.z8_vblank_optimizable = false;
|
||||
}
|
||||
|
||||
if (pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
|
||||
if (!stutter_optimization_too_expensive && pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us > 0) {
|
||||
pmo->scratch.pmo_dcn4.optimal_vblank_reserved_time_for_stutter_us[pmo->scratch.pmo_dcn4.num_stutter_candidates] = (unsigned int)pmo->soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us;
|
||||
pmo->scratch.pmo_dcn4.num_stutter_candidates++;
|
||||
}
|
||||
|
@ -519,15 +519,18 @@ static void dcn31_reset_back_end_for_pipe(
|
||||
|
||||
dc->hwss.set_abm_immediate_disable(pipe_ctx);
|
||||
|
||||
if ((!pipe_ctx->stream->dpms_off || pipe_ctx->stream->link->link_status.link_active)
|
||||
&& pipe_ctx->stream->sink && pipe_ctx->stream->sink->edid_caps.panel_patch.blankstream_before_otg_off) {
|
||||
link = pipe_ctx->stream->link;
|
||||
|
||||
if ((!pipe_ctx->stream->dpms_off || link->link_status.link_active) &&
|
||||
(link->connector_signal == SIGNAL_TYPE_EDP))
|
||||
dc->hwss.blank_stream(pipe_ctx);
|
||||
}
|
||||
|
||||
pipe_ctx->stream_res.tg->funcs->set_dsc_config(
|
||||
pipe_ctx->stream_res.tg,
|
||||
OPTC_DSC_DISABLED, 0, 0);
|
||||
|
||||
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
|
||||
|
||||
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
|
||||
if (pipe_ctx->stream_res.tg->funcs->set_odm_bypass)
|
||||
pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
|
||||
@ -539,7 +542,6 @@ static void dcn31_reset_back_end_for_pipe(
|
||||
pipe_ctx->stream_res.tg->funcs->set_drr(
|
||||
pipe_ctx->stream_res.tg, NULL);
|
||||
|
||||
link = pipe_ctx->stream->link;
|
||||
/* DPMS may already disable or */
|
||||
/* dpms_off status is incorrect due to fastboot
|
||||
* feature. When system resume from S4 with second
|
||||
@ -651,7 +653,8 @@ static void dmub_abm_set_backlight(struct dc_context *dc,
|
||||
cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
|
||||
cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = backlight_level_params->frame_ramp;
|
||||
cmd.abm_set_backlight.abm_set_backlight_data.backlight_user_level = backlight_level_params->backlight_pwm_u16_16;
|
||||
cmd.abm_set_backlight.abm_set_backlight_data.backlight_control_type = backlight_level_params->control_type;
|
||||
cmd.abm_set_backlight.abm_set_backlight_data.backlight_control_type =
|
||||
(enum dmub_backlight_control_type) backlight_level_params->control_type;
|
||||
cmd.abm_set_backlight.abm_set_backlight_data.min_luminance = backlight_level_params->min_luminance;
|
||||
cmd.abm_set_backlight.abm_set_backlight_data.max_luminance = backlight_level_params->max_luminance;
|
||||
cmd.abm_set_backlight.abm_set_backlight_data.min_backlight_pwm = backlight_level_params->min_backlight_pwm;
|
||||
|
@@ -1633,9 +1633,11 @@ static bool retrieve_link_cap(struct dc_link *link)
     }

     /* Read DP tunneling information. */
-    status = dpcd_get_tunneling_device_data(link);
-    if (status != DC_OK)
-        dm_error("%s: Read tunneling device data failed.\n", __func__);
+    if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+        status = dpcd_get_tunneling_device_data(link);
+        if (status != DC_OK)
+            dm_error("%s: Read tunneling device data failed.\n", __func__);
+    }

     dpcd_set_source_specific_data(link);
     /* Sink may need to configure internals based on vendor, so allow some

@@ -221,21 +221,11 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
         &replay_error_status.raw,
         sizeof(replay_error_status.raw));

-    link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR =
-        replay_error_status.bits.LINK_CRC_ERROR;
-    link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR =
-        replay_configuration.bits.DESYNC_ERROR_STATUS;
-    link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR =
-        replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS;
-
-    if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR ||
-        link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR ||
-        link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) {
+    if (replay_error_status.bits.LINK_CRC_ERROR ||
+        replay_configuration.bits.DESYNC_ERROR_STATUS ||
+        replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) {
         bool allow_active;

         if (link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR)
             link->replay_settings.config.received_desync_error_hpd = 1;

-        if (link->replay_settings.config.force_disable_desync_error_check)
-            return;
-

@@ -559,17 +559,6 @@ static const struct dc_debug_options debug_defaults_drv = {
         .using_dml2 = false,
 };

-static const struct dc_debug_options debug_defaults_diags = {
-        .disable_dmcu = false,
-        .force_abm_enable = false,
-        .clock_trace = true,
-        .disable_stutter = true,
-        .disable_pplib_clock_request = true,
-        .disable_pplib_wm_range = true,
-        .underflow_assert_delay_us = 0xFFFFFFFF,
-        .enable_legacy_fast_update = true,
-};
-
 static void dcn10_dpp_destroy(struct dpp **dpp)
 {
     kfree(TO_DCN10_DPP(*dpp));

@@ -1398,8 +1387,6 @@ static bool dcn10_resource_construct(

     if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
         dc->debug = debug_defaults_drv;
-    else
-        dc->debug = debug_defaults_diags;

     /*************************************************
      *  Create resources                             *

@@ -868,7 +868,7 @@ static const struct dc_debug_options debug_defaults_drv = {
     .max_downscale_src_width = 4096,/*upto true 4K*/
     .disable_pplib_wm_range = false,
     .scl_reset_length10 = true,
-    .sanity_checks = true,
+    .sanity_checks = false,
     .underflow_assert_delay_us = 0xFFFFFFFF,
     .dwb_fi_phase = -1, // -1 = disable,
     .dmub_command_table = true,

@@ -888,7 +888,7 @@ static const struct dc_debug_options debug_defaults_drv = {
     .max_downscale_src_width = 4096,/*upto true 4k*/
     .disable_pplib_wm_range = false,
     .scl_reset_length10 = true,
-    .sanity_checks = true,
+    .sanity_checks = false,
     .underflow_assert_delay_us = 0xFFFFFFFF,
     .dwb_fi_phase = -1, // -1 = disable,
     .dmub_command_table = true,

@@ -910,6 +910,16 @@ static void spl_get_taps_non_adaptive_scaler(
             spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
         else
             spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+
+    if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz))
+        spl_scratch->scl_data.taps.h_taps = 1;
+    if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))
+        spl_scratch->scl_data.taps.v_taps = 1;
+    if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+        spl_scratch->scl_data.taps.h_taps_c = 1;
+    if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+        spl_scratch->scl_data.taps.v_taps_c = 1;
+
 }

 /* Calculate optimal number of taps */

@@ -936,10 +946,7 @@ static bool spl_get_optimal_number_of_taps(

     /* Disable adaptive scaler and sharpener when integer scaling is enabled */
     if (spl_in->scaling_quality.integer_scaling) {
-        spl_scratch->scl_data.taps.h_taps = 1;
-        spl_scratch->scl_data.taps.v_taps = 1;
-        spl_scratch->scl_data.taps.v_taps_c = 1;
-        spl_scratch->scl_data.taps.h_taps_c = 1;
+        spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps);
         *enable_easf_v = false;
         *enable_easf_h = false;
         *enable_isharp = false;

@@ -4426,6 +4426,24 @@ struct dmub_rb_cmd_abm_set_pipe {
     struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data;
 };

+/**
+ * Type of backlight control method to be used by ABM module
+ */
+enum dmub_backlight_control_type {
+    /**
+     * PWM Backlight control
+     */
+    DMU_BACKLIGHT_CONTROL_PWM = 0,
+    /**
+     * VESA Aux-based backlight control
+     */
+    DMU_BACKLIGHT_CONTROL_VESA_AUX = 1,
+    /**
+     * AMD DPCD Aux-based backlight control
+     */
+    DMU_BACKLIGHT_CONTROL_AMD_AUX = 2,
+};
+
 /**
  * Data passed from driver to FW in a DMUB_CMD__ABM_SET_BACKLIGHT command.
  */

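Illustrative note, not part of the patch: the three enum values correspond to the numeric values already documented in dmub_cmd_abm_set_backlight_data. A minimal sketch of choosing a control type; the two boolean capability flags are hypothetical stand-ins for whatever panel capabilities a caller actually tracks, and only the enum names come from the header above.

#include <stdbool.h>

/* Hypothetical helper; assumes the dmub_backlight_control_type enum above. */
static enum dmub_backlight_control_type
pick_backlight_control(bool has_amd_dpcd_aux, bool has_vesa_aux)
{
    if (has_amd_dpcd_aux)
        return DMU_BACKLIGHT_CONTROL_AMD_AUX;  /* 2: AMD DPCD AUX */
    if (has_vesa_aux)
        return DMU_BACKLIGHT_CONTROL_VESA_AUX; /* 1: VESA AUX */
    return DMU_BACKLIGHT_CONTROL_PWM;          /* 0: PWM fallback */
}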
@@ -4452,18 +4470,23 @@ struct dmub_cmd_abm_set_backlight_data {
      */
     uint8_t panel_mask;

+    /**
+     * AUX HW Instance.
+     */
+    uint8_t aux_inst;
+
+    /**
+     * Explicit padding to 4 byte boundary.
+     */
+    uint8_t pad[1];
+
     /**
      * Backlight control type.
      * Value 0 is PWM backlight control.
      * Value 1 is VAUX backlight control.
      * Value 2 is AMD DPCD AUX backlight control.
      */
-    uint8_t backlight_control_type;
-
-    /**
-     * AUX HW instance.
-     */
-    uint8_t aux_inst;
+    enum dmub_backlight_control_type backlight_control_type;

     /**
      * Minimum luminance in nits.

@@ -497,6 +497,7 @@ enum dmub_status
     const struct dmub_fw_meta_info *fw_info;
     uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
     uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+    uint32_t shared_state_size = DMUB_FW_HEADER_SHARED_STATE_SIZE;
     uint32_t window_sizes[DMUB_WINDOW_TOTAL] = { 0 };

     if (!dmub->sw_init)

@@ -514,6 +515,7 @@ enum dmub_status

         fw_state_size = fw_info->fw_region_size;
         trace_buffer_size = fw_info->trace_buffer_size;
+        shared_state_size = fw_info->shared_state_size;

         /**
          * If DM didn't fill in a version, then fill it in based on

@@ -534,7 +536,7 @@ enum dmub_status
     window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
     window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
     window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
-    window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE;
+    window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);

     out->fb_size =
         dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);

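A rough, standalone sketch of the sizing rule introduced above (the constant's value is assumed, not from the patch): the shared-state window keeps the header-defined minimum unless the firmware metadata reports something larger, so it can grow but never shrink.

/* Sketch only; the default size below is an assumed example value. */
#define EXAMPLE_SHARED_STATE_MIN_SIZE 0x8000u

static unsigned int shared_state_window_size(unsigned int fw_reported_size)
{
    unsigned int size = EXAMPLE_SHARED_STATE_MIN_SIZE;

    if (fw_reported_size > size) /* firmware may request a larger region */
        size = fw_reported_size;

    return size;                 /* never below the build-time minimum */
}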
@@ -230,13 +230,23 @@ union MESAPI_SET_HW_RESOURCES {
             uint32_t disable_add_queue_wptr_mc_addr : 1;
             uint32_t enable_mes_event_int_logging : 1;
             uint32_t enable_reg_active_poll : 1;
-            uint32_t reserved : 21;
+            uint32_t use_disable_queue_in_legacy_uq_preemption : 1;
+            uint32_t send_write_data : 1;
+            uint32_t os_tdr_timeout_override : 1;
+            uint32_t use_rs64mem_for_proc_gang_ctx : 1;
+            uint32_t use_add_queue_unmap_flag_addr : 1;
+            uint32_t enable_mes_sch_stb_log : 1;
+            uint32_t limit_single_process : 1;
+            uint32_t is_strix_tmz_wa_enabled :1;
+            uint32_t reserved : 13;
         };
         uint32_t uint32_t_all;
     };
     uint32_t oversubscription_timer;
     uint64_t doorbell_info;
     uint64_t event_intr_history_gpu_mc_ptr;
+    uint64_t timestamp;
+    uint32_t os_tdr_timeout_in_sec;
 };

     uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];

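One point worth spelling out (illustrative, not from the patch): the eight new single-bit flags are carved out of the old 21-bit reserved field, so the 32-bit flag word and the overall API frame layout are unchanged. A trivial compile-time check of that arithmetic, using hypothetical macro names:

#include <assert.h>

#define OLD_RESERVED_BITS 21  /* "reserved : 21" before the change */
#define NEW_FLAG_BITS      8  /* eight new 1-bit flags added above */
#define NEW_RESERVED_BITS 13  /* "reserved : 13" after the change */

static_assert(NEW_FLAG_BITS + NEW_RESERVED_BITS == OLD_RESERVED_BITS,
              "new flags must fit in the bits freed from the reserved field");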
@@ -563,6 +573,11 @@ enum MESAPI_MISC_OPCODE {
     MESAPI_MISC__READ_REG,
     MESAPI_MISC__WAIT_REG_MEM,
     MESAPI_MISC__SET_SHADER_DEBUGGER,
+    MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
+    MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
+    MESAPI_MISC__CHANGE_CONFIG,
+    MESAPI_MISC__LAUNCH_CLEANER_SHADER,
+
     MESAPI_MISC__MAX,
 };

@@ -617,6 +632,31 @@ struct SET_SHADER_DEBUGGER {
     uint32_t trap_en;
 };

+enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
+    MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
+    MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
+    MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2,
+
+    MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
+};
+
+struct CHANGE_CONFIG {
+    enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
+    union {
+        struct {
+            uint32_t limit_single_process : 1;
+            uint32_t enable_hws_logging_buffer : 1;
+            uint32_t reserved : 31;
+        } bits;
+        uint32_t all;
+    } option;
+
+    struct {
+        uint32_t tdr_level;
+        uint32_t tdr_delay;
+    } tdr_config;
+};
+
 union MESAPI__MISC {
     struct {
         union MES_API_HEADER header;

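A hedged usage sketch for the new CHANGE_CONFIG payload: the structure, enums, and header fields are taken from the MES API definitions above, but the submission call at the end is only a placeholder, not a real driver function.

#include <stdbool.h>

/* Sketch: build a MESAPI_MISC__CHANGE_CONFIG packet that toggles the
 * single-process limit. mes_submit_misc_pkt() is a placeholder name for
 * whatever submission path the driver actually uses.
 */
static void sketch_limit_single_process(bool enable)
{
    union MESAPI__MISC misc = {0};

    misc.header.type = MES_API_TYPE_SCHEDULER;
    misc.header.opcode = MES_SCH_API_MISC;
    misc.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

    misc.opcode = MESAPI_MISC__CHANGE_CONFIG;
    misc.change_config.opcode =
        MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
    misc.change_config.option.bits.limit_single_process = enable ? 1 : 0;

    /* mes_submit_misc_pkt(&misc); -- placeholder for the real submit path */
}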
@@ -631,6 +671,7 @@ union MESAPI__MISC {
         struct WAIT_REG_MEM wait_reg_mem;
         struct SET_SHADER_DEBUGGER set_shader_debugger;
         enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
+        struct CHANGE_CONFIG change_config;

         uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
     };

@@ -643,6 +643,10 @@ enum MESAPI_MISC_OPCODE {
     MESAPI_MISC__SET_SHADER_DEBUGGER,
     MESAPI_MISC__NOTIFY_WORK_ON_UNMAPPED_QUEUE,
     MESAPI_MISC__NOTIFY_TO_UNMAP_PROCESSES,
+    MESAPI_MISC__QUERY_HUNG_ENGINE_ID,
+    MESAPI_MISC__CHANGE_CONFIG,
+    MESAPI_MISC__LAUNCH_CLEANER_SHADER,
+    MESAPI_MISC__SETUP_MES_DBGEXT,

     MESAPI_MISC__MAX,
 };

@@ -713,6 +717,31 @@ struct SET_GANG_SUBMIT {
     uint32_t slave_gang_context_array_index;
 };

+enum MESAPI_MISC__CHANGE_CONFIG_OPTION {
+    MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS = 0,
+    MESAPI_MISC__CHANGE_CONFIG_OPTION_ENABLE_HWS_LOGGING_BUFFER = 1,
+    MESAPI_MISC__CHANGE_CONFIG_OPTION_CHANGE_TDR_CONFIG = 2,
+
+    MESAPI_MISC__CHANGE_CONFIG_OPTION_MAX = 0x1F
+};
+
+struct CHANGE_CONFIG {
+    enum MESAPI_MISC__CHANGE_CONFIG_OPTION opcode;
+    union {
+        struct {
+            uint32_t limit_single_process : 1;
+            uint32_t enable_hws_logging_buffer : 1;
+            uint32_t reserved : 30;
+        } bits;
+        uint32_t all;
+    } option;
+
+    struct {
+        uint32_t tdr_level;
+        uint32_t tdr_delay;
+    } tdr_config;
+};
+
 union MESAPI__MISC {
     struct {
         union MES_API_HEADER header;

@@ -726,7 +755,7 @@ union MESAPI__MISC {
         struct WAIT_REG_MEM wait_reg_mem;
         struct SET_SHADER_DEBUGGER set_shader_debugger;
         enum MES_AMD_PRIORITY_LEVEL queue_sch_level;
-
+        struct CHANGE_CONFIG change_config;
         uint32_t data[MISC_DATA_MAX_SIZE_IN_DWORDS];
     };
     uint64_t timestamp;

@@ -3041,6 +3041,16 @@ static int mmhub_err_codes[] = {
     CODE_VML2, CODE_VML2_WALKER, CODE_MMCANE,
 };

+static int vcn_err_codes[] = {
+    CODE_VIDD, CODE_VIDV,
+};
+static int jpeg_err_codes[] = {
+    CODE_JPEG0S, CODE_JPEG0D, CODE_JPEG1S, CODE_JPEG1D,
+    CODE_JPEG2S, CODE_JPEG2D, CODE_JPEG3S, CODE_JPEG3D,
+    CODE_JPEG4S, CODE_JPEG4D, CODE_JPEG5S, CODE_JPEG5D,
+    CODE_JPEG6S, CODE_JPEG6D, CODE_JPEG7S, CODE_JPEG7D,
+};
+
 static const struct mca_ras_info mca_ras_table[] = {
     {
         .blkid = AMDGPU_RAS_BLOCK__UMC,

@@ -3069,6 +3079,20 @@ static const struct mca_ras_info mca_ras_table[] = {
         .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL,
         .ip = AMDGPU_MCA_IP_PCS_XGMI,
         .get_err_count = mca_pcs_xgmi_mca_get_err_count,
+    }, {
+        .blkid = AMDGPU_RAS_BLOCK__VCN,
+        .ip = AMDGPU_MCA_IP_SMU,
+        .err_code_array = vcn_err_codes,
+        .err_code_count = ARRAY_SIZE(vcn_err_codes),
+        .get_err_count = mca_smu_mca_get_err_count,
+        .bank_is_valid = mca_smu_bank_is_valid,
+    }, {
+        .blkid = AMDGPU_RAS_BLOCK__JPEG,
+        .ip = AMDGPU_MCA_IP_SMU,
+        .err_code_array = jpeg_err_codes,
+        .err_code_count = ARRAY_SIZE(jpeg_err_codes),
+        .get_err_count = mca_smu_mca_get_err_count,
+        .bank_is_valid = mca_smu_bank_is_valid,
     },
 };

@@ -1132,7 +1132,7 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
 static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
                     enum smu_clk_type clk_type, char *buf)
 {
-    int i, size = 0, ret = 0;
+    int i, idx, ret = 0, size = 0;
     uint32_t cur_value = 0, value = 0, count = 0;
     uint32_t min, max;

@@ -1168,7 +1168,8 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
         break;

     for (i = 0; i < count; i++) {
-        ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
+        idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+        ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, idx, &value);
         if (ret)
             break;

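To make the index flip concrete (standalone sketch with invented numbers): if the memory-clock DPM table is stored highest-first, walking it with count - i - 1 reports the levels lowest-first, consistent with how the other clock types are printed.

#include <stdio.h>

/* Illustrative only: a made-up 3-level MCLK table stored highest-first. */
static const unsigned int mclk_mhz[] = { 1000, 800, 400 };

int main(void)
{
    unsigned int count = 3, i, idx;

    for (i = 0; i < count; i++) {
        idx = count - i - 1;                      /* 2, 1, 0 */
        printf("%u: %uMhz\n", i, mclk_mhz[idx]);  /* 400, 800, 1000 */
    }
    return 0;
}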
@@ -1516,6 +1516,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
  * 64K_D_2D on GFX12 is identical to 64K_D on GFX11.
  */
 #define AMD_FMT_MOD_TILE_GFX9_64K_D 10
+#define AMD_FMT_MOD_TILE_GFX9_4K_D_X 22
 #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
 #define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
 #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27

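A hedged example of how the newly added 4K_D_X tile value plugs into the existing AMD modifier helpers in drm_fourcc.h; the tile version chosen here is only an example, real users derive it from the GPU generation.

#include <drm/drm_fourcc.h>

/* Sketch: compose a 64-bit AMD format modifier selecting the new
 * GFX9_4K_D_X tiling mode (GFX11 picked arbitrarily for TILE_VERSION).
 */
static __u64 example_4k_d_x_modifier(void)
{
    return AMD_FMT_MOD |
           AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_4K_D_X) |
           AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11);
}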