drm fixes for 6.13-rc2

dma-fence:
 - Fix reference leak on fence-merge failure path
 - Simplify fence merging with kernel's sort()
 - Fix dma_fence_array_signaled() to ensure forward progress
 
 dp_mst:
 - Fix MST sideband message body length check
 - Fix a bunch of locking/state handling with DP MST msgs
 
 sti:
 - Add __iomem for mixer_dbg_mxn()'s parameter
 
 xe:
 - Missing init value and 64-bit write-order check
 - Fix a memory allocation issue causing lockdep violation
 
 v3d:
 - performance counter fix.
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmdTROUACgkQDHTzWXnE
 hr5vYw//TeMMwRs708c5RnH/1RUthChcStTqk/aVJvHkqjMiqUgEGxrW2OruNTIO
 TODPOWvCrb4AzvD21NKMfZx5dT2fBoq9yw40Qb3eXXSFodH9V4+teEeMh5UI1imJ
 OZvLXiXv+4jTuSwu/01YuiPR16j75QqrvLMvWrCWA7m8Of393uJUa570OQt6G7C3
 lwsKVot16/xBQpUQkNQtch3HyMAvH+5pttnw7DOyZxpAuze9J5erEPicgIkN0j1V
 GB0zohDgqMmnqKLJrOZowt2ngZ6rT8uC6RlpV2+IquXZG3O+nCiwlsDmDeIE4mbO
 Rkur3TR7whp2VKkKRN+ll6kb0TDnPfj3ylFzIRfPyFZjT3JIV4ps5k8E9/tRyeGO
 HQ9D6DGjETw21rJlvq0YEotalP/EyIL8W7jaJgAMmHXTri/reF7NKyq6ngSeck7B
 aF3IFPwtZuJZawbecUChgWRC86NDBi4d8iUyZaaGGDxLQFjzVntUDQF9hrGFf1Vj
 aOLCS4GX1Gp8atv44PDO2KH+7X2t2CpzFY2F8GN4gv74iepeZfDW/bjZLwruVBZY
 Jk6UMlV1oIJB50FT1CtpfILm4B87Ks7mvL7unmY2hlqQz3xWE/2J8GhJcz+2Ww5R
 PNwN/c1mxNgk74dPEkDJh1BGKC4PyKVzwll3KIed5jatbqwqFM0=
 =NMaJ
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2024-12-07' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Pretty quiet week which is probably expected after US holidays, the
  dma-fence and displayport MST message handling fixes make up the bulk
  of this, along with a couple of minor xe and other driver fixes.

  dma-fence:
   - Fix reference leak on fence-merge failure path
   - Simplify fence merging with kernel's sort()
   - Fix dma_fence_array_signaled() to ensure forward progress

  dp_mst:
   - Fix MST sideband message body length check
   - Fix a bunch of locking/state handling with DP MST msgs

  sti:
   - Add __iomem for mixer_dbg_mxn()'s parameter

  xe:
   - Missing init value and 64-bit write-order check
   - Fix a memory allocation issue causing lockdep violation

  v3d:
   - Performance counter fix"

* tag 'drm-fixes-2024-12-07' of https://gitlab.freedesktop.org/drm/kernel:
  drm/v3d: Enable Performance Counters before clearing them
  drm/dp_mst: Use reset_msg_rx_state() instead of open coding it
  drm/dp_mst: Reset message rx state after OOM in drm_dp_mst_handle_up_req()
  drm/dp_mst: Ensure mst_primary pointer is valid in drm_dp_mst_handle_up_req()
  drm/dp_mst: Fix down request message timeout handling
  drm/dp_mst: Simplify error path in drm_dp_mst_handle_down_rep()
  drm/dp_mst: Verify request type in the corresponding down message reply
  drm/dp_mst: Fix resetting msg rx state after topology removal
  drm/xe: Move the coredump registration to the worker thread
  drm/xe/guc: Fix missing init value and add register order check
  drm/sti: Add __iomem for mixer_dbg_mxn's parameter
  drm/dp_mst: Fix MST sideband message body length check
  dma-buf: fix dma_fence_array_signaled v4
  dma-fence: Use kernel's sort for merging fences
  dma-fence: Fix reference leak on fence merge failure path
commit 9a6e8c7c3a (Linus Torvalds, 2024-12-06 11:52:15 -08:00)

 8 files changed, 285 insertions(+), 137 deletions(-)

drivers/dma-buf/dma-fence-array.c

@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
int num_pending;
unsigned int i;
if (atomic_read(&array->num_pending) > 0)
/*
* We need to read num_pending before checking the enable_signal bit
* to avoid racing with the enable_signaling() implementation, which
* might decrement the counter, and cause a partial check.
* atomic_read_acquire() pairs with atomic_dec_and_test() in
* dma_fence_array_enable_signaling()
*
* The !--num_pending check is here to account for the any_signaled case
* if we race with enable_signaling(), that means the !num_pending check
* in the is_signalling_enabled branch might be outdated (num_pending
* might have been decremented), but that's fine. The user will get the
* right value when testing again later.
*/
num_pending = atomic_read_acquire(&array->num_pending);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
if (num_pending <= 0)
goto signal;
return false;
}
for (i = 0; i < array->num_fences; ++i) {
if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
goto signal;
}
return false;
signal:
dma_fence_array_clear_pending_error(array);
return true;
}
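For context on the barrier pairing above: the acquire read is matched by the
counter decrement on the signaling side. A simplified sketch of that side,
reduced to the ordering-relevant part (the real callback in dma-fence-array.c
also handles reference counting and irq_work):

	static void fence_array_signal_side_sketch(struct dma_fence_array *array)
	{
		/*
		 * atomic_dec_and_test() implies full ordering, so anything
		 * written before the decrement is visible to a reader that
		 * observes the new counter value via atomic_read_acquire()
		 * in dma_fence_array_signaled() above.
		 */
		if (atomic_dec_and_test(&array->num_pending))
			dma_fence_signal(&array->base);
	}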

drivers/dma-buf/dma-fence-unwrap.c

@@ -12,6 +12,7 @@
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
#include <linux/sort.h>
/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
@@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
static int fence_cmp(const void *_a, const void *_b)
{
struct dma_fence *a = *(struct dma_fence **)_a;
struct dma_fence *b = *(struct dma_fence **)_b;
if (a->context < b->context)
return -1;
else if (a->context > b->context)
return 1;
if (dma_fence_is_later(b, a))
return 1;
else if (dma_fence_is_later(a, b))
return -1;
return 0;
}
/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence **fences,
@@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
struct dma_fence_array *result;
struct dma_fence *tmp, **array;
ktime_t timestamp;
unsigned int i;
size_t count;
int i, j, count;
count = 0;
timestamp = ns_to_ktime(0);
@@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
if (!array)
return NULL;
/*
* This trashes the input fence array and uses it as position for the
* following merge loop. This works because the dma_fence_merge()
* wrapper macro is creating this temporary array on the stack together
* with the iterators.
*/
for (i = 0; i < num_fences; ++i)
fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
count = 0;
do {
unsigned int sel;
restart:
tmp = NULL;
for (i = 0; i < num_fences; ++i) {
struct dma_fence *next;
while (fences[i] && dma_fence_is_signaled(fences[i]))
fences[i] = dma_fence_unwrap_next(&iter[i]);
next = fences[i];
if (!next)
continue;
/*
* We can't guarantee that input fences are ordered by
* context, but it is still quite likely when this
* function is used multiple times. So attempt to order
* the fences by context as we pass over them and merge
* fences with the same context.
*/
if (!tmp || tmp->context > next->context) {
tmp = next;
sel = i;
} else if (tmp->context < next->context) {
continue;
} else if (dma_fence_is_later(tmp, next)) {
fences[i] = dma_fence_unwrap_next(&iter[i]);
goto restart;
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
array[count++] = dma_fence_get(tmp);
} else {
fences[sel] = dma_fence_unwrap_next(&iter[sel]);
goto restart;
ktime_t t = dma_fence_timestamp(tmp);
if (ktime_after(t, timestamp))
timestamp = t;
}
}
}
if (tmp) {
array[count++] = dma_fence_get(tmp);
fences[sel] = dma_fence_unwrap_next(&iter[sel]);
if (count == 0 || count == 1)
goto return_fastpath;
sort(array, count, sizeof(*array), fence_cmp, NULL);
/*
* Only keep the most recent fence for each context.
*/
j = 0;
for (i = 1; i < count; i++) {
if (array[i]->context == array[j]->context)
dma_fence_put(array[i]);
else
array[++j] = array[i];
}
count = ++j;
if (count > 1) {
result = dma_fence_array_create(count, array,
dma_fence_context_alloc(1),
1, false);
if (!result) {
for (i = 0; i < count; i++)
dma_fence_put(array[i]);
tmp = NULL;
goto return_tmp;
}
} while (tmp);
if (count == 0) {
tmp = dma_fence_allocate_private_stub(ktime_get());
goto return_tmp;
return &result->base;
}
if (count == 1) {
return_fastpath:
if (count == 0)
tmp = dma_fence_allocate_private_stub(timestamp);
else
tmp = array[0];
goto return_tmp;
}
result = dma_fence_array_create(count, array,
dma_fence_context_alloc(1),
1, false);
if (!result) {
tmp = NULL;
goto return_tmp;
}
return &result->base;
return_tmp:
kfree(array);
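The net effect of the rewrite above is "collect the unsignaled fences, sort
them by context with the newest fence first, then keep the first entry of
each context run". A self-contained userspace sketch of that pattern, where
fake_fence is an illustrative stand-in (the kernel uses sort() and
dma_fence_is_later() rather than qsort() and raw seqno compares):

	#include <stdio.h>
	#include <stdlib.h>

	struct fake_fence {
		unsigned long long context;
		unsigned int seqno;
	};

	static int cmp(const void *_a, const void *_b)
	{
		const struct fake_fence *a = _a, *b = _b;

		if (a->context != b->context)
			return a->context < b->context ? -1 : 1;
		/* Newest first within a context, mirroring fence_cmp(). */
		return a->seqno > b->seqno ? -1 : (a->seqno < b->seqno);
	}

	int main(void)
	{
		struct fake_fence f[] = { { 2, 7 }, { 1, 3 }, { 2, 9 }, { 1, 1 } };
		size_t count = sizeof(f) / sizeof(f[0]), i, j = 0;

		qsort(f, count, sizeof(*f), cmp);

		/* Keep only the first (newest) entry of each context run. */
		for (i = 1; i < count; i++)
			if (f[i].context != f[j].context)
				f[++j] = f[i];
		count = j + 1;

		for (i = 0; i < count; i++)
			printf("context %llu -> seqno %u\n",
			       f[i].context, f[i].seqno);
		return 0;
	}

This prints "context 1 -> seqno 3" and "context 2 -> seqno 9": one surviving
fence per context, which is what replaces the old restart-based merge loop.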

drivers/gpu/drm/display/drm_dp_mst_topology.c

@@ -320,6 +320,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr
hdr->broadcast = (buf[idx] >> 7) & 0x1;
hdr->path_msg = (buf[idx] >> 6) & 0x1;
hdr->msg_len = buf[idx] & 0x3f;
if (hdr->msg_len < 1) /* min space for body CRC */
return false;
idx++;
hdr->somt = (buf[idx] >> 7) & 0x1;
hdr->eomt = (buf[idx] >> 6) & 0x1;
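Why msg_len must be at least 1: the advertised body length includes the
body's trailing CRC byte, so later parsing subtracts one to get the payload
size. A hedged illustration of the underflow the new check prevents (u8
arithmetic, values illustrative):

	u8 msg_len = buf[idx] & 0x3f;	/* 6-bit body length from the header */
	/* Without the check above, msg_len == 0 wraps this to 255: */
	u8 payload_len = msg_len - 1;	/* body size minus the CRC byte */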
@@ -3697,8 +3700,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
ret = 0;
mgr->payload_id_table_cleared = false;
memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
mgr->reset_rx_state = true;
}
out_unlock:
@@ -3856,6 +3858,11 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
{
memset(msg, 0, sizeof(*msg));
}
static bool
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
struct drm_dp_mst_branch **mstb)
@@ -3934,6 +3941,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
return true;
}
static int get_msg_request_type(u8 data)
{
return data & 0x7f;
}
static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
const struct drm_dp_sideband_msg_tx *txmsg,
const struct drm_dp_sideband_msg_rx *rxmsg)
{
const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
const struct drm_dp_mst_branch *mstb = txmsg->dst;
int tx_req_type = get_msg_request_type(txmsg->msg[0]);
int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
char rad_str[64];
if (tx_req_type == rx_req_type)
return true;
drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
drm_dbg_kms(mgr->dev,
"Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
mstb, hdr->seqno, mstb->lct, rad_str,
drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
return false;
}
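On the encoding verify_rx_request_type() relies on: the first body byte of a
sideband reply carries the request identifier in its low seven bits, with the
high bit serving as the ACK/NAK flag, so masking with 0x7f yields a value
comparable against the original request. A small sketch, with an assumed
example byte (per the DP MST sideband message layout):

	u8 first_byte = 0x81;			/* hypothetical NAK reply */
	int req_type = first_byte & 0x7f;	/* 0x01 = LINK_ADDRESS */
	bool nak = first_byte & 0x80;		/* high bit set on a NAK */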
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
@@ -3949,9 +3984,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
/* find the message */
mutex_lock(&mgr->qlock);
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
struct drm_dp_sideband_msg_tx, next);
mutex_unlock(&mgr->qlock);
/* Were we actually expecting a response, and from this mstb? */
if (!txmsg || txmsg->dst != mstb) {
@@ -3960,6 +3995,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
hdr = &msg->initial_hdr;
drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
mutex_unlock(&mgr->qlock);
goto out_clear_reply;
}
if (!verify_rx_request_type(mgr, txmsg, msg)) {
mutex_unlock(&mgr->qlock);
goto out_clear_reply;
}
@@ -3975,20 +4019,15 @@
txmsg->reply.u.nak.nak_data);
}
memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
drm_dp_mst_topology_put_mstb(mstb);
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
list_del(&txmsg->next);
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
return 0;
out_clear_reply:
memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
reset_msg_rx_state(msg);
out:
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
@@ -4070,16 +4109,20 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_pending_up_req *up_req;
struct drm_dp_mst_branch *mst_primary;
int ret = 0;
if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
goto out;
goto out_clear_reply;
if (!mgr->up_req_recv.have_eomt)
return 0;
up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
if (!up_req)
return -ENOMEM;
if (!up_req) {
ret = -ENOMEM;
goto out_clear_reply;
}
INIT_LIST_HEAD(&up_req->next);
@@ -4090,10 +4133,19 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
up_req->msg.req_type);
kfree(up_req);
goto out;
goto out_clear_reply;
}
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
mutex_lock(&mgr->lock);
mst_primary = mgr->mst_primary;
if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) {
mutex_unlock(&mgr->lock);
kfree(up_req);
goto out_clear_reply;
}
mutex_unlock(&mgr->lock);
drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
@@ -4110,13 +4162,13 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->peer_device_type);
mutex_lock(&mgr->probe_lock);
handle_csn = mgr->mst_primary->link_address_sent;
handle_csn = mst_primary->link_address_sent;
mutex_unlock(&mgr->probe_lock);
if (!handle_csn) {
drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
kfree(up_req);
goto out;
goto out_put_primary;
}
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
@@ -4133,9 +4185,22 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
out:
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
out_put_primary:
drm_dp_mst_topology_put_mstb(mst_primary);
out_clear_reply:
reset_msg_rx_state(&mgr->up_req_recv);
return ret;
}
static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
{
mutex_lock(&mgr->lock);
if (mgr->reset_rx_state) {
mgr->reset_rx_state = false;
reset_msg_rx_state(&mgr->down_rep_recv);
reset_msg_rx_state(&mgr->up_req_recv);
}
mutex_unlock(&mgr->lock);
}
/**
@@ -4172,6 +4237,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u
*handled = true;
}
update_msg_rx_state(mgr);
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
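A note on the shared pattern in the hunks above: clearing the rx buffers
directly from drm_dp_mst_topology_mgr_set_mst() could race with a message
reception still in progress, so teardown now only sets mgr->reset_rx_state
under &mgr->lock, and the HPD event handler performs the actual reset before
parsing anything. Condensed from the diff:

	/* Teardown side, set_mst(mgr, false), under mgr->lock: */
	mgr->reset_rx_state = true;

	/* IRQ side, at the top of drm_dp_mst_hpd_irq_handle_event(): */
	update_msg_rx_state(mgr);	/* memset()s both rx buffers once */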

drivers/gpu/drm/sti/sti_mixer.c

@@ -137,7 +137,7 @@ static void mixer_dbg_crb(struct seq_file *s, int val)
}
}
static void mixer_dbg_mxn(struct seq_file *s, void *addr)
static void mixer_dbg_mxn(struct seq_file *s, void __iomem *addr)
{
int i;
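For reference, __iomem is a sparse annotation marking MMIO cookies that may
only be dereferenced through accessors such as readl()/writel(); without it,
passing an ioremap()ed pointer into mixer_dbg_mxn() is flagged as an
address-space violation under "make C=1". A minimal sketch of the idiom
(read_mixer_reg is a hypothetical helper, not sti driver code):

	static u32 read_mixer_reg(void __iomem *base, unsigned int offset)
	{
		/* MMIO goes through accessors, never a plain dereference. */
		return readl(base + offset);
	}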

drivers/gpu/drm/v3d/v3d_perfmon.c

@@ -254,9 +254,9 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon)
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel);
}
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask);
V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask);
V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);
v3d->active_perfmon = perfmon;
}
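The rationale for the reordering, as described by the fix: the clear and
overflow-reset writes only take effect on counters that are already enabled,
so enabling last left stale values behind. The resulting sequence, with the
intent spelled out:

	V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask);	/* enable first */
	V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask);	/* then clear counts */
	V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask);	/* and overflow bits */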

drivers/gpu/drm/xe/xe_devcoredump.c

@@ -155,36 +155,6 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
ss->vm = NULL;
}
static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
struct xe_device *xe = coredump_to_xe(coredump);
unsigned int fw_ref;
xe_pm_runtime_get(xe);
/* keep going if fw fails as we still want to save the memory and SW data */
fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
xe_vm_snapshot_capture_delayed(ss->vm);
xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
xe_pm_runtime_put(xe);
/* Calculate devcoredump size */
ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
if (!ss->read.buffer)
return;
__xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
xe_devcoredump_snapshot_free(ss);
}
static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
size_t count, void *data, size_t datalen)
{
@@ -234,6 +204,45 @@ static void xe_devcoredump_free(void *data)
"Xe device coredump has been deleted.\n");
}
static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
struct xe_device *xe = coredump_to_xe(coredump);
unsigned int fw_ref;
/*
* NB: Despite passing a GFP_ flags parameter here, more allocations are done
* internally using GFP_KERNEL explicitly. Hence this call must be in the worker
* thread and not in the initial capture call.
*/
dev_coredumpm_timeout(gt_to_xe(ss->gt)->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
xe_devcoredump_read, xe_devcoredump_free,
XE_COREDUMP_TIMEOUT_JIFFIES);
xe_pm_runtime_get(xe);
/* keep going if fw fails as we still want to save the memory and SW data */
fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
xe_vm_snapshot_capture_delayed(ss->vm);
xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);
xe_pm_runtime_put(xe);
/* Calculate devcoredump size */
ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);
ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
if (!ss->read.buffer)
return;
__xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
xe_devcoredump_snapshot_free(ss);
}
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
struct xe_sched_job *job)
{
@@ -310,10 +319,6 @@ void xe_devcoredump(struct xe_sched_job *job)
drm_info(&xe->drm, "Xe device coredump has been created\n");
drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
xe->drm.primary->index);
dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
xe_devcoredump_read, xe_devcoredump_free,
XE_COREDUMP_TIMEOUT_JIFFIES);
}
static void xe_driver_devcoredump_fini(void *arg)
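The lockdep angle, in general form: dev_coredumpm_timeout() performs
GFP_KERNEL allocations internally regardless of the gfp_t it is handed, so
it may sleep and cannot safely run in the capture path; queuing it onto the
already-existing snapshot worker avoids that. A generic sketch of the
deferral idiom (capture_ctx and do_register_coredump are illustrative names,
not xe code):

	struct capture_ctx {
		struct work_struct work;
		/* ... snapshot state ... */
	};

	static void capture_worker(struct work_struct *work)
	{
		struct capture_ctx *ctx =
			container_of(work, struct capture_ctx, work);

		/* May sleep and use GFP_KERNEL here, unlike the capture path. */
		do_register_coredump(ctx);
	}

	/* In the (possibly atomic) capture path: */
	INIT_WORK(&ctx->work, capture_worker);
	queue_work(system_unbound_wq, &ctx->work);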

drivers/gpu/drm/xe/xe_guc_capture.c

@@ -102,6 +102,7 @@ struct __guc_capture_parsed_output {
* A 64 bit register define requires 2 consecutive entries,
* with low dword first and hi dword the second.
* 2. Register name: null for an incomplete define
* 3. Incorrect order will trigger XE_WARN.
*/
#define COMMON_XELP_BASE_GLOBAL \
{ FORCEWAKE_GT, REG_32BIT, 0, 0, "FORCEWAKE_GT"}
@@ -1675,10 +1676,10 @@ snapshot_print_by_list_order(struct xe_hw_engine_snapshot *snapshot, struct drm_
struct xe_devcoredump *devcoredump = &xe->devcoredump;
struct xe_devcoredump_snapshot *devcore_snapshot = &devcoredump->snapshot;
struct gcap_reg_list_info *reginfo = NULL;
u32 last_value, i;
bool is_ext;
u32 i, last_value = 0;
bool is_ext, low32_ready = false;
if (!list || list->num_regs == 0)
if (!list || !list->list || list->num_regs == 0)
return;
XE_WARN_ON(!devcore_snapshot->matched_node);
@@ -1701,29 +1702,75 @@
continue;
value = reg->value;
if (reg_desc->data_type == REG_64BIT_LOW_DW) {
switch (reg_desc->data_type) {
case REG_64BIT_LOW_DW:
last_value = value;
/*
* A 64 bit register define requires 2 consecutive
* entries in register list, with low dword first
* and hi dword the second, like:
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
* { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, "XXX_REG"},
*
* Incorrect order will trigger XE_WARN.
*
* Possible double low here, for example:
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
*/
XE_WARN_ON(low32_ready);
low32_ready = true;
/* Low 32 bit dword saved, continue for high 32 bit */
continue;
} else if (reg_desc->data_type == REG_64BIT_HI_DW) {
break;
case REG_64BIT_HI_DW: {
u64 value_qw = ((u64)value << 32) | last_value;
/*
* Incorrect 64bit register order. Possible missing low.
* for example:
* { XXX_REG(0), REG_32BIT, 0, 0, NULL},
* { XXX_REG_HI(0), REG_64BIT_HI_DW, 0, 0, NULL},
*/
XE_WARN_ON(!low32_ready);
low32_ready = false;
drm_printf(p, "\t%s: 0x%016llx\n", reg_desc->regname, value_qw);
continue;
break;
}
if (is_ext) {
int dss, group, instance;
case REG_32BIT:
/*
* Incorrect 64bit register order. Possible missing high.
* for example:
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
* { XXX_REG(0), REG_32BIT, 0, 0, "XXX_REG"},
*/
XE_WARN_ON(low32_ready);
group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
if (is_ext) {
int dss, group, instance;
drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
} else {
drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
group = FIELD_GET(GUC_REGSET_STEERING_GROUP, reg_desc->flags);
instance = FIELD_GET(GUC_REGSET_STEERING_INSTANCE, reg_desc->flags);
dss = xe_gt_mcr_steering_info_to_dss_id(gt, group, instance);
drm_printf(p, "\t%s[%u]: 0x%08x\n", reg_desc->regname, dss, value);
} else {
drm_printf(p, "\t%s: 0x%08x\n", reg_desc->regname, value);
}
break;
}
}
/*
* Incorrect 64bit register order. Possible missing high.
* for example:
* { XXX_REG_LO(0), REG_64BIT_LOW_DW, 0, 0, NULL},
* } // <- Register list end
*/
XE_WARN_ON(low32_ready);
}
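The 64-bit handling above is a two-entry protocol: a REG_64BIT_LOW_DW entry
parks the low dword in last_value and sets low32_ready, and the
REG_64BIT_HI_DW entry that must immediately follow completes the value; any
other sequence trips an XE_WARN_ON(). The combining step itself, with
illustrative values:

	u32 lo = 0xdeadbeef;			/* from the _LO list entry */
	u32 hi = 0x00000001;			/* from the _HI list entry */
	u64 value_qw = ((u64)hi << 32) | lo;	/* 0x00000001deadbeef */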
/**

include/drm/display/drm_dp_mst_helper.h

@@ -699,6 +699,13 @@ struct drm_dp_mst_topology_mgr {
*/
bool payload_id_table_cleared : 1;
/**
* @reset_rx_state: The down request's reply and up request message
* receiver state must be reset, after the topology manager got
* removed. Protected by @lock.
*/
bool reset_rx_state : 1;
/**
* @payload_count: The number of currently active payloads in hardware. This value is only
* intended to be used internally by MST helpers for payload tracking, and is only safe to