drm/xe: Convert multiple bind ops into single job
This aligns with the uAPI: an array of binds, or a single bind that results in
multiple GPUVA ops, is considered a single atomic operation.

The design is roughly:
- xe_vma_ops is a list of xe_vma_op (GPUVA op)
- each xe_vma_op resolves to 0-3 PT ops
- xe_vma_ops creates a single job
- if at any point during binding a failure occurs, xe_vma_ops contains the
  information necessary to unwind the PT and VMA (GPUVA) state

v2:
 - Add missing dma-resv slot reservation (CI, testing)
v4:
 - Fix TLB invalidation (Paulo)
 - Add missing xe_sched_job_last_fence_add/test_dep check (Inspection)
v5:
 - Invert i, j usage (Matthew Auld)
 - Add helper to test and add job dep (Matthew Auld)
 - Return on anything but -ETIME for cpu bind (Matthew Auld)
 - Return -ENOBUFS if suballoc of BB fails due to size (Matthew Auld)
 - s/do/Do (Matthew Auld)
 - Add missing comma (Matthew Auld)
 - Do not assign return value to xe_range_fence_insert (Matthew Auld)
v6:
 - s/0x1ff/MAX_PTE_PER_SDI (Matthew Auld, CI)
 - Check for too large an SA in Xe to avoid triggering WARN (Matthew Auld)
 - Fix checkpatch issues
v7:
 - Rebase
 - Support more than 510 PTE updates in a bind job (Paulo, mesa testing)
v8:
 - Rebase

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-5-matthew.brost@intel.com
This commit is contained in: parent 96e7ebb220, commit e8babb280b
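The design described in the commit message can be pictured as a small data flow: a list of GPUVA ops collapses into per-op page-table updates that are staged and then submitted as one job, so a failure before submission leaves nothing committed. The following is only an illustrative sketch under simplified, invented types (example_*); it is not the driver's actual code.

/* Illustrative sketch only -- simplified stand-ins for the xe structures. */
#include <stddef.h>

struct example_pt_op { int num_entries; };         /* roughly xe_vm_pgtable_update_op */
struct example_vma_op {                            /* roughly xe_vma_op (one GPUVA op) */
	struct example_pt_op pt_ops[3];            /* each GPUVA op -> 0-3 PT ops */
	int num_pt_ops;
};
struct example_vma_ops {                           /* roughly xe_vma_ops */
	struct example_vma_op *ops;
	size_t num_ops;
};

/*
 * One job for the whole array of binds: stage everything first, submit once.
 * If any staging step fails, nothing has been written yet, so the caller can
 * unwind VMA (GPUVA) and PT state from the same example_vma_ops structure.
 */
static int example_run_vma_ops(struct example_vma_ops *vops,
			       int (*stage)(struct example_pt_op *),
			       int (*submit_single_job)(struct example_vma_ops *))
{
	size_t i;
	int j, err;

	for (i = 0; i < vops->num_ops; i++)
		for (j = 0; j < vops->ops[i].num_pt_ops; j++) {
			err = stage(&vops->ops[i].pt_ops[j]);
			if (err)
				return err;	/* unwind: no PTEs written yet */
		}

	return submit_single_job(vops);		/* all binds land atomically */
}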
@@ -58,6 +58,8 @@ struct xe_bo {
#endif
/** @freed: List node for delayed put. */
struct llist_node freed;
/** @update_index: Update index if PT BO */
int update_index;
/** @created: Whether the bo has passed initial creation */
bool created;
@@ -1125,6 +1125,7 @@ err_sync:
}

static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
const struct xe_vm_pgtable_update_op *pt_op,
const struct xe_vm_pgtable_update *update,
struct xe_migrate_pt_update *pt_update)
{
@@ -1159,8 +1160,12 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
ops->populate(pt_update, tile, NULL, bb->cs + bb->len, ofs, chunk,
update);
if (pt_op->bind)
ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
ofs, chunk, update);
else
ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
ofs, chunk, update);

bb->len += chunk * 2;
ofs += chunk;
@@ -1185,114 +1190,58 @@ struct migrate_test_params {

static struct dma_fence *
xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
struct xe_vm *vm, struct xe_bo *bo,
const struct xe_vm_pgtable_update *updates,
u32 num_updates, bool wait_vm,
struct xe_migrate_pt_update *pt_update)
{
XE_TEST_DECLARE(struct migrate_test_params *test =
to_migrate_test_params
(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
struct dma_fence *fence;
struct xe_vm *vm = pt_update->vops->vm;
struct xe_vm_pgtable_update_ops *pt_update_ops =
&pt_update->vops->pt_update_ops[pt_update->tile_id];
int err;
u32 i;
u32 i, j;

if (XE_TEST_ONLY(test && test->force_gpu))
return ERR_PTR(-ETIME);

if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
DMA_RESV_USAGE_KERNEL))
return ERR_PTR(-ETIME);

if (wait_vm && !dma_resv_test_signaled(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP))
return ERR_PTR(-ETIME);

if (ops->pre_commit) {
pt_update->job = NULL;
err = ops->pre_commit(pt_update);
if (err)
return ERR_PTR(err);
}
for (i = 0; i < num_updates; i++) {
const struct xe_vm_pgtable_update *update = &updates[i];

ops->populate(pt_update, m->tile, &update->pt_bo->vmap, NULL,
update->ofs, update->qwords, update);
}
for (i = 0; i < pt_update_ops->num_ops; ++i) {
const struct xe_vm_pgtable_update_op *pt_op =
&pt_update_ops->ops[i];

if (vm) {
trace_xe_vm_cpu_bind(vm);
xe_device_wmb(vm->xe);
}
for (j = 0; j < pt_op->num_entries; j++) {
const struct xe_vm_pgtable_update *update =
&pt_op->entries[j];

fence = dma_fence_get_stub();

return fence;
}

static bool no_in_syncs(struct xe_vm *vm, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs)
{
struct dma_fence *fence;
int i;

for (i = 0; i < num_syncs; i++) {
fence = syncs[i].fence;

if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
return false;
}
if (q) {
fence = xe_exec_queue_last_fence_get(q, vm);
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
dma_fence_put(fence);
return false;
if (pt_op->bind)
ops->populate(pt_update, m->tile,
&update->pt_bo->vmap, NULL,
update->ofs, update->qwords,
update);
else
ops->clear(pt_update, m->tile,
&update->pt_bo->vmap, NULL,
update->ofs, update->qwords, update);
}
dma_fence_put(fence);
}

return true;
trace_xe_vm_cpu_bind(vm);
xe_device_wmb(vm->xe);

return dma_fence_get_stub();
}

/**
* xe_migrate_update_pgtables() - Pipelined page-table update
* @m: The migrate context.
* @vm: The vm we'll be updating.
* @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
* @q: The exec queue to be used for the update or NULL if the default
* migration engine is to be used.
* @updates: An array of update descriptors.
* @num_updates: Number of descriptors in @updates.
* @syncs: Array of xe_sync_entry to await before updating. Note that waits
* will block the engine timeline.
* @num_syncs: Number of entries in @syncs.
* @pt_update: Pointer to a struct xe_migrate_pt_update, which contains
* pointers to callback functions and, if subclassed, private arguments to
* those.
*
* Perform a pipelined page-table update. The update descriptors are typically
* built under the same lock critical section as a call to this function. If
* using the default engine for the updates, they will be performed in the
* order they grab the job_mutex. If different engines are used, external
* synchronization is needed for overlapping updates to maintain page-table
* consistency. Note that the meaing of "overlapping" is that the updates
* touch the same page-table, which might be a higher-level page-directory.
* If no pipelining is needed, then updates may be performed by the cpu.
*
* Return: A dma_fence that, when signaled, indicates the update completion.
*/
struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_vm *vm,
struct xe_bo *bo,
struct xe_exec_queue *q,
const struct xe_vm_pgtable_update *updates,
u32 num_updates,
struct xe_sync_entry *syncs, u32 num_syncs,
struct xe_migrate_pt_update *pt_update)
static struct dma_fence *
__xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_migrate_pt_update *pt_update,
struct xe_vm_pgtable_update_ops *pt_update_ops)
{
const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
struct xe_tile *tile = m->tile;
@@ -1301,59 +1250,53 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_sched_job *job;
struct dma_fence *fence;
struct drm_suballoc *sa_bo = NULL;
struct xe_vma *vma = pt_update->vma;
struct xe_bb *bb;
u32 i, batch_size, ppgtt_ofs, update_idx, page_ofs = 0;
u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
u32 num_updates = 0, current_update = 0;
u64 addr;
int err = 0;
bool usm = !q && xe->info.has_usm;
bool first_munmap_rebind = vma &&
vma->gpuva.flags & XE_VMA_FIRST_REBIND;
struct xe_exec_queue *q_override = !q ? m->q : q;
u16 pat_index = xe->pat.idx[XE_CACHE_WB];
bool is_migrate = pt_update_ops->q == m->q;
bool usm = is_migrate && xe->info.has_usm;

/* Use the CPU if no in syncs and engine is idle */
if (no_in_syncs(vm, q, syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
fence = xe_migrate_update_pgtables_cpu(m, vm, bo, updates,
num_updates,
first_munmap_rebind,
pt_update);
if (!IS_ERR(fence) || fence == ERR_PTR(-EAGAIN))
return fence;
for (i = 0; i < pt_update_ops->num_ops; ++i) {
struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
struct xe_vm_pgtable_update *updates = pt_op->entries;

num_updates += pt_op->num_entries;
for (j = 0; j < pt_op->num_entries; ++j) {
u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
MAX_PTE_PER_SDI);

/* align noop + MI_STORE_DATA_IMM cmd prefix */
batch_size += 4 * num_cmds + updates[j].qwords * 2;
}
}

/* fixed + PTE entries */
if (IS_DGFX(xe))
batch_size = 2;
batch_size += 2;
else
batch_size = 6 + num_updates * 2;
batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
num_updates * 2;

for (i = 0; i < num_updates; i++) {
u32 num_cmds = DIV_ROUND_UP(updates[i].qwords, MAX_PTE_PER_SDI);

/* align noop + MI_STORE_DATA_IMM cmd prefix */
batch_size += 4 * num_cmds + updates[i].qwords * 2;
}

/*
* XXX: Create temp bo to copy from, if batch_size becomes too big?
*
* Worst case: Sum(2 * (each lower level page size) + (top level page size))
* Should be reasonably bound..
*/
xe_tile_assert(tile, batch_size < SZ_128K);

bb = xe_bb_new(gt, batch_size, !q && xe->info.has_usm);
bb = xe_bb_new(gt, batch_size, usm);
if (IS_ERR(bb))
return ERR_CAST(bb);

/* For sysmem PTE's, need to map them in our hole.. */
if (!IS_DGFX(xe)) {
ppgtt_ofs = NUM_KERNEL_PDE - 1;
if (q) {
xe_tile_assert(tile, num_updates <= NUM_VMUSA_WRITES_PER_UNIT);
u32 ptes, ofs;

sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
ppgtt_ofs = NUM_KERNEL_PDE - 1;
if (!is_migrate) {
u32 num_units = DIV_ROUND_UP(num_updates,
NUM_VMUSA_WRITES_PER_UNIT);

if (num_units > m->vm_update_sa.size) {
err = -ENOBUFS;
goto err_bb;
}
sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
GFP_KERNEL, true, 0);
if (IS_ERR(sa_bo)) {
err = PTR_ERR(sa_bo);
@@ -1369,18 +1312,49 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
}

/* Map our PT's to gtt */
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
bb->cs[bb->len++] = 0; /* upper_32_bits */
i = 0;
j = 0;
ptes = num_updates;
ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
while (ptes) {
u32 chunk = min(MAX_PTE_PER_SDI, ptes);
u32 idx = 0;

for (i = 0; i < num_updates; i++) {
struct xe_bo *pt_bo = updates[i].pt_bo;
bb->cs[bb->len++] = MI_STORE_DATA_IMM |
MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = ofs;
bb->cs[bb->len++] = 0; /* upper_32_bits */

xe_tile_assert(tile, pt_bo->size == SZ_4K);
for (; i < pt_update_ops->num_ops; ++i) {
struct xe_vm_pgtable_update_op *pt_op =
&pt_update_ops->ops[i];
struct xe_vm_pgtable_update *updates = pt_op->entries;

addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
struct xe_vm *vm = pt_update->vops->vm;
struct xe_bo *pt_bo = updates[j].pt_bo;

if (idx == chunk)
goto next_cmd;

xe_tile_assert(tile, pt_bo->size == SZ_4K);

/* Map a PT at most once */
if (pt_bo->update_index < 0)
pt_bo->update_index = current_update;

addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
XE_CACHE_WB, 0);
bb->cs[bb->len++] = lower_32_bits(addr);
bb->cs[bb->len++] = upper_32_bits(addr);
}

j = 0;
}

next_cmd:
ptes -= chunk;
ofs += chunk * sizeof(u64);
}

bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
@@ -1388,19 +1362,36 @@ xe_migrate_update_pgtables(struct xe_migrate *m,

addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
for (i = 0; i < num_updates; i++)
write_pgtable(tile, bb, addr + i * XE_PAGE_SIZE,
&updates[i], pt_update);
for (i = 0; i < pt_update_ops->num_ops; ++i) {
struct xe_vm_pgtable_update_op *pt_op =
&pt_update_ops->ops[i];
struct xe_vm_pgtable_update *updates = pt_op->entries;

for (j = 0; j < pt_op->num_entries; ++j) {
struct xe_bo *pt_bo = updates[j].pt_bo;

write_pgtable(tile, bb, addr +
pt_bo->update_index * XE_PAGE_SIZE,
pt_op, &updates[j], pt_update);
}
}
} else {
/* phys pages, no preamble required */
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;

for (i = 0; i < num_updates; i++)
write_pgtable(tile, bb, 0, &updates[i], pt_update);
for (i = 0; i < pt_update_ops->num_ops; ++i) {
struct xe_vm_pgtable_update_op *pt_op =
&pt_update_ops->ops[i];
struct xe_vm_pgtable_update *updates = pt_op->entries;

for (j = 0; j < pt_op->num_entries; ++j)
write_pgtable(tile, bb, 0, pt_op, &updates[j],
pt_update);
}
}

job = xe_bb_create_migration_job(q ?: m->q, bb,
job = xe_bb_create_migration_job(pt_update_ops->q, bb,
xe_migrate_batch_base(m, usm),
update_idx);
if (IS_ERR(job)) {
@@ -1408,46 +1399,20 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
goto err_sa;
}

/* Wait on BO move */
if (bo) {
err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
DMA_RESV_USAGE_KERNEL);
if (err)
goto err_job;
}

/*
* Munmap style VM unbind, need to wait for all jobs to be complete /
* trigger preempts before moving forward
*/
if (first_munmap_rebind) {
err = xe_sched_job_add_deps(job, xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP);
if (err)
goto err_job;
}

err = xe_sched_job_last_fence_add_dep(job, vm);
for (i = 0; !err && i < num_syncs; i++)
err = xe_sync_entry_add_deps(&syncs[i], job);

if (err)
goto err_job;

if (ops->pre_commit) {
pt_update->job = job;
err = ops->pre_commit(pt_update);
if (err)
goto err_job;
}
if (!q)
if (is_migrate)
mutex_lock(&m->job_mutex);

xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
xe_sched_job_push(job);

if (!q)
if (is_migrate)
mutex_unlock(&m->job_mutex);

xe_bb_free(bb, fence);
@@ -1464,6 +1429,40 @@ err_bb:

return ERR_PTR(err);
}

/**
* xe_migrate_update_pgtables() - Pipelined page-table update
* @m: The migrate context.
* @pt_update: PT update arguments
*
* Perform a pipelined page-table update. The update descriptors are typically
* built under the same lock critical section as a call to this function. If
* using the default engine for the updates, they will be performed in the
* order they grab the job_mutex. If different engines are used, external
* synchronization is needed for overlapping updates to maintain page-table
* consistency. Note that the meaing of "overlapping" is that the updates
* touch the same page-table, which might be a higher-level page-directory.
* If no pipelining is needed, then updates may be performed by the cpu.
*
* Return: A dma_fence that, when signaled, indicates the update completion.
*/
struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_migrate_pt_update *pt_update)

{
struct xe_vm_pgtable_update_ops *pt_update_ops =
&pt_update->vops->pt_update_ops[pt_update->tile_id];
struct dma_fence *fence;

fence = xe_migrate_update_pgtables_cpu(m, pt_update);

/* -ETIME indicates a job is needed, anything else is legit error */
if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
return fence;

return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
}

/**
* xe_migrate_wait() - Complete all operations using the xe_migrate context
* @m: Migrate context to wait for.
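The control flow of the new wrapper above is worth spelling out: the CPU path is tried first, and -ETIME is the agreed "fall back to a GPU job" signal, while any other error (or success) is returned unchanged. A hedged sketch of that contract follows; it is illustrative only, and example_ctx, try_cpu_update() and run_gpu_job() are invented stand-ins, not driver API.

/* Illustrative only: the CPU-first / -ETIME fallback contract. */
#include <linux/err.h>
#include <linux/dma-fence.h>

struct example_ctx;
struct dma_fence *try_cpu_update(struct example_ctx *ctx);
struct dma_fence *run_gpu_job(struct example_ctx *ctx);

struct dma_fence *example_update_pgtables(struct example_ctx *ctx)
{
	struct dma_fence *fence = try_cpu_update(ctx);

	/* Success, or a genuine error: hand it straight back. */
	if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
		return fence;

	/* -ETIME only means the CPU path could not be used right now
	 * (pending fences, forced-GPU test mode): build a batch-buffer
	 * job instead and let the scheduler order it. */
	return run_gpu_job(ctx);
}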
@@ -47,6 +47,24 @@ struct xe_migrate_pt_update_ops {
struct xe_tile *tile, struct iosys_map *map,
void *pos, u32 ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update);
/**
* @clear: Clear a command buffer or page-table with ptes.
* @pt_update: Embeddable callback argument.
* @tile: The tile for the current operation.
* @map: struct iosys_map into the memory to be populated.
* @pos: If @map is NULL, map into the memory to be populated.
* @ofs: qword offset into @map, unused if @map is NULL.
* @num_qwords: Number of qwords to write.
* @update: Information about the PTEs to be inserted.
*
* This interface is intended to be used as a callback into the
* page-table system to populate command buffers or shared
* page-tables with PTEs.
*/
void (*clear)(struct xe_migrate_pt_update *pt_update,
struct xe_tile *tile, struct iosys_map *map,
void *pos, u32 ofs, u32 num_qwords,
const struct xe_vm_pgtable_update *update);

/**
* @pre_commit: Callback to be called just before arming the
@@ -67,14 +85,10 @@ struct xe_migrate_pt_update_ops {
struct xe_migrate_pt_update {
/** @ops: Pointer to the struct xe_migrate_pt_update_ops callbacks */
const struct xe_migrate_pt_update_ops *ops;
/** @vma: The vma we're updating the pagetable for. */
struct xe_vma *vma;
/** @vops: VMA operations */
struct xe_vma_ops *vops;
/** @job: The job if a GPU page-table update. NULL otherwise */
struct xe_sched_job *job;
/** @start: Start of update for the range fence */
u64 start;
/** @last: Last of update for the range fence */
u64 last;
/** @tile_id: Tile ID of the update */
u8 tile_id;
};
@@ -96,12 +110,6 @@ struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m);

struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
struct xe_vm *vm,
struct xe_bo *bo,
struct xe_exec_queue *q,
const struct xe_vm_pgtable_update *updates,
u32 num_updates,
struct xe_sync_entry *syncs, u32 num_syncs,
struct xe_migrate_pt_update *pt_update);

void xe_migrate_wait(struct xe_migrate *m);
(File diff suppressed because it is too large.)
@@ -17,6 +17,7 @@ struct xe_sync_entry;
struct xe_tile;
struct xe_vm;
struct xe_vma;
struct xe_vma_ops;

/* Largest huge pte is currently 1GiB. May become device dependent. */
#define MAX_HUGEPTE_LEVEL 2
@@ -34,14 +35,11 @@ void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,

void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred);

struct dma_fence *
__xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool rebind);

struct dma_fence *
__xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs);
int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops);
struct dma_fence *xe_pt_update_ops_run(struct xe_tile *tile,
struct xe_vma_ops *vops);
void xe_pt_update_ops_fini(struct xe_tile *tile, struct xe_vma_ops *vops);
void xe_pt_update_ops_abort(struct xe_tile *tile, struct xe_vma_ops *vops);

bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma);
@@ -78,6 +78,8 @@ struct xe_vm_pgtable_update {
struct xe_vm_pgtable_update_op {
/** @entries: entries to update for this operation */
struct xe_vm_pgtable_update entries[XE_VM_MAX_LEVEL * 2 + 1];
/** @vma: VMA for operation, operation not valid if NULL */
struct xe_vma *vma;
/** @num_entries: number of entries for this update operation */
u32 num_entries;
/** @bind: is a bind */
@@ -86,4 +88,38 @@ struct xe_vm_pgtable_update_op {
bool rebind;
};

/** struct xe_vm_pgtable_update_ops: page table update operations */
struct xe_vm_pgtable_update_ops {
/** @ops: operations */
struct xe_vm_pgtable_update_op *ops;
/** @deferred: deferred list to destroy PT entries */
struct llist_head deferred;
/** @q: exec queue for PT operations */
struct xe_exec_queue *q;
/** @start: start address of ops */
u64 start;
/** @last: last address of ops */
u64 last;
/** @num_ops: number of operations */
u32 num_ops;
/** @current_op: current operations */
u32 current_op;
/** @needs_userptr_lock: Needs userptr lock */
bool needs_userptr_lock;
/** @needs_invalidation: Needs invalidation */
bool needs_invalidation;
/**
* @wait_vm_bookkeep: PT operations need to wait until VM is idle
* (bookkeep dma-resv slots are idle) and stage all future VM activity
* behind these operations (install PT operations into VM kernel
* dma-resv slot).
*/
bool wait_vm_bookkeep;
/**
* @wait_vm_kernel: PT operations need to wait until VM kernel dma-resv
* slots are idle.
*/
bool wait_vm_kernel;
};

#endif
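The two wait_vm_* flags above encode which dma-resv usage class a PT update job must wait on before it runs. As a hedged sketch only (the example_pt_flags type and example_add_vm_deps() helper are invented; the driver has its own wrappers), the mapping onto the generic DRM scheduler API could look like this:

/* Illustrative only: mapping wait_vm_* flags onto dma-resv dependencies. */
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>

struct example_pt_flags {
	bool wait_vm_bookkeep;	/* wait for all prior VM activity */
	bool wait_vm_kernel;	/* wait only for kernel-managed work */
};

static int example_add_vm_deps(struct drm_sched_job *job,
			       struct dma_resv *vm_resv,
			       const struct example_pt_flags *flags)
{
	if (flags->wait_vm_bookkeep)
		return drm_sched_job_add_resv_dependencies(job, vm_resv,
							   DMA_RESV_USAGE_BOOKKEEP);
	if (flags->wait_vm_kernel)
		return drm_sched_job_add_resv_dependencies(job, vm_resv,
							   DMA_RESV_USAGE_KERNEL);
	return 0;
}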
@@ -84,6 +84,13 @@ struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u32
struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
unsigned int size)
{
/*
* BB to large, return -ENOBUFS indicating user should split
* array of binds into smaller chunks.
*/
if (size > sa_manager->base.size)
return ERR_PTR(-ENOBUFS);

return drm_suballoc_new(&sa_manager->base, size, GFP_KERNEL, true, 0);
}
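The -ENOBUFS return above tells the submitter that the batch buffer for this array of binds does not fit the suballocator, and that the array should be split into smaller chunks and resubmitted. A hedged, purely illustrative sketch of one way a caller could react (example_bind and example_vm_bind() are invented stand-ins, not part of the uAPI):

/* Illustrative retry-by-splitting helper, not driver or uAPI code. */
#include <errno.h>

struct example_bind;
int example_vm_bind(struct example_bind *binds, unsigned int n);

static int example_submit_binds(struct example_bind *binds, unsigned int n)
{
	int err = example_vm_bind(binds, n);

	if (err == -ENOBUFS && n > 1) {
		/* Batch too large: split the bind array in half and retry. */
		err = example_submit_binds(binds, n / 2);
		if (!err)
			err = example_submit_binds(binds + n / 2, n - n / 2);
	}
	return err;
}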
@@ -313,7 +313,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)

#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000

/*
/**
* xe_vm_kill() - VM Kill
* @vm: The VM.
* @unlocked: Flag indicates the VM's dma-resv is not held
@@ -321,7 +321,7 @@ int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
* Kill the VM by setting banned flag indicated VM is no longer available for
* use. If in preempt fence mode, also kill all exec queue attached to the VM.
*/
static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
void xe_vm_kill(struct xe_vm *vm, bool unlocked)
{
struct xe_exec_queue *q;

@@ -798,7 +798,7 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
struct xe_vma *vma, *next;
struct xe_vma_ops vops;
struct xe_vma_op *op, *next_op;
int err;
int err, i;

lockdep_assert_held(&vm->lock);
if ((xe_vm_in_lr_mode(vm) && !rebind_worker) ||
@@ -806,6 +806,8 @@ int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
return 0;

xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
vops.pt_update_ops[i].wait_vm_bookkeep = true;

xe_vm_assert_held(vm);
list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
@@ -850,6 +852,8 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
struct dma_fence *fence = NULL;
struct xe_vma_ops vops;
struct xe_vma_op *op, *next_op;
struct xe_tile *tile;
u8 id;
int err;

lockdep_assert_held(&vm->lock);
@@ -857,6 +861,11 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));

xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
xe_tile_migrate_exec_queue(tile);
}

err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
if (err)
@@ -1697,147 +1706,6 @@ to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
return q ? q : vm->q[0];
}

static struct dma_fence *
xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
bool first_op, bool last_op)
{
struct xe_vm *vm = xe_vma_vm(vma);
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);
struct xe_tile *tile;
struct dma_fence *fence = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
int cur_fence = 0;
int number_tiles = hweight8(vma->tile_present);
int err;
u8 id;

trace_xe_vma_unbind(vma);

if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
if (!fences)
return ERR_PTR(-ENOMEM);
}

for_each_tile(tile, vm->xe, id) {
if (!(vma->tile_present & BIT(id)))
goto next;

fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id],
first_op ? syncs : NULL,
first_op ? num_syncs : 0);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
goto err_fences;
}

if (fences)
fences[cur_fence++] = fence;

next:
if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
q = list_next_entry(q, multi_gt_list);
}

if (fences) {
cf = dma_fence_array_create(number_tiles, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
false);
if (!cf) {
--vm->composite_fence_seqno;
err = -ENOMEM;
goto err_fences;
}
}

fence = cf ? &cf->base : !fence ?
xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence;

return fence;

err_fences:
if (fences) {
while (cur_fence)
dma_fence_put(fences[--cur_fence]);
kfree(fences);
}

return ERR_PTR(err);
}

static struct dma_fence *
xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_sync_entry *syncs, u32 num_syncs,
u8 tile_mask, bool first_op, bool last_op)
{
struct xe_tile *tile;
struct dma_fence *fence;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
struct xe_vm *vm = xe_vma_vm(vma);
int cur_fence = 0;
int number_tiles = hweight8(tile_mask);
int err;
u8 id;

trace_xe_vma_bind(vma);

if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
if (!fences)
return ERR_PTR(-ENOMEM);
}

for_each_tile(tile, vm->xe, id) {
if (!(tile_mask & BIT(id)))
goto next;

fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id],
first_op ? syncs : NULL,
first_op ? num_syncs : 0,
vma->tile_present & BIT(id));
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
goto err_fences;
}

if (fences)
fences[cur_fence++] = fence;

next:
if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list))
q = list_next_entry(q, multi_gt_list);
}

if (fences) {
cf = dma_fence_array_create(number_tiles, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
false);
if (!cf) {
--vm->composite_fence_seqno;
err = -ENOMEM;
goto err_fences;
}
}

return cf ? &cf->base : fence;

err_fences:
if (fences) {
while (cur_fence)
dma_fence_put(fences[--cur_fence]);
kfree(fences);
}

return ERR_PTR(err);
}

static struct xe_user_fence *
find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
{
@@ -1853,48 +1721,6 @@ find_ufence_get(struct xe_sync_entry *syncs, u32 num_syncs)
return NULL;
}

static struct dma_fence *
xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q,
struct xe_bo *bo, struct xe_sync_entry *syncs, u32 num_syncs,
u8 tile_mask, bool immediate, bool first_op, bool last_op)
{
struct dma_fence *fence;
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);

xe_vm_assert_held(vm);
xe_bo_assert_held(bo);

if (immediate) {
fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, tile_mask,
first_op, last_op);
if (IS_ERR(fence))
return fence;
} else {
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));

fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm);
}

return fence;
}

static struct dma_fence *
xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool first_op, bool last_op)
{
struct dma_fence *fence;

xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));

fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op);
if (IS_ERR(fence))
return fence;

return fence;
}

#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \
DRM_XE_VM_CREATE_FLAG_LR_MODE | \
DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
@@ -2035,21 +1861,6 @@ static const u32 region_to_mem_type[] = {
XE_PL_VRAM1,
};

static struct dma_fence *
xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
struct xe_exec_queue *q, struct xe_sync_entry *syncs,
u32 num_syncs, bool first_op, bool last_op)
{
struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q);

if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
vma->tile_mask, true, first_op, last_op);
} else {
return xe_exec_queue_last_fence_get(wait_exec_queue, vm);
}
}

static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
bool post_commit)
{
@@ -2337,13 +2148,10 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}

static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
struct drm_gpuva_ops *ops,
struct xe_sync_entry *syncs, u32 num_syncs,
struct xe_vma_ops *vops, bool last)
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
struct xe_vma_ops *vops)
{
struct xe_device *xe = vm->xe;
struct xe_vma_op *last_op = NULL;
struct drm_gpuva_op *__op;
struct xe_tile *tile;
u8 id, tile_mask = 0;
@@ -2357,19 +2165,10 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
drm_gpuva_for_each_op(__op, ops) {
struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
struct xe_vma *vma;
bool first = list_empty(&vops->list);
unsigned int flags = 0;

INIT_LIST_HEAD(&op->link);
list_add_tail(&op->link, &vops->list);

if (first) {
op->flags |= XE_VMA_OP_FIRST;
op->num_syncs = num_syncs;
op->syncs = syncs;
}

op->q = q;
op->tile_mask = tile_mask;

switch (op->base.op) {
@@ -2482,197 +2281,21 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
}
case DRM_GPUVA_OP_UNMAP:
case DRM_GPUVA_OP_PREFETCH:
/* FIXME: Need to skip some prefetch ops */
xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}

last_op = op;

err = xe_vma_op_commit(vm, op);
if (err)
return err;
}

/* FIXME: Unhandled corner case */
XE_WARN_ON(!last_op && last && !list_empty(&vops->list));

if (!last_op)
return 0;

if (last) {
last_op->flags |= XE_VMA_OP_LAST;
last_op->num_syncs = num_syncs;
last_op->syncs = syncs;
}

return 0;
}

static struct dma_fence *op_execute(struct xe_vm *vm, struct xe_vma *vma,
struct xe_vma_op *op)
{
struct dma_fence *fence = NULL;

lockdep_assert_held(&vm->lock);

xe_vm_assert_held(vm);
xe_bo_assert_held(xe_vma_bo(vma));

switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
fence = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
op->syncs, op->num_syncs,
op->tile_mask,
op->map.immediate || !xe_vm_in_fault_mode(vm),
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
case DRM_GPUVA_OP_REMAP:
{
bool prev = !!op->remap.prev;
bool next = !!op->remap.next;

if (!op->remap.unmap_done) {
if (prev || next)
vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
op->num_syncs,
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST &&
!prev && !next);
if (IS_ERR(fence))
break;
op->remap.unmap_done = true;
}

if (prev) {
op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND;
dma_fence_put(fence);
fence = xe_vm_bind(vm, op->remap.prev, op->q,
xe_vma_bo(op->remap.prev), op->syncs,
op->num_syncs,
op->remap.prev->tile_mask, true,
false,
op->flags & XE_VMA_OP_LAST && !next);
op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND;
if (IS_ERR(fence))
break;
op->remap.prev = NULL;
}

if (next) {
op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND;
dma_fence_put(fence);
fence = xe_vm_bind(vm, op->remap.next, op->q,
xe_vma_bo(op->remap.next),
op->syncs, op->num_syncs,
op->remap.next->tile_mask, true,
false, op->flags & XE_VMA_OP_LAST);
op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND;
if (IS_ERR(fence))
break;
op->remap.next = NULL;
}

break;
}
case DRM_GPUVA_OP_UNMAP:
fence = xe_vm_unbind(vm, vma, op->q, op->syncs,
op->num_syncs, op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
case DRM_GPUVA_OP_PREFETCH:
fence = xe_vm_prefetch(vm, vma, op->q, op->syncs, op->num_syncs,
op->flags & XE_VMA_OP_FIRST,
op->flags & XE_VMA_OP_LAST);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}

if (IS_ERR(fence))
trace_xe_vma_fail(vma);

return fence;
}

static struct dma_fence *
__xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma,
struct xe_vma_op *op)
{
struct dma_fence *fence;
int err;

retry_userptr:
fence = op_execute(vm, vma, op);
if (IS_ERR(fence) && PTR_ERR(fence) == -EAGAIN) {
lockdep_assert_held_write(&vm->lock);

if (op->base.op == DRM_GPUVA_OP_REMAP) {
if (!op->remap.unmap_done)
vma = gpuva_to_vma(op->base.remap.unmap->va);
else if (op->remap.prev)
vma = op->remap.prev;
else
vma = op->remap.next;
}

if (xe_vma_is_userptr(vma)) {
err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
if (!err)
goto retry_userptr;

fence = ERR_PTR(err);
trace_xe_vma_fail(vma);
}
}

return fence;
}

static struct dma_fence *
xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op)
{
struct dma_fence *fence = ERR_PTR(-ENOMEM);

lockdep_assert_held(&vm->lock);

switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
fence = __xe_vma_op_execute(vm, op->map.vma, op);
break;
case DRM_GPUVA_OP_REMAP:
{
struct xe_vma *vma;

if (!op->remap.unmap_done)
vma = gpuva_to_vma(op->base.remap.unmap->va);
else if (op->remap.prev)
vma = op->remap.prev;
else
vma = op->remap.next;

fence = __xe_vma_op_execute(vm, vma, op);
break;
}
case DRM_GPUVA_OP_UNMAP:
fence = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va),
op);
break;
case DRM_GPUVA_OP_PREFETCH:
fence = __xe_vma_op_execute(vm,
gpuva_to_vma(op->base.prefetch.va),
op);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}

return fence;
}

static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
bool post_commit, bool prev_post_commit,
bool next_post_commit)
@@ -2858,23 +2481,110 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
return 0;
}

static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
{
struct xe_exec_queue *q = vops->q;
struct xe_tile *tile;
int number_tiles = 0;
u8 id;

for_each_tile(tile, vm->xe, id) {
if (vops->pt_update_ops[id].num_ops)
++number_tiles;

if (vops->pt_update_ops[id].q)
continue;

if (q) {
vops->pt_update_ops[id].q = q;
if (vm->pt_root[id] && !list_empty(&q->multi_gt_list))
q = list_next_entry(q, multi_gt_list);
} else {
vops->pt_update_ops[id].q = vm->q[id];
}
}

return number_tiles;
}

static struct dma_fence *ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{
struct xe_vma_op *op, *next;
struct xe_tile *tile;
struct dma_fence *fence = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
int number_tiles = 0, current_fence = 0, err;
u8 id;

list_for_each_entry_safe(op, next, &vops->list, link) {
dma_fence_put(fence);
fence = xe_vma_op_execute(vm, op);
if (IS_ERR(fence)) {
drm_warn(&vm->xe->drm, "VM op(%d) failed with %ld",
op->base.op, PTR_ERR(fence));
fence = ERR_PTR(-ENOSPC);
break;
number_tiles = vm_ops_setup_tile_args(vm, vops);
if (number_tiles == 0)
return ERR_PTR(-ENODATA);

if (number_tiles > 1) {
fences = kmalloc_array(number_tiles, sizeof(*fences),
GFP_KERNEL);
if (!fences)
return ERR_PTR(-ENOMEM);
}

for_each_tile(tile, vm->xe, id) {
if (!vops->pt_update_ops[id].num_ops)
continue;

err = xe_pt_update_ops_prepare(tile, vops);
if (err) {
fence = ERR_PTR(err);
goto err_out;
}
}

for_each_tile(tile, vm->xe, id) {
if (!vops->pt_update_ops[id].num_ops)
continue;

fence = xe_pt_update_ops_run(tile, vops);
if (IS_ERR(fence))
goto err_out;

if (fences)
fences[current_fence++] = fence;
}

if (fences) {
cf = dma_fence_array_create(number_tiles, fences,
vm->composite_fence_ctx,
vm->composite_fence_seqno++,
false);
if (!cf) {
--vm->composite_fence_seqno;
fence = ERR_PTR(-ENOMEM);
goto err_out;
}
fence = &cf->base;
}

for_each_tile(tile, vm->xe, id) {
if (!vops->pt_update_ops[id].num_ops)
continue;

xe_pt_update_ops_fini(tile, vops);
}

return fence;

err_out:
for_each_tile(tile, vm->xe, id) {
if (!vops->pt_update_ops[id].num_ops)
continue;

xe_pt_update_ops_abort(tile, vops);
}
while (current_fence)
dma_fence_put(fences[--current_fence]);
kfree(fences);
kfree(cf);

return fence;
}
@@ -2955,12 +2665,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
fence = ops_execute(vm, vops);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
/* FIXME: Killing VM rather than proper error handling */
xe_vm_kill(vm, false);
goto unlock;
} else {
vm_bind_ioctl_ops_fini(vm, vops, fence);
}

vm_bind_ioctl_ops_fini(vm, vops, fence);
}

unlock:
@@ -3317,8 +3025,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto unwind_ops;
}

err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
&vops, i == args->num_binds - 1);
err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
if (err)
goto unwind_ops;
}
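The reworked ops_execute() above boils down to a per-tile prepare, then run, then fini sequence, with an abort path that unwinds everything staged if any step fails. The outline below is only an illustrative sketch (the example_* types and helpers are invented stand-ins; composite-fence and fence-lifetime handling is omitted), not the driver's code.

/* Illustrative outline of the per-tile prepare -> run -> fini flow. */
struct dma_fence;
struct example_tile;

int example_prepare(struct example_tile *t);            /* stage PT ops          */
struct dma_fence *example_run(struct example_tile *t);  /* emit one job per tile */
void example_fini(struct example_tile *t);              /* commit VMA/PT state   */
void example_abort(struct example_tile *t);             /* unwind staged state   */

static struct dma_fence *example_execute(struct example_tile **tiles, int n)
{
	struct dma_fence *fence = NULL;
	int i, prepared = 0;

	for (i = 0; i < n; i++) {		/* 1) prepare everything first  */
		if (example_prepare(tiles[i]))
			goto abort;
		prepared++;
	}
	for (i = 0; i < n; i++) {		/* 2) then submit, one job/tile */
		fence = example_run(tiles[i]);
		if (!fence)
			goto abort;
	}
	for (i = 0; i < n; i++)			/* 3) commit only after success */
		example_fini(tiles[i]);
	return fence;				/* real code: composite fence   */

abort:
	for (i = 0; i < prepared; i++)
		example_abort(tiles[i]);
	return NULL;
}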
@@ -259,6 +259,8 @@ static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
* xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
* @vm: The vm
@@ -26,14 +26,12 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_READ_ONLY DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED (DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT (DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND (DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND (DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 9)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 10)
#define XE_VMA_PTE_4K (DRM_GPUVA_USERBITS << 3)
#define XE_VMA_PTE_2M (DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_1G (DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_64K (DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)

/** struct xe_userptr - User pointer */
struct xe_userptr {
@@ -100,6 +98,9 @@ struct xe_vma {
*/
u8 tile_present;

/** @tile_staged: bind is staged for this VMA */
u8 tile_staged;

/**
* @pat_index: The pat index to use when encoding the PTEs for this vma.
*/
@@ -315,31 +316,18 @@ struct xe_vma_op_prefetch {

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
XE_VMA_OP_FIRST = BIT(0),
/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
XE_VMA_OP_LAST = BIT(1),
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
XE_VMA_OP_COMMITTED = BIT(2),
XE_VMA_OP_COMMITTED = BIT(0),
/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
XE_VMA_OP_PREV_COMMITTED = BIT(3),
XE_VMA_OP_PREV_COMMITTED = BIT(1),
/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
XE_VMA_OP_NEXT_COMMITTED = BIT(4),
XE_VMA_OP_NEXT_COMMITTED = BIT(2),
};

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
/** @base: GPUVA base operation */
struct drm_gpuva_op base;
/** @q: exec queue for this operation */
struct xe_exec_queue *q;
/**
* @syncs: syncs for this operation, only used on first and last
* operation
*/
struct xe_sync_entry *syncs;
/** @num_syncs: number of syncs */
u32 num_syncs;
/** @link: async operation link */
struct list_head link;
/** @flags: operation flags */
@@ -363,19 +351,14 @@ struct xe_vma_ops {
struct list_head list;
/** @vm: VM */
struct xe_vm *vm;
/** @q: exec queue these operations */
/** @q: exec queue for VMA operations */
struct xe_exec_queue *q;
/** @syncs: syncs these operation */
struct xe_sync_entry *syncs;
/** @num_syncs: number of syncs */
u32 num_syncs;
/** @pt_update_ops: page table update operations */
struct {
/** @ops: operations */
struct xe_vm_pgtable_update_op *ops;
/** @num_ops: number of operations */
u32 num_ops;
} pt_update_ops[XE_MAX_TILES_PER_DEVICE];
struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
};

#endif