drm/xe/vm: Subclass userptr vmas
The construct of allocating only part of the vma structure when the userptr part is not needed is very fragile. A developer could add fields below the userptr part, and code could easily attempt to access the userptr part even when it's not present. So introduce xe_userptr_vma, which subclasses struct xe_vma the proper way, and modify a couple of interfaces accordingly. This should also help when adding userptr helpers to drm_gpuvm.

v2:
- Fix documentation of to_userptr_vma() (Matthew Brost)
- Fix allocation and freeing of vmas to more clearly distinguish
  between the types.

Closes: https://lore.kernel.org/intel-xe/0c4cc1a7-f409-4597-b110-81f9e45d1ffe@embeddedor.com/T/#u
Fixes: a4cc60a55fd9 ("drm/xe: Only alloc userptr part of xe_vma for userptrs")
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240131091628.12318-1-thomas.hellstrom@linux.intel.com
(cherry picked from commit 5bd24e78829ad569fa1c3ce9a05b59bb97b91f3d)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
parent 89642db3b2
commit ed2bdf3b26
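For readers unfamiliar with the idiom, here is a minimal, standalone sketch of the subclassing pattern the patch adopts: the userptr-specific state lives in a larger struct that embeds struct xe_vma, and a base pointer is converted back with container_of()-style pointer math. The struct and helper names mirror the patch; the trimmed fields and the vma_create() helper are made up purely for illustration and are not driver code.

#include <stddef.h>
#include <stdlib.h>

/* Simplified stand-ins for the driver types; most fields omitted. */
struct xe_userptr {
	unsigned long notifier_seq;
};

struct xe_vma {
	unsigned long start, end;
	int is_userptr;
};

/* The subclass embeds the base struct ... */
struct xe_userptr_vma {
	struct xe_vma vma;
	struct xe_userptr userptr;
};

/* ... so a base pointer can be turned back into the subclass pointer. */
static struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	return (struct xe_userptr_vma *)((char *)vma -
					 offsetof(struct xe_userptr_vma, vma));
}

/* The allocation picks the type up front: whenever is_userptr is set, the
 * userptr part is guaranteed to exist, with no struct-layout tricks. */
static struct xe_vma *vma_create(int is_userptr)
{
	if (is_userptr) {
		struct xe_userptr_vma *uvma = calloc(1, sizeof(*uvma));

		if (!uvma)
			return NULL;
		uvma->vma.is_userptr = 1;
		return &uvma->vma;
	}
	return calloc(1, sizeof(struct xe_vma));
}

Contrast this with the scheme being removed, which allocated sizeof(*vma) - sizeof(struct xe_userptr) bytes for non-userptr VMAs: any new field placed after the userptr member, or any code path touching the userptr part of a non-userptr VMA, would silently access memory past the end of the allocation.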
drivers/gpu/drm/xe/xe_gt_pagefault.c

@@ -165,7 +165,8 @@ retry_userptr:
 		goto unlock_vm;
 	}
 
-	if (!xe_vma_is_userptr(vma) || !xe_vma_userptr_check_repin(vma)) {
+	if (!xe_vma_is_userptr(vma) ||
+	    !xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
 		downgrade_write(&vm->lock);
 		write_locked = false;
 	}
@@ -181,11 +182,13 @@ retry_userptr:
 	/* TODO: Validate fault */
 
 	if (xe_vma_is_userptr(vma) && write_locked) {
+		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+
 		spin_lock(&vm->userptr.invalidated_lock);
-		list_del_init(&vma->userptr.invalidate_link);
+		list_del_init(&uvma->userptr.invalidate_link);
 		spin_unlock(&vm->userptr.invalidated_lock);
 
-		ret = xe_vma_userptr_pin_pages(vma);
+		ret = xe_vma_userptr_pin_pages(uvma);
 		if (ret)
 			goto unlock_vm;
 
@@ -220,7 +223,7 @@ retry_userptr:
 	dma_fence_put(fence);
 
 	if (xe_vma_is_userptr(vma))
-		ret = xe_vma_userptr_check_repin(vma);
+		ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
 	vma->usm.tile_invalidated &= ~BIT(tile->id);
 
 unlock_dma_resv:
drivers/gpu/drm/xe/xe_pt.c

@@ -618,8 +618,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
 
 	if (!xe_vma_is_null(vma)) {
 		if (xe_vma_is_userptr(vma))
-			xe_res_first_sg(vma->userptr.sg, 0, xe_vma_size(vma),
-					&curs);
+			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
+					xe_vma_size(vma), &curs);
 		else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
 			xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
 				     xe_vma_size(vma), &curs);
@@ -906,17 +906,17 @@ static void xe_vm_dbg_print_entries(struct xe_device *xe,
 
 #ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
 
-static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static int xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
-	u32 divisor = vma->userptr.divisor ? vma->userptr.divisor : 2;
+	u32 divisor = uvma->userptr.divisor ? uvma->userptr.divisor : 2;
 	static u32 count;
 
 	if (count++ % divisor == divisor - 1) {
-		struct xe_vm *vm = xe_vma_vm(vma);
+		struct xe_vm *vm = xe_vma_vm(&uvma->vma);
 
-		vma->userptr.divisor = divisor << 1;
+		uvma->userptr.divisor = divisor << 1;
 		spin_lock(&vm->userptr.invalidated_lock);
-		list_move_tail(&vma->userptr.invalidate_link,
+		list_move_tail(&uvma->userptr.invalidate_link,
 			       &vm->userptr.invalidated);
 		spin_unlock(&vm->userptr.invalidated_lock);
 		return true;
@@ -927,7 +927,7 @@ static int xe_pt_userptr_inject_eagain(struct xe_vma *vma)
 
 #else
 
-static bool xe_pt_userptr_inject_eagain(struct xe_vma *vma)
+static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
 {
 	return false;
 }
@@ -1000,9 +1000,9 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 {
 	struct xe_pt_migrate_pt_update *userptr_update =
 		container_of(pt_update, typeof(*userptr_update), base);
-	struct xe_vma *vma = pt_update->vma;
-	unsigned long notifier_seq = vma->userptr.notifier_seq;
-	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_userptr_vma *uvma = to_userptr_vma(pt_update->vma);
+	unsigned long notifier_seq = uvma->userptr.notifier_seq;
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
 	int err = xe_pt_vm_dependencies(pt_update->job,
 					&vm->rftree[pt_update->tile_id],
 					pt_update->start,
@@ -1023,7 +1023,7 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 	 */
 	do {
 		down_read(&vm->userptr.notifier_lock);
-		if (!mmu_interval_read_retry(&vma->userptr.notifier,
+		if (!mmu_interval_read_retry(&uvma->userptr.notifier,
 					     notifier_seq))
 			break;
 
@@ -1032,11 +1032,11 @@ static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
 		if (userptr_update->bind)
 			return -EAGAIN;
 
-		notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
+		notifier_seq = mmu_interval_read_begin(&uvma->userptr.notifier);
 	} while (true);
 
 	/* Inject errors to test_whether they are handled correctly */
-	if (userptr_update->bind && xe_pt_userptr_inject_eagain(vma)) {
+	if (userptr_update->bind && xe_pt_userptr_inject_eagain(uvma)) {
 		up_read(&vm->userptr.notifier_lock);
 		return -EAGAIN;
 	}
@@ -1297,7 +1297,7 @@ __xe_pt_bind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queue
 		vma->tile_present |= BIT(tile->id);
 
 	if (bind_pt_update.locked) {
-		vma->userptr.initial_bind = true;
+		to_userptr_vma(vma)->userptr.initial_bind = true;
 		up_read(&vm->userptr.notifier_lock);
 		xe_bo_put_commit(&deferred);
 	}
@@ -1642,7 +1642,7 @@ __xe_pt_unbind_vma(struct xe_tile *tile, struct xe_vma *vma, struct xe_exec_queu
 
 		if (!vma->tile_present) {
 			spin_lock(&vm->userptr.invalidated_lock);
-			list_del_init(&vma->userptr.invalidate_link);
+			list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
 			spin_unlock(&vm->userptr.invalidated_lock);
 		}
 		up_read(&vm->userptr.notifier_lock);
drivers/gpu/drm/xe/xe_vm.c

@@ -46,7 +46,7 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
 
 /**
  * xe_vma_userptr_check_repin() - Advisory check for repin needed
- * @vma: The userptr vma
+ * @uvma: The userptr vma
  *
  * Check if the userptr vma has been invalidated since last successful
  * repin. The check is advisory only and can the function can be called
@@ -56,15 +56,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
  *
  * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
  */
-int xe_vma_userptr_check_repin(struct xe_vma *vma)
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
 {
-	return mmu_interval_check_retry(&vma->userptr.notifier,
-					vma->userptr.notifier_seq) ?
+	return mmu_interval_check_retry(&uvma->userptr.notifier,
+					uvma->userptr.notifier_seq) ?
 		-EAGAIN : 0;
 }
 
-int xe_vma_userptr_pin_pages(struct xe_vma *vma)
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
 {
+	struct xe_userptr *userptr = &uvma->userptr;
+	struct xe_vma *vma = &uvma->vma;
 	struct xe_vm *vm = xe_vma_vm(vma);
 	struct xe_device *xe = vm->xe;
 	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
@@ -80,30 +82,30 @@ retry:
 	if (vma->gpuva.flags & XE_VMA_DESTROYED)
 		return 0;
 
-	notifier_seq = mmu_interval_read_begin(&vma->userptr.notifier);
-	if (notifier_seq == vma->userptr.notifier_seq)
+	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
+	if (notifier_seq == userptr->notifier_seq)
 		return 0;
 
 	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
 
-	if (vma->userptr.sg) {
+	if (userptr->sg) {
 		dma_unmap_sgtable(xe->drm.dev,
-				  vma->userptr.sg,
+				  userptr->sg,
 				  read_only ? DMA_TO_DEVICE :
 				  DMA_BIDIRECTIONAL, 0);
-		sg_free_table(vma->userptr.sg);
-		vma->userptr.sg = NULL;
+		sg_free_table(userptr->sg);
+		userptr->sg = NULL;
 	}
 
 	pinned = ret = 0;
 	if (in_kthread) {
-		if (!mmget_not_zero(vma->userptr.notifier.mm)) {
+		if (!mmget_not_zero(userptr->notifier.mm)) {
 			ret = -EFAULT;
 			goto mm_closed;
 		}
-		kthread_use_mm(vma->userptr.notifier.mm);
+		kthread_use_mm(userptr->notifier.mm);
 	}
 
 	while (pinned < num_pages) {
@@ -123,32 +125,32 @@ retry:
 	}
 
 	if (in_kthread) {
-		kthread_unuse_mm(vma->userptr.notifier.mm);
-		mmput(vma->userptr.notifier.mm);
+		kthread_unuse_mm(userptr->notifier.mm);
+		mmput(userptr->notifier.mm);
 	}
 mm_closed:
 	if (ret)
 		goto out;
 
-	ret = sg_alloc_table_from_pages_segment(&vma->userptr.sgt, pages,
+	ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
 						pinned, 0,
 						(u64)pinned << PAGE_SHIFT,
 						xe_sg_segment_size(xe->drm.dev),
 						GFP_KERNEL);
 	if (ret) {
-		vma->userptr.sg = NULL;
+		userptr->sg = NULL;
 		goto out;
 	}
-	vma->userptr.sg = &vma->userptr.sgt;
+	userptr->sg = &userptr->sgt;
 
-	ret = dma_map_sgtable(xe->drm.dev, vma->userptr.sg,
+	ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
 			      read_only ? DMA_TO_DEVICE :
 			      DMA_BIDIRECTIONAL,
 			      DMA_ATTR_SKIP_CPU_SYNC |
 			      DMA_ATTR_NO_KERNEL_MAPPING);
 	if (ret) {
-		sg_free_table(vma->userptr.sg);
-		vma->userptr.sg = NULL;
+		sg_free_table(userptr->sg);
+		userptr->sg = NULL;
 		goto out;
 	}
 
@@ -167,8 +169,8 @@ out:
 	kvfree(pages);
 
 	if (!(ret < 0)) {
-		vma->userptr.notifier_seq = notifier_seq;
-		if (xe_vma_userptr_check_repin(vma) == -EAGAIN)
+		userptr->notifier_seq = notifier_seq;
+		if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
 			goto retry;
 	}
 
@@ -635,7 +637,9 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
 				   const struct mmu_notifier_range *range,
 				   unsigned long cur_seq)
 {
-	struct xe_vma *vma = container_of(mni, struct xe_vma, userptr.notifier);
+	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
+	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+	struct xe_vma *vma = &uvma->vma;
 	struct xe_vm *vm = xe_vma_vm(vma);
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
@@ -651,7 +655,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
 	mmu_interval_set_seq(mni, cur_seq);
 
 	/* No need to stop gpu access if the userptr is not yet bound. */
-	if (!vma->userptr.initial_bind) {
+	if (!userptr->initial_bind) {
 		up_write(&vm->userptr.notifier_lock);
 		return true;
 	}
@@ -663,7 +667,7 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
 	if (!xe_vm_in_fault_mode(vm) &&
 	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
 		spin_lock(&vm->userptr.invalidated_lock);
-		list_move_tail(&vma->userptr.invalidate_link,
+		list_move_tail(&userptr->invalidate_link,
 			       &vm->userptr.invalidated);
 		spin_unlock(&vm->userptr.invalidated_lock);
 	}
@@ -703,7 +707,7 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
 
 int xe_vm_userptr_pin(struct xe_vm *vm)
 {
-	struct xe_vma *vma, *next;
+	struct xe_userptr_vma *uvma, *next;
 	int err = 0;
 	LIST_HEAD(tmp_evict);
 
@@ -711,22 +715,23 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
 
 	/* Collect invalidated userptrs */
 	spin_lock(&vm->userptr.invalidated_lock);
-	list_for_each_entry_safe(vma, next, &vm->userptr.invalidated,
+	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
 				 userptr.invalidate_link) {
-		list_del_init(&vma->userptr.invalidate_link);
-		list_move_tail(&vma->combined_links.userptr,
+		list_del_init(&uvma->userptr.invalidate_link);
+		list_move_tail(&uvma->userptr.repin_link,
 			       &vm->userptr.repin_list);
 	}
 	spin_unlock(&vm->userptr.invalidated_lock);
 
 	/* Pin and move to temporary list */
-	list_for_each_entry_safe(vma, next, &vm->userptr.repin_list,
-				 combined_links.userptr) {
-		err = xe_vma_userptr_pin_pages(vma);
+	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+				 userptr.repin_link) {
+		err = xe_vma_userptr_pin_pages(uvma);
 		if (err < 0)
 			return err;
 
-		list_move_tail(&vma->combined_links.userptr, &vm->rebind_list);
+		list_del_init(&uvma->userptr.repin_link);
+		list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
 	}
 
 	return 0;
@@ -782,6 +787,14 @@ struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker)
 	return fence;
 }
 
+static void xe_vma_free(struct xe_vma *vma)
+{
+	if (xe_vma_is_userptr(vma))
+		kfree(to_userptr_vma(vma));
+	else
+		kfree(vma);
+}
+
 #define VMA_CREATE_FLAG_READ_ONLY	BIT(0)
 #define VMA_CREATE_FLAG_IS_NULL		BIT(1)
 
@@ -800,14 +813,26 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	xe_assert(vm->xe, start < end);
 	xe_assert(vm->xe, end < vm->size);
 
-	if (!bo && !is_null)	/* userptr */
+	/*
+	 * Allocate and ensure that the xe_vma_is_userptr() return
+	 * matches what was allocated.
+	 */
+	if (!bo && !is_null) {
+		struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL);
+
+		if (!uvma)
+			return ERR_PTR(-ENOMEM);
+
+		vma = &uvma->vma;
+	} else {
 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-	else
-		vma = kzalloc(sizeof(*vma) - sizeof(struct xe_userptr),
-			      GFP_KERNEL);
-	if (!vma) {
-		vma = ERR_PTR(-ENOMEM);
-		return vma;
+		if (!vma)
+			return ERR_PTR(-ENOMEM);
+
+		if (is_null)
+			vma->gpuva.flags |= DRM_GPUVA_SPARSE;
+		if (bo)
+			vma->gpuva.gem.obj = &bo->ttm.base;
 	}
 
 	INIT_LIST_HEAD(&vma->combined_links.rebind);
@@ -818,8 +843,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	vma->gpuva.va.range = end - start + 1;
 	if (read_only)
 		vma->gpuva.flags |= XE_VMA_READ_ONLY;
-	if (is_null)
-		vma->gpuva.flags |= DRM_GPUVA_SPARSE;
 
 	for_each_tile(tile, vm->xe, id)
 		vma->tile_mask |= 0x1 << id;
@@ -836,35 +859,35 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 
 		vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base);
 		if (IS_ERR(vm_bo)) {
-			kfree(vma);
+			xe_vma_free(vma);
 			return ERR_CAST(vm_bo);
 		}
 
 		drm_gpuvm_bo_extobj_add(vm_bo);
 		drm_gem_object_get(&bo->ttm.base);
-		vma->gpuva.gem.obj = &bo->ttm.base;
 		vma->gpuva.gem.offset = bo_offset_or_userptr;
 		drm_gpuva_link(&vma->gpuva, vm_bo);
 		drm_gpuvm_bo_put(vm_bo);
 	} else /* userptr or null */ {
 		if (!is_null) {
+			struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
 			u64 size = end - start + 1;
 			int err;
 
-			INIT_LIST_HEAD(&vma->userptr.invalidate_link);
+			INIT_LIST_HEAD(&userptr->invalidate_link);
+			INIT_LIST_HEAD(&userptr->repin_link);
 			vma->gpuva.gem.offset = bo_offset_or_userptr;
 
-			err = mmu_interval_notifier_insert(&vma->userptr.notifier,
+			err = mmu_interval_notifier_insert(&userptr->notifier,
 							   current->mm,
 							   xe_vma_userptr(vma), size,
 							   &vma_userptr_notifier_ops);
 			if (err) {
-				kfree(vma);
-				vma = ERR_PTR(err);
-				return vma;
+				xe_vma_free(vma);
+				return ERR_PTR(err);
 			}
 
-			vma->userptr.notifier_seq = LONG_MAX;
+			userptr->notifier_seq = LONG_MAX;
 		}
 
 		xe_vm_get(vm);
@@ -880,13 +903,15 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 	bool read_only = xe_vma_read_only(vma);
 
 	if (xe_vma_is_userptr(vma)) {
-		if (vma->userptr.sg) {
+		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+
+		if (userptr->sg) {
 			dma_unmap_sgtable(xe->drm.dev,
-					  vma->userptr.sg,
+					  userptr->sg,
 					  read_only ? DMA_TO_DEVICE :
 					  DMA_BIDIRECTIONAL, 0);
-			sg_free_table(vma->userptr.sg);
-			vma->userptr.sg = NULL;
+			sg_free_table(userptr->sg);
+			userptr->sg = NULL;
 		}
 
 		/*
@@ -894,7 +919,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 		 * the notifer until we're sure the GPU is not accessing
 		 * them anymore
 		 */
-		mmu_interval_notifier_remove(&vma->userptr.notifier);
+		mmu_interval_notifier_remove(&userptr->notifier);
 		xe_vm_put(vm);
 	} else if (xe_vma_is_null(vma)) {
 		xe_vm_put(vm);
@@ -902,7 +927,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 		xe_bo_put(xe_vma_bo(vma));
 	}
 
-	kfree(vma);
+	xe_vma_free(vma);
 }
 
 static void vma_destroy_work_func(struct work_struct *w)
@@ -933,7 +958,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
 		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
 
 		spin_lock(&vm->userptr.invalidated_lock);
-		list_del(&vma->userptr.invalidate_link);
+		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
 		spin_unlock(&vm->userptr.invalidated_lock);
 	} else if (!xe_vma_is_null(vma)) {
 		xe_bo_assert_held(xe_vma_bo(vma));
@@ -2150,7 +2175,7 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
 	drm_exec_fini(&exec);
 
 	if (xe_vma_is_userptr(vma)) {
-		err = xe_vma_userptr_pin_pages(vma);
+		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
 		if (err) {
 			prep_vma_destroy(vm, vma, false);
 			xe_vma_destroy_unlocked(vma);
@@ -2507,7 +2532,7 @@ retry_userptr:
 
 	if (err == -EAGAIN && xe_vma_is_userptr(vma)) {
 		lockdep_assert_held_write(&vm->lock);
-		err = xe_vma_userptr_pin_pages(vma);
+		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
 		if (!err)
 			goto retry_userptr;
 
@@ -3138,8 +3163,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
 		if (xe_vma_is_userptr(vma)) {
 			WARN_ON_ONCE(!mmu_interval_check_retry
-				     (&vma->userptr.notifier,
-				      vma->userptr.notifier_seq));
+				     (&to_userptr_vma(vma)->userptr.notifier,
+				      to_userptr_vma(vma)->userptr.notifier_seq));
 			WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
 							     DMA_RESV_USAGE_BOOKKEEP));
 
@@ -3200,11 +3225,11 @@ int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id)
 		if (is_null) {
 			addr = 0;
 		} else if (is_userptr) {
+			struct sg_table *sg = to_userptr_vma(vma)->userptr.sg;
 			struct xe_res_cursor cur;
 
-			if (vma->userptr.sg) {
-				xe_res_first_sg(vma->userptr.sg, 0, XE_PAGE_SIZE,
-						&cur);
+			if (sg) {
+				xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur);
 				addr = xe_res_dma(&cur);
 			} else {
 				addr = 0;
drivers/gpu/drm/xe/xe_vm.h

@@ -160,6 +160,18 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
 	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
 }
 
+/**
+ * to_userptr_vma() - Return a pointer to an embedding userptr vma
+ * @vma: Pointer to the embedded struct xe_vma
+ *
+ * Return: Pointer to the embedding userptr vma
+ */
+static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
+{
+	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
+	return container_of(vma, struct xe_userptr_vma, vma);
+}
+
 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);
 
 int xe_vm_create_ioctl(struct drm_device *dev, void *data,
@@ -224,9 +236,9 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
 	}
 }
 
-int xe_vma_userptr_pin_pages(struct xe_vma *vma);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
 
-int xe_vma_userptr_check_repin(struct xe_vma *vma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
 
 bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
 
drivers/gpu/drm/xe/xe_vm_types.h

@@ -37,6 +37,8 @@ struct xe_vm;
 struct xe_userptr {
 	/** @invalidate_link: Link for the vm::userptr.invalidated list */
 	struct list_head invalidate_link;
+	/** @userptr: link into VM repin list if userptr. */
+	struct list_head repin_link;
 	/**
 	 * @notifier: MMU notifier for user pointer (invalidation call back)
 	 */
@@ -68,8 +70,6 @@ struct xe_vma {
 	 * resv.
 	 */
 	union {
-		/** @userptr: link into VM repin list if userptr. */
-		struct list_head userptr;
 		/** @rebind: link into VM if this VMA needs rebinding. */
 		struct list_head rebind;
 		/** @destroy: link to contested list when VM is being closed. */
@@ -105,11 +105,15 @@ struct xe_vma {
 	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
 	 */
 	u16 pat_index;
+};
 
-	/**
-	 * @userptr: user pointer state, only allocated for VMAs that are
-	 * user pointers
-	 */
+/**
+ * struct xe_userptr_vma - A userptr vma subclass
+ * @vma: The vma.
+ * @userptr: Additional userptr information.
+ */
+struct xe_userptr_vma {
+	struct xe_vma vma;
 	struct xe_userptr userptr;
 };
 
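A closing note on the caller-side idiom the hunks above establish: code first checks xe_vma_is_userptr() and only then downcasts with to_userptr_vma(), which asserts the same condition. A hedged fragment, assuming the xe_vm.h declarations above; the wrapper name repin_if_userptr() is invented for illustration and is not part of the patch:

/* Illustration only: check the VMA type, then downcast to the subclass. */
static int repin_if_userptr(struct xe_vma *vma)
{
	if (!xe_vma_is_userptr(vma))
		return 0;

	return xe_vma_userptr_pin_pages(to_userptr_vma(vma));
}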