drm/radeon: don't add the IB pool to all VMs v2
We want to use VMs without the IB pool in the future.

v2: also remove it from radeon_vm_fini().

Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit d72d43cfc5
parent 90a51a3292
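In short, this moves the IB pool mapping out of radeon_vm_init() and into radeon_driver_open_kms(), the one caller that actually needs it; radeon_vm_init() becomes void and only initializes the VM fields. A minimal sketch of the resulting calling pattern, assuming only the functions touched by this patch (example_open_vm() is a hypothetical helper for illustration; the real code is inline in radeon_driver_open_kms()):

static int example_open_vm(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	/* bare VM setup: id, fence, mutex and lists only */
	radeon_vm_init(rdev, vm);

	/* map the IB pool buffer read-only at the fixed IB offset;
	 * only userspace-facing VMs need this step any more
	 */
	bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
	return radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
				     RADEON_VM_PAGE_READABLE |
				     RADEON_VM_PAGE_SNOOPED);
}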
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
@@ -1848,7 +1848,7 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
  */
 int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -602,7 +602,6 @@ int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
  * @vm: vm to bind
  *
  * Allocate a page table for the requested vm (cayman+).
- * Also starts to populate the page table.
  * Returns 0 for success, error for failure.
  *
  * Global and local mutex must be locked!
@@ -655,8 +654,7 @@ retry:
 	}
 
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
-				       &rdev->ring_tmp_bo.bo->tbo.mem);
+	return 0;
 }
 
 /**
@@ -1241,30 +1239,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @vm: requested vm
  *
- * Init @vm (cayman+).
- * Map the IB pool and any other shared objects into the VM
- * by default as it's used by all VMs.
- * Returns 0 for success, error for failure.
+ * Init @vm fields (cayman+).
  */
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	struct radeon_bo_va *bo_va;
-	int r;
-
 	vm->id = 0;
 	vm->fence = NULL;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-
-	/* map the ib pool buffer at 0 in virtual address space, set
-	 * read only
-	 */
-	bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
-	r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-				  RADEON_VM_PAGE_READABLE |
-				  RADEON_VM_PAGE_SNOOPED);
-	return r;
 }
 
 /**
@@ -1286,17 +1269,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_vm_free_pt(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
-	/* remove all bo at this point non are busy any more because unbind
-	 * waited for the last vm fence to signal
-	 */
-	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-	if (!r) {
-		bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
-		list_del_init(&bo_va->bo_list);
-		list_del_init(&bo_va->vm_list);
-		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-		kfree(bo_va);
-	}
 	if (!list_empty(&vm->va)) {
 		dev_err(rdev->dev, "still active bo inside vm\n");
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
+		struct radeon_bo_va *bo_va;
 		int r;
 
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return -ENOMEM;
 		}
 
-		r = radeon_vm_init(rdev, &fpriv->vm);
+		radeon_vm_init(rdev, &fpriv->vm);
+
+		/* map the ib pool buffer read only into
+		 * virtual address space */
+		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+					 rdev->ring_tmp_bo.bo);
+		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+					  RADEON_VM_PAGE_READABLE |
+					  RADEON_VM_PAGE_SNOOPED);
 		if (r) {
 			radeon_vm_fini(rdev, &fpriv->vm);
 			kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
 		struct radeon_fpriv *fpriv = file_priv->driver_priv;
+		struct radeon_bo_va *bo_va;
+		int r;
+
+		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+		if (!r) {
+			bo_va = radeon_vm_bo_find(&fpriv->vm,
+						  rdev->ring_tmp_bo.bo);
+			if (bo_va)
+				radeon_vm_bo_rmv(rdev, bo_va);
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		}
 
 		radeon_vm_fini(rdev, &fpriv->vm);
 		kfree(fpriv);
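The teardown side mirrors this: radeon_vm_fini() no longer knows about the IB pool mapping, so radeon_driver_postclose_kms() removes it via radeon_vm_bo_rmv() before finishing the VM. With that, the stated goal (VMs without the IB pool) reduces to a plain init/fini pair; a sketch of such a hypothetical internal user (example_internal_vm() is illustrative, not part of the patch):

static void example_internal_vm(struct radeon_device *rdev, struct radeon_vm *vm)
{
	radeon_vm_init(rdev, vm);	/* nothing is mapped by default now */

	/* ... add and use kernel-internal mappings as needed ... */

	radeon_vm_fini(rdev, vm);
}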