drm/ttm: Make LRU removal optional v2
We are already doing this for DMA-buf imports and also for amdgpu VM
BOs for quite a while now.

If this doesn't run into any problems we are probably going to stop
removing BOs from the LRU altogether.

v2: drop BUG_ON from ttm_bo_add_to_lru

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 6e58ab7ac7
parent 526c654a8a
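The patch threads a new del_lru flag through ttm_eu_reserve_buffers(); every call site updated below passes true, so behaviour is unchanged for now. As a minimal sketch of the new API (not part of this commit; example_reserve is a hypothetical caller), a driver could opt out of LRU removal by passing false:

/*
 * Hypothetical caller, for illustration only: reserves a list of BOs
 * but leaves them on the LRU by passing del_lru = false.  With this
 * patch, ttm_eu_backoff_reservation() and ttm_eu_fence_buffer_objects()
 * only re-add a BO whose LRU entry is empty, so both del_lru values
 * are handled uniformly on the release side.
 */
static int example_reserve(struct ww_acquire_ctx *ticket,
			   struct list_head *validated)
{
	LIST_HEAD(duplicates);
	int r;

	/* intr = true: wait interruptibly; del_lru = false: keep the
	 * reserved BOs on the LRU instead of taking them off. */
	r = ttm_eu_reserve_buffers(ticket, validated, true,
				   &duplicates, false);
	if (r)
		return r;

	/* ... validate buffers and queue work here ... */

	/* Drop the reservations again; BOs still on the LRU are left
	 * in place, removed ones are re-added. */
	ttm_eu_backoff_reservation(ticket, validated);
	return 0;
}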
@@ -585,7 +585,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
 
 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates);
+				     false, &ctx->duplicates, true);
 	if (!ret)
 		ctx->reserved = true;
 	else {
@@ -658,7 +658,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	}
 
 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates);
+				     false, &ctx->duplicates, true);
 	if (!ret)
 		ctx->reserved = true;
 	else
@@ -1808,7 +1808,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	}
 
 	/* Reserve all BOs and page tables for validation */
-	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
+	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
+				     true);
 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
 	if (ret)
 		goto out_free;
@@ -2014,7 +2015,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	}
 
 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
-				     false, &duplicate_save);
+				     false, &duplicate_save, true);
 	if (ret) {
 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
 		goto ttm_reserve_fail;
@@ -648,7 +648,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	}
 
 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-				   &duplicates);
+				   &duplicates, true);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
@@ -79,7 +79,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	list_add(&csa_tv.head, &list);
 	amdgpu_vm_get_pd_bo(vm, &list, &pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
 	if (r) {
 		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
 		return r;
@@ -171,7 +171,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
@@ -608,7 +608,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
 	if (r)
 		goto error_unref;
 
@@ -256,7 +256,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
 		return 0;
 
 	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
-				     !no_intr, NULL);
+				     !no_intr, NULL, true);
 	if (ret)
 		return ret;
 
@@ -559,7 +559,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 	if (!vm_bos)
 		return;
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
 	if (r)
 		goto error_free;
 
@@ -539,7 +539,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
 	INIT_LIST_HEAD(&duplicates);
-	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
+	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
 	if (unlikely(r != 0)) {
 		return r;
 	}
@@ -173,19 +173,20 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 
 	reservation_object_assert_held(bo->resv);
 
-	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		BUG_ON(!list_empty(&bo->lru));
+	if (!list_empty(&bo->lru))
+		return;
 
-		man = &bdev->man[bo->mem.mem_type];
-		list_add_tail(&bo->lru, &man->lru[bo->priority]);
-		kref_get(&bo->list_kref);
+	if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+		return;
 
-		if (bo->ttm && !(bo->ttm->page_flags &
-				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
-			list_add_tail(&bo->swap,
-				      &bdev->glob->swap_lru[bo->priority]);
-			kref_get(&bo->list_kref);
-		}
+	man = &bdev->man[bo->mem.mem_type];
+	list_add_tail(&bo->lru, &man->lru[bo->priority]);
+	kref_get(&bo->list_kref);
+
+	if (bo->ttm && !(bo->ttm->page_flags &
+			 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+		list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+		kref_get(&bo->list_kref);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_add_to_lru);
@@ -69,7 +69,8 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		ttm_bo_add_to_lru(bo);
+		if (list_empty(&bo->lru))
+			ttm_bo_add_to_lru(bo);
 		reservation_object_unlock(bo->resv);
 	}
 	spin_unlock(&glob->lru_lock);
@@ -93,7 +94,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			   struct list_head *list, bool intr,
-			   struct list_head *dups)
+			   struct list_head *dups, bool del_lru)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
@@ -172,11 +173,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		list_add(&entry->head, list);
 	}
 
-	if (ticket)
-		ww_acquire_done(ticket);
-	spin_lock(&glob->lru_lock);
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
+	if (del_lru) {
+		spin_lock(&glob->lru_lock);
+		ttm_eu_del_from_lru_locked(list);
+		spin_unlock(&glob->lru_lock);
+	}
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -203,7 +204,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 			reservation_object_add_shared_fence(bo->resv, fence);
 		else
 			reservation_object_add_excl_fence(bo->resv, fence);
-		ttm_bo_add_to_lru(bo);
+		if (list_empty(&bo->lru))
+			ttm_bo_add_to_lru(bo);
+		else
+			ttm_bo_move_to_lru_tail(bo, NULL);
 		reservation_object_unlock(bo->resv);
 	}
 	spin_unlock(&glob->lru_lock);
@@ -63,7 +63,7 @@ int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
 	struct virtio_gpu_object *qobj;
 	int ret;
 
-	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
 	if (ret != 0)
 		return ret;
 
@@ -464,7 +464,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 	val_buf->bo = &res->backup->base;
 	val_buf->num_shared = 0;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
+				     true);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
@@ -169,7 +169,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
 			  bool intr)
 {
 	return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
-				      NULL);
+				      NULL, true);
 }
 
 /**
@@ -769,7 +769,10 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 		spin_lock(&bo->bdev->glob->lru_lock);
-		ttm_bo_add_to_lru(bo);
+		if (list_empty(&bo->lru))
+			ttm_bo_add_to_lru(bo);
+		else
+			ttm_bo_move_to_lru_tail(bo, NULL);
 		spin_unlock(&bo->bdev->glob->lru_lock);
 	}
 	reservation_object_unlock(bo->resv);
@@ -70,6 +70,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 * @list: thread private list of ttm_validate_buffer structs.
 * @intr: should the wait be interruptible
 * @dups: [out] optional list of duplicates.
+ * @del_lru: true if BOs should be removed from the LRU.
 *
 * Tries to reserve bos pointed to by the list entries for validation.
 * If the function returns 0, all buffers are marked as "unfenced",
@@ -98,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 				  struct list_head *list, bool intr,
-				  struct list_head *dups);
+				  struct list_head *dups, bool del_lru);
 
 /**
 * function ttm_eu_fence_buffer_objects.