mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-09 15:29:16 +00:00
drm/amdkfd: reserve the BO before validating it
Fix a warning. v2: Avoid unmapping attachment repeatedly when ERESTARTSYS. v3: Lock the BO before accessing ttm->sg to avoid race conditions.(Felix) [ 41.708711] WARNING: CPU: 0 PID: 1463 at drivers/gpu/drm/ttm/ttm_bo.c:846 ttm_bo_validate+0x146/0x1b0 [ttm] [ 41.708989] Call Trace: [ 41.708992] <TASK> [ 41.708996] ? show_regs+0x6c/0x80 [ 41.709000] ? ttm_bo_validate+0x146/0x1b0 [ttm] [ 41.709008] ? __warn+0x93/0x190 [ 41.709014] ? ttm_bo_validate+0x146/0x1b0 [ttm] [ 41.709024] ? report_bug+0x1f9/0x210 [ 41.709035] ? handle_bug+0x46/0x80 [ 41.709041] ? exc_invalid_op+0x1d/0x80 [ 41.709048] ? asm_exc_invalid_op+0x1f/0x30 [ 41.709057] ? amdgpu_amdkfd_gpuvm_dmaunmap_mem+0x2c/0x80 [amdgpu] [ 41.709185] ? ttm_bo_validate+0x146/0x1b0 [ttm] [ 41.709197] ? amdgpu_amdkfd_gpuvm_dmaunmap_mem+0x2c/0x80 [amdgpu] [ 41.709337] ? srso_alias_return_thunk+0x5/0x7f [ 41.709346] kfd_mem_dmaunmap_attachment+0x9e/0x1e0 [amdgpu] [ 41.709467] amdgpu_amdkfd_gpuvm_dmaunmap_mem+0x56/0x80 [amdgpu] [ 41.709586] kfd_ioctl_unmap_memory_from_gpu+0x1b7/0x300 [amdgpu] [ 41.709710] kfd_ioctl+0x1ec/0x650 [amdgpu] [ 41.709822] ? __pfx_kfd_ioctl_unmap_memory_from_gpu+0x10/0x10 [amdgpu] [ 41.709945] ? srso_alias_return_thunk+0x5/0x7f [ 41.709949] ? tomoyo_file_ioctl+0x20/0x30 [ 41.709959] __x64_sys_ioctl+0x9c/0xd0 [ 41.709967] do_syscall_64+0x3f/0x90 [ 41.709973] entry_SYSCALL_64_after_hwframe+0x6e/0xd8 Fixes: 101b8104307e ("drm/amdkfd: Move dma unmapping after TLB flush") Signed-off-by: Lang Yu <Lang.Yu@amd.com> Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
16da399091
commit
9c29282ecb
@@ -303,7 +303,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
 					  struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
-void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
 		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
 int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
@ -2085,21 +2085,35 @@ out:
|
|||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
|
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
|
||||||
{
|
{
|
||||||
struct kfd_mem_attachment *entry;
|
struct kfd_mem_attachment *entry;
|
||||||
struct amdgpu_vm *vm;
|
struct amdgpu_vm *vm;
|
||||||
|
int ret;
|
||||||
|
|
||||||
vm = drm_priv_to_vm(drm_priv);
|
vm = drm_priv_to_vm(drm_priv);
|
||||||
|
|
||||||
mutex_lock(&mem->lock);
|
mutex_lock(&mem->lock);
|
||||||
|
|
||||||
|
ret = amdgpu_bo_reserve(mem->bo, true);
|
||||||
|
if (ret)
|
||||||
|
goto out;
|
||||||
|
|
||||||
list_for_each_entry(entry, &mem->attachments, list) {
|
list_for_each_entry(entry, &mem->attachments, list) {
|
||||||
if (entry->bo_va->base.vm == vm)
|
if (entry->bo_va->base.vm != vm)
|
||||||
|
continue;
|
||||||
|
if (entry->bo_va->base.bo->tbo.ttm &&
|
||||||
|
!entry->bo_va->base.bo->tbo.ttm->sg)
|
||||||
|
continue;
|
||||||
|
|
||||||
kfd_mem_dmaunmap_attachment(mem, entry);
|
kfd_mem_dmaunmap_attachment(mem, entry);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
amdgpu_bo_unreserve(mem->bo);
|
||||||
|
out:
|
||||||
mutex_unlock(&mem->lock);
|
mutex_unlock(&mem->lock);
|
||||||
|
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
|
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
|
||||||
|
@@ -1442,7 +1442,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 		kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
 
 		/* Remove dma mapping after tlb flush to avoid IO_PAGE_FAULT */
-		amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+		err = amdgpu_amdkfd_gpuvm_dmaunmap_mem(mem, peer_pdd->drm_priv);
+		if (err)
+			goto sync_memory_failed;
 	}
 
 	mutex_unlock(&p->mutex);
|
Loading…
x
Reference in New Issue
Block a user