drm/amdgpu/userq: call dma_resv_wait_timeout without test for signalled

In the function amdgpu_userq_gem_va_unmap_validate(), call
dma_resv_wait_timeout() directly without first testing whether
the fences are already signalled. Also, since we wait forever,
the function no longer needs a return value and hence callers
need no error handling.

Suggested-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Sunil Khatri
2026-03-26 13:22:20 +05:30
committed by Alex Deucher
parent 4c86e12ab1
commit 38476bde59
3 changed files with 11 additions and 23 deletions

View File

@@ -1462,17 +1462,16 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
return ret;
}
int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t saddr)
void amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t saddr)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_bo_va *bo_va = mapping->bo_va;
struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
int ret = 0;
if (!ip_mask)
return 0;
return;
dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
/**
@@ -1483,14 +1482,8 @@ int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
* unmap is only for one kind of userq VAs, so at this point suppose
* the eviction fence is always unsignaled.
*/
if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
MAX_SCHEDULE_TIMEOUT);
if (ret <= 0)
return -EBUSY;
}
return 0;
dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
false, MAX_SCHEDULE_TIMEOUT);
}
void amdgpu_userq_pre_reset(struct amdgpu_device *adev)

View File

@@ -160,7 +160,7 @@ void amdgpu_userq_start_hang_detect_work(struct amdgpu_usermode_queue *queue);
int amdgpu_userq_input_va_validate(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *queue,
u64 addr, u64 expected_size);
int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t saddr);
void amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
uint64_t saddr);
#endif

View File

@@ -1978,7 +1978,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
int r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -2003,12 +2002,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
* during user requests GEM unmap IOCTL except for forcing the unmap
* from user space.
*/
if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
if (unlikely(r == -EBUSY))
dev_warn_once(adev->dev,
"Attempt to unmap an active userq buffer\n");
}
if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0))
amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);