mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-05-16 06:41:39 -04:00
drm/amdgpu/userq: caller to take reserv lock for vas_list_cleanup
In function amdgpu_userq_buffer_vas_list_cleanup, remove the reservation lock for vm; the caller should make sure it is taken before locking userq_mutex.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Committed by: Alex Deucher
Parent commit: 469e6fea09
This commit: 168178b0cb
@@ -312,25 +312,21 @@ static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
|
||||
{
|
||||
struct amdgpu_userq_va_cursor *va_cursor, *tmp;
|
||||
struct amdgpu_bo_va_mapping *mapping;
|
||||
int r;
|
||||
|
||||
r = amdgpu_bo_reserve(queue->vm->root.bo, false);
|
||||
if (r)
|
||||
return r;
|
||||
/* Caller must hold vm->root.bo reservation */
|
||||
dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv);
|
||||
|
||||
list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
|
||||
mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
|
||||
if (!mapping) {
|
||||
r = -EINVAL;
|
||||
goto err;
|
||||
return -EINVAL;
|
||||
}
|
||||
dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
|
||||
queue, va_cursor->gpu_addr);
|
||||
amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
|
||||
}
|
||||
err:
|
||||
amdgpu_bo_unreserve(queue->vm->root.bo);
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue)
|
||||
@@ -444,8 +440,6 @@ static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue)
|
||||
/* Wait for mode-1 reset to complete */
|
||||
down_read(&adev->reset_domain->sem);
|
||||
|
||||
/* Drop the userq reference. */
|
||||
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
|
||||
uq_funcs->mqd_destroy(queue);
|
||||
/* Use interrupt-safe locking since IRQ handlers may access these XArrays */
|
||||
xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
|
||||
@@ -626,6 +620,9 @@ static int
|
||||
amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue)
|
||||
{
|
||||
struct amdgpu_device *adev = uq_mgr->adev;
|
||||
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
|
||||
struct amdgpu_vm *vm = &fpriv->vm;
|
||||
|
||||
int r = 0;
|
||||
|
||||
cancel_delayed_work_sync(&uq_mgr->resume_work);
|
||||
@@ -633,6 +630,14 @@ amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_que
|
||||
/* Cancel any pending hang detection work and cleanup */
|
||||
cancel_delayed_work_sync(&queue->hang_detect_work);
|
||||
|
||||
r = amdgpu_bo_reserve(vm->root.bo, false);
|
||||
if (r) {
|
||||
drm_file_err(uq_mgr->file, "Failed to reserve root bo during userqueue destroy\n");
|
||||
return r;
|
||||
}
|
||||
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
|
||||
amdgpu_bo_unreserve(vm->root.bo);
|
||||
|
||||
mutex_lock(&uq_mgr->userq_mutex);
|
||||
queue->hang_detect_fence = NULL;
|
||||
amdgpu_userq_wait_for_last_fence(queue);
|
||||
@@ -664,7 +669,6 @@ amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_que
|
||||
}
|
||||
amdgpu_userq_cleanup(queue);
|
||||
mutex_unlock(&uq_mgr->userq_mutex);
|
||||
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
||||
return r;
|
||||
@@ -856,7 +860,9 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
|
||||
clean_fence_driver:
|
||||
amdgpu_userq_fence_driver_free(queue);
|
||||
clean_mapping:
|
||||
amdgpu_bo_reserve(fpriv->vm.root.bo, true);
|
||||
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
|
||||
amdgpu_bo_unreserve(fpriv->vm.root.bo);
|
||||
kfree(queue);
|
||||
return r;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user