diff options
| author | Sunil Khatri <sunil.khatri@amd.com> | 2026-04-08 17:56:23 +0530 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2026-04-17 15:41:12 -0400 |
| commit | 168178b0cbac72f7adecdcbd68c04e1fd644abf5 (patch) | |
| tree | 47fedcc3dcddf01439e6b96458d76ee1656e469b | |
| parent | 469e6fea0949216d150c7b999d95ad0e0203ce27 (diff) | |
drm/amdgpu/userq: caller to take reserv lock for vas_list_cleanup
In the function amdgpu_userq_buffer_vas_list_cleanup(), stop taking the
reservation lock for the VM; the caller must make sure it is taken
before locking userq_mutex.
Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c | 30 |
1 file changed, 18 insertions, 12 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 6c76cf4ff380..d460cb485920 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -312,25 +312,21 @@ static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev, { struct amdgpu_userq_va_cursor *va_cursor, *tmp; struct amdgpu_bo_va_mapping *mapping; - int r; - r = amdgpu_bo_reserve(queue->vm->root.bo, false); - if (r) - return r; + /* Caller must hold vm->root.bo reservation */ + dma_resv_assert_held(queue->vm->root.bo->tbo.base.resv); list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) { mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr); if (!mapping) { - r = -EINVAL; - goto err; + return -EINVAL; } dev_dbg(adev->dev, "delete the userq:%p va:%llx\n", queue, va_cursor->gpu_addr); amdgpu_userq_buffer_va_list_del(mapping, va_cursor); } -err: - amdgpu_bo_unreserve(queue->vm->root.bo); - return r; + + return 0; } static int amdgpu_userq_preempt_helper(struct amdgpu_usermode_queue *queue) @@ -444,8 +440,6 @@ static void amdgpu_userq_cleanup(struct amdgpu_usermode_queue *queue) /* Wait for mode-1 reset to complete */ down_read(&adev->reset_domain->sem); - /* Drop the userq reference. 
*/ - amdgpu_userq_buffer_vas_list_cleanup(adev, queue); uq_funcs->mqd_destroy(queue); /* Use interrupt-safe locking since IRQ handlers may access these XArrays */ xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index); @@ -626,6 +620,9 @@ static int amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_queue *queue) { struct amdgpu_device *adev = uq_mgr->adev; + struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); + struct amdgpu_vm *vm = &fpriv->vm; + int r = 0; cancel_delayed_work_sync(&uq_mgr->resume_work); @@ -633,6 +630,14 @@ amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_que /* Cancel any pending hang detection work and cleanup */ cancel_delayed_work_sync(&queue->hang_detect_work); + r = amdgpu_bo_reserve(vm->root.bo, false); + if (r) { + drm_file_err(uq_mgr->file, "Failed to reserve root bo during userqueue destroy\n"); + return r; + } + amdgpu_userq_buffer_vas_list_cleanup(adev, queue); + amdgpu_bo_unreserve(vm->root.bo); + mutex_lock(&uq_mgr->userq_mutex); queue->hang_detect_fence = NULL; amdgpu_userq_wait_for_last_fence(queue); @@ -664,7 +669,6 @@ amdgpu_userq_destroy(struct amdgpu_userq_mgr *uq_mgr, struct amdgpu_usermode_que } amdgpu_userq_cleanup(queue); mutex_unlock(&uq_mgr->userq_mutex); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; @@ -856,7 +860,9 @@ clean_mqd: clean_fence_driver: amdgpu_userq_fence_driver_free(queue); clean_mapping: + amdgpu_bo_reserve(fpriv->vm.root.bo, true); amdgpu_userq_buffer_vas_list_cleanup(adev, queue); + amdgpu_bo_unreserve(fpriv->vm.root.bo); kfree(queue); return r; } |
