author     Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>    2025-04-11 15:08:30 +0530
committer  Alex Deucher <alexander.deucher@amd.com>                      2025-04-22 08:51:44 -0400
commit     4b27406380b0b9ada6b4893bc8f6766dd34fff36
tree       03e5632ee64b1f61afaa48be0a0ca5f9d6b5a1e9 /drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
parent     4ec2141d23d3bbede9347a60580a40d9e11089ff
drm/amdgpu: Add queue id support to the user queue wait IOCTL
Add a queue id field to the user queue wait IOCTL's
drm_amdgpu_userq_wait structure.

This is required to look up the waiting user queue and store the
fence driver references in it, so that the user queue in the same
context releases its references to the fence drivers at some point
before queue destruction. Otherwise, we would keep accumulating
those references until we run out of space and crash.
v2: Modify the UAPI comment as per the mesa and libdrm UAPI comment.
Libdrm MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/408
Mesa MR: https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/34493
Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index 0a3032e01c34..ca198360cfda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -91,7 +91,6 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
 	spin_lock_init(&fence_drv->fence_list_lock);
 
 	fence_drv->adev = adev;
-	fence_drv->fence_drv_xa_ptr = &userq->fence_drv_xa;
 	fence_drv->context = dma_fence_context_alloc(1);
 	get_task_comm(fence_drv->timeline_name, current);
 
@@ -611,6 +610,9 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
 	u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
 	struct drm_amdgpu_userq_fence_info *fence_info = NULL;
 	struct drm_amdgpu_userq_wait *wait_info = data;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+	struct amdgpu_usermode_queue *waitq;
 	struct drm_gem_object **gobj_write;
 	struct drm_gem_object **gobj_read;
 	struct dma_fence **fences = NULL;
@@ -860,6 +862,10 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
 		fences[num_fences++] = fence;
 	}
 
+	waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+	if (!waitq)
+		goto free_fences;
+
 	for (i = 0, cnt = 0; i < num_fences; i++) {
 		struct amdgpu_userq_fence_driver *fence_drv;
 		struct amdgpu_userq_fence *userq_fence;
@@ -888,14 +894,12 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
 		 * Otherwise, we would gather those references until we don't
 		 * have any more space left and crash.
 		 */
-		if (fence_drv->fence_drv_xa_ptr) {
-			r = xa_alloc(fence_drv->fence_drv_xa_ptr, &index, fence_drv,
-				     xa_limit_32b, GFP_KERNEL);
-			if (r)
-				goto free_fences;
+		r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+			     xa_limit_32b, GFP_KERNEL);
+		if (r)
+			goto free_fences;
 
-			amdgpu_userq_fence_driver_get(fence_drv);
-		}
+		amdgpu_userq_fence_driver_get(fence_drv);
 
 		/* Store drm syncobj's gpu va address and value */
 		fence_info[cnt].va = fence_drv->va;