summary refs log tree commit diff
diff options
context:
space:
mode:
author	Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>	2026-01-06 11:10:50 -0800
committer	Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>	2026-01-06 11:13:54 -0800
commit	caaed1dda7df9b4e21d439bb5e7750d4af4f1e78 (patch)
tree	1c4d386ca0386c193e973d235e5baacc2c56bff8
parent	e70711be0d0ebdd0a9213446b657fe0815e38017 (diff)
Revert "drm/xe/multi_queue: Support active group after primary is destroyed"
This reverts commit 3131a43ecb346ae3b5287ee195779fc38c6fcd11.

There is no must-have requirement for this feature from Compute UMD.

Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patch.msgid.link/20260106191051.2866538-5-niranjana.vishwanathapura@intel.com
-rw-r--r--drivers/gpu/drm/xe/xe_device.c7
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.c55
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue.h2
-rw-r--r--drivers/gpu/drm/xe/xe_exec_queue_types.h4
-rw-r--r--include/uapi/drm/xe_drm.h4
5 files changed, 3 insertions, 69 deletions
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index e101d290b2a6..f4741cbe4c45 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -180,12 +180,7 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
xa_for_each(&xef->exec_queue.xa, idx, q) {
if (q->vm && q->hwe->hw_engine_group)
xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
-
- if (xe_exec_queue_is_multi_queue_primary(q))
- xe_exec_queue_group_kill_put(q->multi_queue.group);
- else
- xe_exec_queue_kill(q);
-
+ xe_exec_queue_kill(q);
xe_exec_queue_put(q);
}
xa_for_each(&xef->vm.xa, idx, vm)
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 0b9e074b022f..529a40ca4002 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -467,26 +467,6 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
-static void xe_exec_queue_group_kill(struct kref *ref)
-{
- struct xe_exec_queue_group *group = container_of(ref, struct xe_exec_queue_group,
- kill_refcount);
- xe_exec_queue_kill(group->primary);
-}
-
-static inline void xe_exec_queue_group_kill_get(struct xe_exec_queue_group *group)
-{
- kref_get(&group->kill_refcount);
-}
-
-void xe_exec_queue_group_kill_put(struct xe_exec_queue_group *group)
-{
- if (!group)
- return;
-
- kref_put(&group->kill_refcount, xe_exec_queue_group_kill);
-}
-
void xe_exec_queue_destroy(struct kref *ref)
{
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
@@ -716,7 +696,6 @@ static int xe_exec_queue_group_init(struct xe_device *xe, struct xe_exec_queue *
group->primary = q;
group->cgp_bo = bo;
INIT_LIST_HEAD(&group->list);
- kref_init(&group->kill_refcount);
xa_init_flags(&group->xa, XA_FLAGS_ALLOC1);
mutex_init(&group->list_lock);
q->multi_queue.group = group;
@@ -792,11 +771,6 @@ static int xe_exec_queue_group_add(struct xe_device *xe, struct xe_exec_queue *q
q->multi_queue.pos = pos;
- if (group->primary->multi_queue.keep_active) {
- xe_exec_queue_group_kill_get(group);
- q->multi_queue.keep_active = true;
- }
-
return 0;
}
@@ -810,11 +784,6 @@ static void xe_exec_queue_group_delete(struct xe_device *xe, struct xe_exec_queu
lrc = xa_erase(&group->xa, q->multi_queue.pos);
xe_assert(xe, lrc);
xe_lrc_put(lrc);
-
- if (q->multi_queue.keep_active) {
- xe_exec_queue_group_kill_put(group);
- q->multi_queue.keep_active = false;
- }
}
static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue *q,
@@ -836,24 +805,12 @@ static int exec_queue_set_multi_group(struct xe_device *xe, struct xe_exec_queue
return -EINVAL;
if (value & DRM_XE_MULTI_GROUP_CREATE) {
- if (XE_IOCTL_DBG(xe, value & ~(DRM_XE_MULTI_GROUP_CREATE |
- DRM_XE_MULTI_GROUP_KEEP_ACTIVE)))
- return -EINVAL;
-
- /*
- * KEEP_ACTIVE is not supported in preempt fence mode as in that mode,
- * VM_DESTROY ioctl expects all exec queues of that VM are already killed.
- */
- if (XE_IOCTL_DBG(xe, (value & DRM_XE_MULTI_GROUP_KEEP_ACTIVE) &&
- xe_vm_in_preempt_fence_mode(q->vm)))
+ if (XE_IOCTL_DBG(xe, value & ~DRM_XE_MULTI_GROUP_CREATE))
return -EINVAL;
q->multi_queue.valid = true;
q->multi_queue.is_primary = true;
q->multi_queue.pos = 0;
- if (value & DRM_XE_MULTI_GROUP_KEEP_ACTIVE)
- q->multi_queue.keep_active = true;
-
return 0;
}
@@ -1419,11 +1376,6 @@ void xe_exec_queue_kill(struct xe_exec_queue *q)
q->ops->kill(q);
xe_vm_remove_compute_exec_queue(q->vm, q);
-
- if (!xe_exec_queue_is_multi_queue_primary(q) && q->multi_queue.keep_active) {
- xe_exec_queue_group_kill_put(q->multi_queue.group);
- q->multi_queue.keep_active = false;
- }
}
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
@@ -1450,10 +1402,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
if (q->vm && q->hwe->hw_engine_group)
xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
- if (xe_exec_queue_is_multi_queue_primary(q))
- xe_exec_queue_group_kill_put(q->multi_queue.group);
- else
- xe_exec_queue_kill(q);
+ xe_exec_queue_kill(q);
trace_xe_exec_queue_close(q);
xe_exec_queue_put(q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index b5ad975d7e97..b1e51789128f 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -113,8 +113,6 @@ static inline struct xe_exec_queue *xe_exec_queue_multi_queue_primary(struct xe_
return xe_exec_queue_is_multi_queue(q) ? q->multi_queue.group->primary : q;
}
-void xe_exec_queue_group_kill_put(struct xe_exec_queue_group *group);
-
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index 67ea5eebf70b..5fc516b0bb77 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -62,8 +62,6 @@ struct xe_exec_queue_group {
struct list_head list;
/** @list_lock: Secondary queue list lock */
struct mutex list_lock;
- /** @kill_refcount: ref count to kill primary queue */
- struct kref kill_refcount;
/** @sync_pending: CGP_SYNC_DONE g2h response pending */
bool sync_pending;
/** @banned: Group banned */
@@ -163,8 +161,6 @@ struct xe_exec_queue {
u8 valid:1;
/** @multi_queue.is_primary: Is primary queue (Q0) of the group */
u8 is_primary:1;
- /** @multi_queue.keep_active: Keep the group active after primary is destroyed */
- u8 keep_active:1;
} multi_queue;
/** @sched_props: scheduling properties */
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index bb69f9b30c7d..077e66a682e2 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -1280,9 +1280,6 @@ struct drm_xe_vm_bind {
* then a new multi-queue group is created with this queue as the primary queue
* (Q0). Otherwise, the queue gets added to the multi-queue group whose primary
* queue's exec_queue_id is specified in the lower 32 bits of the 'value' field.
- * If the extension's 'value' field has %DRM_XE_MULTI_GROUP_KEEP_ACTIVE flag
- * set, then the multi-queue group is kept active after the primary queue is
- * destroyed.
* All the other non-relevant bits of extension's 'value' field while adding the
* primary or the secondary queues of the group must be set to 0.
* - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
@@ -1331,7 +1328,6 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP 4
#define DRM_XE_MULTI_GROUP_CREATE (1ull << 63)
-#define DRM_XE_MULTI_GROUP_KEEP_ACTIVE (1ull << 62)
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY 5
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;