author    Matthew Brost <matthew.brost@intel.com>	2025-12-12 10:28:45 -0800
committer Matthew Brost <matthew.brost@intel.com>	2025-12-15 14:02:54 -0800
commit    4ac9048d05017449dde6320694d6e4700a8b9f5f (patch)
tree      8b346173f87b4d39e6a15a8edf99e9fbaab0f6a7
parent    8533051ce92015e9cc6f75e0d52119b9d91610b6 (diff)
drm/xe: Wait on in-syncs when switching to dma-fence mode
If a dma-fence submission has in-fences and pagefault queues are running
work, there is little incentive to kick the pagefault queues off the
hardware until the dma-fence submission is ready to run. Therefore, wait
on the in-fences of the dma-fence submission before removing the
pagefault queues from the hardware.

v2:
 - Fix kernel doc (CI)
 - Don't wait under lock (Thomas)
 - Make wait interruptible

Suggested-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/20251212182847.1683222-6-matthew.brost@intel.com
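For orientation before the diff: below is a minimal userspace sketch of the
retry flow this patch adds to xe_hw_engine_group_get_mode(). It is a toy
model, not the kernel code — toy_fence, try_switch_mode() and
wait_in_fence() are hypothetical stand-ins for a dma-fence, the faulting-LR
suspend path, and dma_fence_wait(), and the mode_sem locking is elided.

/*
 * Toy model of the pattern: try to switch to dma-fence mode; if the
 * suspend path reports -EAGAIN because the job still has unsignaled
 * in-fences and the pagefault queues are not idle, wait on the fences
 * outside the lock, clear has_deps, and retry.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_fence {
	bool signaled;
};

/* Stand-in for the faulting-LR suspend: refuse to kick busy pagefault
 * queues off the hardware while the triggering job has dependencies. */
static int try_switch_mode(bool queues_idle, bool has_deps)
{
	if (!queues_idle && has_deps)
		return -EAGAIN;
	return 0;
}

/* Stand-in for dma_fence_wait(fence, true): here we just mark it done. */
static int wait_in_fence(struct toy_fence *fence)
{
	fence->signaled = true;
	return 0;
}

int main(void)
{
	struct toy_fence in_fence = { .signaled = false };
	bool has_deps = !in_fence.signaled;
	int err;

retry:
	/* In the kernel this runs under group->mode_sem held for write. */
	err = try_switch_mode(false, has_deps);
	if (err == -EAGAIN) {
		/* Key point of v2: the wait happens outside the lock. */
		err = wait_in_fence(&in_fence);
		if (err)
			return err;
		has_deps = false;
		goto retry;
	}

	printf("mode switch %s\n", err ? "failed" : "succeeded");
	return err;
}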
-rw-r--r--	drivers/gpu/drm/xe/xe_exec.c            |  9
-rw-r--r--	drivers/gpu/drm/xe/xe_hw_engine_group.c | 55
-rw-r--r--	drivers/gpu/drm/xe/xe_hw_engine_group.h |  4
-rw-r--r--	drivers/gpu/drm/xe/xe_sync.c            | 28
-rw-r--r--	drivers/gpu/drm/xe/xe_sync.h            |  2
5 files changed, 87 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index fd9480031750..730a5c9c2637 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -121,7 +121,7 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u64 addresses[XE_HW_ENGINE_MAX_INSTANCE];
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
- u32 i, num_syncs, num_ufence = 0;
+ u32 i, num_syncs, num_in_sync = 0, num_ufence = 0;
struct xe_validation_ctx ctx;
struct xe_sched_job *job;
struct xe_vm *vm;
@@ -183,6 +183,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (xe_sync_is_ufence(&syncs[num_syncs]))
num_ufence++;
+
+ if (!num_in_sync && xe_sync_needs_wait(&syncs[num_syncs]))
+ num_in_sync++;
}
if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
@@ -203,7 +206,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
mode = xe_hw_engine_group_find_exec_mode(q);
if (mode == EXEC_MODE_DMA_FENCE) {
- err = xe_hw_engine_group_get_mode(group, mode, &previous_mode);
+ err = xe_hw_engine_group_get_mode(group, mode, &previous_mode,
+ syncs, num_in_sync ?
+ num_syncs : 0);
if (err)
goto err_syncs;
}
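The xe_exec.c hunk above adds a pre-pass: if any in-sync is still
unsignaled, the whole sync array is handed to the mode switch; otherwise
none are. A minimal sketch of that decision, with the per-sync
xe_sync_needs_wait() results stubbed as a hypothetical bool array:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Stubbed xe_sync_needs_wait() result for each sync entry. */
	bool needs_wait[] = { false, true, false };
	unsigned int i, num_syncs = 3, num_in_sync = 0;

	/* One unsignaled in-sync is enough; stop counting after that. */
	for (i = 0; i < num_syncs; i++)
		if (!num_in_sync && needs_wait[i])
			num_in_sync++;

	/* Mirrors: xe_hw_engine_group_get_mode(group, mode,
	 *          &previous_mode, syncs, num_in_sync ? num_syncs : 0); */
	printf("num_syncs passed: %u\n", num_in_sync ? num_syncs : 0);
	return 0;
}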
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index 4d9263a1a208..40ce5d5f543c 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -11,6 +11,7 @@
#include "xe_gt.h"
#include "xe_gt_stats.h"
#include "xe_hw_engine_group.h"
+#include "xe_sync.h"
#include "xe_vm.h"
static void
@@ -21,7 +22,8 @@ hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
int err;
enum xe_hw_engine_group_execution_mode previous_mode;
- err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
+ err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode,
+ NULL, 0);
if (err)
return;
@@ -189,10 +191,12 @@ void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group
/**
* xe_hw_engine_group_suspend_faulting_lr_jobs() - Suspend the faulting LR jobs of this group
* @group: The hw engine group
+ * @has_deps: dma-fence job triggering suspend has dependencies
*
* Return: 0 on success, negative error code on error.
*/
-static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
+static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group,
+ bool has_deps)
{
int err;
struct xe_exec_queue *q;
@@ -201,11 +205,18 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
lockdep_assert_held_write(&group->mode_sem);
list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
+ bool idle_skip_suspend;
+
if (!xe_vm_in_fault_mode(q->vm))
continue;
+ idle_skip_suspend = xe_exec_queue_idle_skip_suspend(q);
+ if (!idle_skip_suspend && has_deps)
+ return -EAGAIN;
+
xe_gt_stats_incr(q->gt, XE_GT_STATS_ID_HW_ENGINE_GROUP_SUSPEND_LR_QUEUE_COUNT, 1);
- need_resume |= !xe_exec_queue_idle_skip_suspend(q);
+
+ need_resume |= !idle_skip_suspend;
q->ops->suspend(q);
}
@@ -258,7 +269,7 @@ static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group
return 0;
}
-static int switch_mode(struct xe_hw_engine_group *group)
+static int switch_mode(struct xe_hw_engine_group *group, bool has_deps)
{
int err = 0;
enum xe_hw_engine_group_execution_mode new_mode;
@@ -268,7 +279,8 @@ static int switch_mode(struct xe_hw_engine_group *group)
switch (group->cur_mode) {
case EXEC_MODE_LR:
new_mode = EXEC_MODE_DMA_FENCE;
- err = xe_hw_engine_group_suspend_faulting_lr_jobs(group);
+ err = xe_hw_engine_group_suspend_faulting_lr_jobs(group,
+ has_deps);
break;
case EXEC_MODE_DMA_FENCE:
new_mode = EXEC_MODE_LR;
@@ -284,19 +296,36 @@ static int switch_mode(struct xe_hw_engine_group *group)
return 0;
}
+static int wait_syncs(struct xe_sync_entry *syncs, int num_syncs)
+{
+ int err, i;
+
+ for (i = 0; i < num_syncs; ++i) {
+ err = xe_sync_entry_wait(syncs + i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/**
* xe_hw_engine_group_get_mode() - Get the group to execute in the new mode
* @group: The hw engine group
* @new_mode: The new execution mode
* @previous_mode: Pointer to the previous mode provided for use by caller
+ * @syncs: Syncs from exec IOCTL
+ * @num_syncs: Number of syncs from exec IOCTL
*
* Return: 0 if successful, -EINTR if locking failed.
*/
int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
enum xe_hw_engine_group_execution_mode new_mode,
- enum xe_hw_engine_group_execution_mode *previous_mode)
+ enum xe_hw_engine_group_execution_mode *previous_mode,
+ struct xe_sync_entry *syncs, int num_syncs)
__acquires(&group->mode_sem)
{
+ bool has_deps = !!num_syncs;
int err = down_read_interruptible(&group->mode_sem);
if (err)
@@ -306,15 +335,25 @@ __acquires(&group->mode_sem)
if (new_mode != group->cur_mode) {
up_read(&group->mode_sem);
+retry:
err = down_write_killable(&group->mode_sem);
if (err)
return err;
if (new_mode != group->cur_mode) {
- err = switch_mode(group);
+ err = switch_mode(group, has_deps);
if (err) {
up_write(&group->mode_sem);
- return err;
+
+ if (err != -EAGAIN)
+ return err;
+
+ err = wait_syncs(syncs, num_syncs);
+ if (err)
+ return err;
+
+ has_deps = false;
+ goto retry;
}
}
downgrade_write(&group->mode_sem);
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.h b/drivers/gpu/drm/xe/xe_hw_engine_group.h
index 797ee81acbf2..8b17ccd30b70 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.h
@@ -11,6 +11,7 @@
struct drm_device;
struct xe_exec_queue;
struct xe_gt;
+struct xe_sync_entry;
int xe_hw_engine_setup_groups(struct xe_gt *gt);
@@ -19,7 +20,8 @@ void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct
int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
enum xe_hw_engine_group_execution_mode new_mode,
- enum xe_hw_engine_group_execution_mode *previous_mode);
+ enum xe_hw_engine_group_execution_mode *previous_mode,
+ struct xe_sync_entry *syncs, int num_syncs);
void xe_hw_engine_group_put(struct xe_hw_engine_group *group);
enum xe_hw_engine_group_execution_mode
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 1fc4fa278b78..ee1344a880b9 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -228,6 +228,34 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync, struct xe_sched_job *job)
return 0;
}
+/**
+ * xe_sync_entry_wait() - Wait on in-sync
+ * @sync: Sync object
+ *
+ * If the sync is an in-sync, wait on the sync to signal.
+ *
+ * Return: 0 on success, -ERESTARTSYS on failure (interruption)
+ */
+int xe_sync_entry_wait(struct xe_sync_entry *sync)
+{
+ if (sync->flags & DRM_XE_SYNC_FLAG_SIGNAL)
+ return 0;
+
+ return dma_fence_wait(sync->fence, true);
+}
+
+/**
+ * xe_sync_needs_wait() - Sync needs a wait (input dma-fence not signaled)
+ * @sync: Sync object
+ *
+ * Return: True if sync needs a wait, False otherwise
+ */
+bool xe_sync_needs_wait(struct xe_sync_entry *sync)
+{
+ return !(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &sync->fence->flags);
+}
+
void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
{
if (!(sync->flags & DRM_XE_SYNC_FLAG_SIGNAL))
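The two helpers added to xe_sync.c split the work: xe_sync_needs_wait() is
a non-blocking check (in-sync whose fence has not signaled), while
xe_sync_entry_wait() blocks interruptibly and can return -ERESTARTSYS. A
toy model of that contract — toy_sync, toy_needs_wait() and toy_wait() are
hypothetical; the real code checks DRM_XE_SYNC_FLAG_SIGNAL and
DMA_FENCE_FLAG_SIGNALED_BIT and calls dma_fence_wait(fence, true):

#include <stdbool.h>
#include <stdio.h>

#define ERESTARTSYS 512	/* kernel-internal errno; not in userspace errno.h */

struct toy_sync {
	bool is_signal_point;	/* DRM_XE_SYNC_FLAG_SIGNAL set */
	bool fence_signaled;	/* DMA_FENCE_FLAG_SIGNALED_BIT set */
	bool interrupted;	/* pretend a signal arrived mid-wait */
};

/* Mirrors xe_sync_needs_wait(): only unsignaled in-syncs count. */
static bool toy_needs_wait(const struct toy_sync *s)
{
	return !s->is_signal_point && !s->fence_signaled;
}

/* Mirrors xe_sync_entry_wait(): out-syncs return immediately;
 * an interrupted wait reports -ERESTARTSYS. */
static int toy_wait(struct toy_sync *s)
{
	if (s->is_signal_point)
		return 0;
	if (s->interrupted)
		return -ERESTARTSYS;
	s->fence_signaled = true;	/* pretend the fence signaled */
	return 0;
}

int main(void)
{
	struct toy_sync pending = { 0 };

	if (toy_needs_wait(&pending))
		printf("wait returned %d\n", toy_wait(&pending));
	return 0;
}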
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 51f2d803e977..6b949194acff 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -29,6 +29,8 @@ int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
void xe_sync_entry_signal(struct xe_sync_entry *sync,
struct dma_fence *fence);
+int xe_sync_entry_wait(struct xe_sync_entry *sync);
+bool xe_sync_needs_wait(struct xe_sync_entry *sync);
void xe_sync_entry_cleanup(struct xe_sync_entry *sync);
struct dma_fence *
xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,