| field | value | date |
|---|---|---|
| author | Tvrtko Ursulin <tvrtko.ursulin@igalia.com> | 2025-05-15 10:49:56 +0100 |
| committer | Christian König <christian.koenig@amd.com> | 2025-05-15 15:05:29 +0200 |
| commit | 549810e918155cc00d65d44ed3e7d2bd0aa89df9 | |
| tree | 0e90891c11ef57e521f1e17e64ae27a75d911081 /drivers | |
| parent | 4963049ea1aed7b5aefe164867e0312185e878bc | |
dma-fence: Change signature of __dma_fence_is_later
With the goal of reducing the need for drivers to touch (and dereference)
fence->ops, we change the prototype of __dma_fence_is_later() to take
fence instead of fence->ops.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20250515095004.28318-2-tvrtko.ursulin@igalia.com
Signed-off-by: Christian König <christian.koenig@amd.com>
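
The header change itself is outside this diffstat (which is limited to 'drivers'), so the new prototype only shows up here through its callers. As a rough sketch, inferred from the updated call sites below and assuming the helper keeps its existing use_64bit_seqno comparison and only moves the ops lookup inside, the inline in include/linux/dma-fence.h presumably changes along these lines:

```c
/*
 * Sketch only -- the real definition lives in include/linux/dma-fence.h,
 * which is outside this 'drivers'-limited diffstat. Bodies are assumed
 * unchanged apart from where the ops pointer comes from.
 */

/* Old: callers had to dereference and pass fence->ops themselves. */
static inline bool __dma_fence_is_later(u64 f1, u64 f2,
					const struct dma_fence_ops *ops)
{
	/* Drivers limited to 32-bit seqnos need a wrap-safe comparison. */
	if (ops->use_64bit_seqno)
		return f1 > f2;

	return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
}

/* New: the fence comes first and the helper reaches fence->ops itself. */
static inline bool __dma_fence_is_later(struct dma_fence *fence,
					u64 f1, u64 f2)
{
	if (fence->ops->use_64bit_seqno)
		return f1 > f2;

	return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
}
```

Each call site in the diff below follows the same mechanical pattern: the fence pointer becomes the first argument and the two sequence numbers keep their existing order.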
Diffstat (limited to 'drivers')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/dma-buf/dma-fence-chain.c | 2 |
| -rw-r--r-- | drivers/dma-buf/sw_sync.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_hw_fence.c | 2 |
| -rw-r--r-- | drivers/gpu/drm/xe/xe_sched_job.c | 14 |

4 files changed, 11 insertions, 9 deletions
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 9663ba1bb6ac..90424f23fd73 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -252,7 +252,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
 	chain->prev_seqno = 0;
 
 	/* Try to reuse the context of the previous chain node. */
-	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+	if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) {
 		context = prev->context;
 		chain->prev_seqno = prev->seqno;
 	} else {
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 4f27ee93a00c..3c20f1d31cf5 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -170,7 +170,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
 {
 	struct sync_timeline *parent = dma_fence_parent(fence);
 
-	return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
+	return !__dma_fence_is_later(fence, fence->seqno, parent->value);
 }
 
 static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index 0b4f12be3692..03eb8c6d1616 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -165,7 +165,7 @@ static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
 	u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);
 
 	return dma_fence->error ||
-	       !__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
+	       !__dma_fence_is_later(dma_fence, dma_fence->seqno, seqno);
 }
 
 static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 1905ca590965..f0a6ce610948 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -216,15 +216,17 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
 
 bool xe_sched_job_started(struct xe_sched_job *job)
 {
+	struct dma_fence *fence = dma_fence_chain_contained(job->fence);
 	struct xe_lrc *lrc = job->q->lrc[0];
 
-	return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
-				     xe_lrc_start_seqno(lrc),
-				     dma_fence_chain_contained(job->fence)->ops);
+	return !__dma_fence_is_later(fence,
+				     xe_sched_job_lrc_seqno(job),
+				     xe_lrc_start_seqno(lrc));
 }
 
 bool xe_sched_job_completed(struct xe_sched_job *job)
 {
+	struct dma_fence *fence = dma_fence_chain_contained(job->fence);
 	struct xe_lrc *lrc = job->q->lrc[0];
 
 	/*
@@ -232,9 +234,9 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
 	 * parallel handshake is done.
 	 */
 
-	return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
-				     xe_lrc_seqno(lrc),
-				     dma_fence_chain_contained(job->fence)->ops);
+	return !__dma_fence_is_later(fence,
+				     xe_sched_job_lrc_seqno(job),
+				     xe_lrc_seqno(lrc));
 }
 
 void xe_sched_job_arm(struct xe_sched_job *job)
