| author | Eric Anholt <eric@anholt.net> | 2015-11-30 12:34:01 -0800 |
|---|---|---|
| committer | Eric Anholt <eric@anholt.net> | 2015-12-07 20:10:03 -0800 |
| commit | b501bacc6060fd62654b756469cc3091eb53de3a (patch) | |
| tree | 299d70d456d4b125f16207c6aac4d93c3fbd0333 /drivers/gpu/drm/vc4/vc4_gem.c | |
| parent | d5b1a78a772f1e31a94f8babfa964152ec5e9aa5 (diff) | |
drm/vc4: Add support for async pageflips.
An async pageflip stores the modeset to be done and executes it once
the BOs are ready to be displayed. This gets us about a 3x performance
improvement in full-screen rendering with pageflipping.
Signed-off-by: Eric Anholt <eric@anholt.net>
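
For context, here is a minimal sketch of how a caller might use the vc4_queue_seqno_cb() helper introduced by this patch to implement the deferral described above: the flip path embeds a struct vc4_seqno_cb in its flip state, queues it against the seqno the new framebuffer's BO depends on, and finishes the flip from the callback. The example_async_flip_state structure and the example_* functions are assumptions made up for illustration; the real async pageflip plumbing lives in vc4_crtc.c and is not part of the diff shown below (the diffstat is limited to vc4_gem.c).

```c
/*
 * Illustrative sketch only -- not part of this patch.  Shows one way to
 * use vc4_queue_seqno_cb() to defer a pageflip until a BO's rendering
 * seqno has completed.  Names other than vc4_queue_seqno_cb() and
 * struct vc4_seqno_cb are made up for the example.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <drm/drm_crtc.h>
#include "vc4_drv.h"	/* assumed home of struct vc4_seqno_cb and vc4_queue_seqno_cb() */

struct example_async_flip_state {
	struct drm_crtc *crtc;
	struct drm_framebuffer *fb;
	struct vc4_seqno_cb cb;		/* callback node from this patch */
};

/* Runs from the workqueue once the BO's seqno has been reached. */
static void example_async_flip_complete(struct vc4_seqno_cb *cb)
{
	struct example_async_flip_state *flip_state =
		container_of(cb, struct example_async_flip_state, cb);

	/*
	 * Rendering to flip_state->fb is finished: this is where the new
	 * framebuffer would be programmed on flip_state->crtc and the
	 * flip-complete event sent, before dropping the reference.
	 */
	drm_framebuffer_unreference(flip_state->fb);
	kfree(flip_state);
}

/* Called from the pageflip ioctl path with the seqno the new fb depends on. */
static int example_queue_async_flip(struct drm_device *dev,
				    struct example_async_flip_state *flip_state,
				    uint64_t fb_seqno)
{
	/*
	 * If fb_seqno has already completed, vc4_queue_seqno_cb() schedules
	 * the work immediately; otherwise the callback is queued on
	 * vc4->seqno_cb_list and fired from vc4_job_handle_completed().
	 */
	return vc4_queue_seqno_cb(dev, &flip_state->cb, fb_seqno,
				  example_async_flip_complete);
}
```

The point the sketch relies on is that vc4_queue_seqno_cb() either queues the callback under job_lock or, if the seqno has already passed, schedules the work right away, so the flip path never has to race against the GPU interrupt handler itself.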
Diffstat (limited to 'drivers/gpu/drm/vc4/vc4_gem.c')
-rw-r--r-- | drivers/gpu/drm/vc4/vc4_gem.c | 40 |
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 936dddfa890f..5fb0556e001e 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -461,6 +461,7 @@ void
 vc4_job_handle_completed(struct vc4_dev *vc4)
 {
         unsigned long irqflags;
+        struct vc4_seqno_cb *cb, *cb_temp;
 
         spin_lock_irqsave(&vc4->job_lock, irqflags);
         while (!list_empty(&vc4->job_done_list)) {
@@ -473,7 +474,45 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
                 vc4_complete_exec(vc4->dev, exec);
                 spin_lock_irqsave(&vc4->job_lock, irqflags);
         }
+
+        list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
+                if (cb->seqno <= vc4->finished_seqno) {
+                        list_del_init(&cb->work.entry);
+                        schedule_work(&cb->work);
+                }
+        }
+
+        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+}
+
+static void vc4_seqno_cb_work(struct work_struct *work)
+{
+        struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
+
+        cb->func(cb);
+}
+
+int vc4_queue_seqno_cb(struct drm_device *dev,
+                       struct vc4_seqno_cb *cb, uint64_t seqno,
+                       void (*func)(struct vc4_seqno_cb *cb))
+{
+        struct vc4_dev *vc4 = to_vc4_dev(dev);
+        int ret = 0;
+        unsigned long irqflags;
+
+        cb->func = func;
+        INIT_WORK(&cb->work, vc4_seqno_cb_work);
+
+        spin_lock_irqsave(&vc4->job_lock, irqflags);
+        if (seqno > vc4->finished_seqno) {
+                cb->seqno = seqno;
+                list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
+        } else {
+                schedule_work(&cb->work);
+        }
         spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
+        return ret;
 }
 
 /* Scheduled when any job has been completed, this walks the list of
@@ -610,6 +649,7 @@ vc4_gem_init(struct drm_device *dev)
 
         INIT_LIST_HEAD(&vc4->job_list);
         INIT_LIST_HEAD(&vc4->job_done_list);
+        INIT_LIST_HEAD(&vc4->seqno_cb_list);
         spin_lock_init(&vc4->job_lock);
 
         INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);