Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  254
1 file changed, 113 insertions, 141 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index df9d451afdcd..b7dbd16904e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
*
*/
-#include <subdev/fb.h>
-
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
@@ -58,14 +56,14 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_vma *vma;
int ret;
- if (!cli->base.vm)
+ if (!cli->vm)
return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return ret;
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (!vma) {
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma) {
@@ -73,7 +71,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
- ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
+ ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
if (ret) {
kfree(vma);
goto out;
@@ -100,17 +98,23 @@ static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
- struct nouveau_fence *fence = NULL;
+ struct reservation_object *resv = nvbo->bo.resv;
+ struct reservation_object_list *fobj;
+ struct fence *fence = NULL;
+
+ fobj = reservation_object_get_list(resv);
list_del(&vma->head);
- if (mapped) {
- spin_lock(&nvbo->bo.bdev->fence_lock);
- fence = nouveau_fence_ref(nvbo->bo.sync_obj);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
- }
+ if (fobj && fobj->shared_count > 1)
+ ttm_bo_wait(&nvbo->bo, true, false, false);
+ else if (fobj && fobj->shared_count == 1)
+ fence = rcu_dereference_protected(fobj->shared[0],
+ reservation_object_held(resv));
+ else
+ fence = reservation_object_get_excl(nvbo->bo.resv);
- if (fence) {
+ if (fence && mapped) {
nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
} else {
if (mapped)
@@ -118,7 +122,6 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
nouveau_vm_put(vma);
kfree(vma);
}
- nouveau_fence_unref(&fence);
}
void
@@ -129,14 +132,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
struct nouveau_vma *vma;
int ret;
- if (!cli->base.vm)
+ if (!cli->vm)
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
if (ret)
return;
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (vma) {
if (--vma->refcount == 0)
nouveau_gem_object_unmap(nvbo, vma);
@@ -173,7 +176,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
/* Initialize the embedded gem-object. We return a single gem-reference
@@ -202,8 +205,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset;
- if (cli->base.vm) {
- vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+ if (cli->vm) {
+ vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (!vma)
return -EINVAL;
@@ -223,13 +226,13 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli = nouveau_cli(file_priv);
- struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb *pfb = nvkm_fb(&drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
- NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
+ NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
@@ -290,24 +293,23 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
}
struct validate_op {
- struct list_head vram_list;
- struct list_head gart_list;
- struct list_head both_list;
+ struct list_head list;
struct ww_acquire_ctx ticket;
};
static void
-validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
- struct ww_acquire_ctx *ticket)
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
- struct list_head *entry, *tmp;
struct nouveau_bo *nvbo;
+ struct drm_nouveau_gem_pushbuf_bo *b;
- list_for_each_safe(entry, tmp, list) {
- nvbo = list_entry(entry, struct nouveau_bo, entry);
+ while (!list_empty(&op->list)) {
+ nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
+ b = &pbbo[nvbo->pbbo_index];
if (likely(fence))
- nouveau_bo_fence(nvbo, fence);
+ nouveau_bo_fence(nvbo, fence, !!b->write_domains);
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
@@ -316,23 +318,16 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
- ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
+ ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
drm_gem_object_unreference_unlocked(&nvbo->gem);
}
}
static void
-validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
+validate_fini(struct validate_op *op, struct nouveau_fence *fence,
+ struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
- validate_fini_list(&op->vram_list, fence, &op->ticket);
- validate_fini_list(&op->gart_list, fence, &op->ticket);
- validate_fini_list(&op->both_list, fence, &op->ticket);
-}
-
-static void
-validate_fini(struct validate_op *op, struct nouveau_fence *fence)
-{
- validate_fini_no_ticket(op, fence);
+ validate_fini_no_ticket(op, fence, pbbo);
ww_acquire_fini(&op->ticket);
}
@@ -346,11 +341,14 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
int trycnt = 0;
int ret, i;
struct nouveau_bo *res_bo = NULL;
+ LIST_HEAD(gart_list);
+ LIST_HEAD(vram_list);
+ LIST_HEAD(both_list);
ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
if (++trycnt > 100000) {
- NV_ERROR(cli, "%s failed and gave up.\n", __func__);
+ NV_PRINTK(error, cli, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
@@ -361,10 +359,9 @@ retry:
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
- NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
- ww_acquire_done(&op->ticket);
- validate_fini(op, NULL);
- return -ENOENT;
+ NV_PRINTK(error, cli, "Unknown handle 0x%08x\n", b->handle);
+ ret = -ENOENT;
+ break;
}
nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
@@ -374,17 +371,19 @@ retry:
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
- NV_ERROR(cli, "multiple instances of buffer %d on "
+ NV_PRINTK(error, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
- ww_acquire_done(&op->ticket);
- validate_fini(op, NULL);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
if (ret) {
- validate_fini_no_ticket(op, NULL);
+ list_splice_tail_init(&vram_list, &op->list);
+ list_splice_tail_init(&gart_list, &op->list);
+ list_splice_tail_init(&both_list, &op->list);
+ validate_fini_no_ticket(op, NULL, NULL);
if (unlikely(ret == -EDEADLK)) {
ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
&op->ticket);
@@ -392,12 +391,9 @@ retry:
res_bo = nvbo;
}
if (unlikely(ret)) {
- ww_acquire_done(&op->ticket);
- ww_acquire_fini(&op->ticket);
- drm_gem_object_unreference_unlocked(gem);
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "fail reserve\n");
- return ret;
+ NV_PRINTK(error, cli, "fail reserve\n");
+ break;
}
}
@@ -406,45 +402,32 @@ retry:
nvbo->pbbo_index = i;
if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
(b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
- list_add_tail(&nvbo->entry, &op->both_list);
+ list_add_tail(&nvbo->entry, &both_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
- list_add_tail(&nvbo->entry, &op->vram_list);
+ list_add_tail(&nvbo->entry, &vram_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
- list_add_tail(&nvbo->entry, &op->gart_list);
+ list_add_tail(&nvbo->entry, &gart_list);
else {
- NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
+ NV_PRINTK(error, cli, "invalid valid domains: 0x%08x\n",
b->valid_domains);
- list_add_tail(&nvbo->entry, &op->both_list);
- ww_acquire_done(&op->ticket);
- validate_fini(op, NULL);
- return -EINVAL;
+ list_add_tail(&nvbo->entry, &both_list);
+ ret = -EINVAL;
+ break;
}
if (nvbo == res_bo)
goto retry;
}
ww_acquire_done(&op->ticket);
- return 0;
-}
-
-static int
-validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
-{
- struct nouveau_fence *fence = NULL;
- int ret = 0;
-
- spin_lock(&nvbo->bo.bdev->fence_lock);
- fence = nouveau_fence_ref(nvbo->bo.sync_obj);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
-
- if (fence) {
- ret = nouveau_fence_sync(fence, chan);
- nouveau_fence_unref(&fence);
- }
-
+ list_splice_tail(&vram_list, &op->list);
+ list_splice_tail(&gart_list, &op->list);
+ list_splice_tail(&both_list, &op->list);
+ if (ret)
+ validate_fini(op, NULL, NULL);
return ret;
+
}
static int
@@ -465,24 +448,25 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
- NV_ERROR(cli, "fail set_domain\n");
+ NV_PRINTK(error, cli, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "fail ttm_validate\n");
+ NV_PRINTK(error, cli, "fail ttm_validate\n");
return ret;
}
- ret = validate_sync(chan, nvbo);
+ ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains);
if (unlikely(ret)) {
- NV_ERROR(cli, "fail post-validate sync\n");
+ if (ret != -ERESTARTSYS)
+ NV_PRINTK(error, cli, "fail post-validate sync\n");
return ret;
}
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -515,11 +499,9 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
struct validate_op *op, int *apply_relocs)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
- int ret, relocs = 0;
+ int ret;
- INIT_LIST_HEAD(&op->vram_list);
- INIT_LIST_HEAD(&op->gart_list);
- INIT_LIST_HEAD(&op->both_list);
+ INIT_LIST_HEAD(&op->list);
if (nr_buffers == 0)
return 0;
@@ -527,38 +509,18 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate_init\n");
+ NV_PRINTK(error, cli, "validate_init\n");
return ret;
}
- ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
+ ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate vram_list\n");
- validate_fini(op, NULL);
+ NV_PRINTK(error, cli, "validating bo list\n");
+ validate_fini(op, NULL, NULL);
return ret;
}
- relocs += ret;
-
- ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
- if (unlikely(ret < 0)) {
- if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate gart_list\n");
- validate_fini(op, NULL);
- return ret;
- }
- relocs += ret;
-
- ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
- if (unlikely(ret < 0)) {
- if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate both_list\n");
- validate_fini(op, NULL);
- return ret;
- }
- relocs += ret;
-
- *apply_relocs = relocs;
+ *apply_relocs = ret;
return 0;
}
@@ -613,7 +575,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
- NV_ERROR(cli, "reloc bo index invalid\n");
+ NV_PRINTK(error, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -623,7 +585,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
- NV_ERROR(cli, "reloc container bo index invalid\n");
+ NV_PRINTK(error, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -631,7 +593,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
- NV_ERROR(cli, "reloc outside of bo\n");
+ NV_PRINTK(error, cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
@@ -640,7 +602,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
- NV_ERROR(cli, "failed kmap for reloc\n");
+ NV_PRINTK(error, cli, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
@@ -661,11 +623,9 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
data |= r->vor;
}
- spin_lock(&nvbo->bo.bdev->fence_lock);
- ret = ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
+ ret = ttm_bo_wait(&nvbo->bo, true, false, false);
if (ret) {
- NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
+ NV_PRINTK(error, cli, "reloc wait_idle failed: %d\n", ret);
break;
}
@@ -696,7 +656,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
return -ENOMEM;
list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
+ if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) {
chan = temp->chan;
break;
}
@@ -711,19 +671,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
- NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
- NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
- NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+ NV_PRINTK(error, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return nouveau_abi16_put(abi16, -EINVAL);
}
@@ -741,7 +701,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
- NV_ERROR(cli, "push %d buffer not in list\n", i);
+ NV_PRINTK(error, cli, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
@@ -752,7 +712,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
- NV_ERROR(cli, "validate: %d\n", ret);
+ NV_PRINTK(error, cli, "validate: %d\n", ret);
goto out_prevalid;
}
@@ -760,7 +720,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (do_reloc) {
ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
if (ret) {
- NV_ERROR(cli, "reloc apply: %d\n", ret);
+ NV_PRINTK(error, cli, "reloc apply: %d\n", ret);
goto out;
}
}
@@ -768,7 +728,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
- NV_ERROR(cli, "nv50cal_space: %d\n", ret);
+ NV_PRINTK(error, cli, "nv50cal_space: %d\n", ret);
goto out;
}
@@ -780,10 +740,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
push[i].length);
}
} else
- if (nv_device(drm->device)->chipset >= 0x25) {
+ if (drm->device.info.chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
- NV_ERROR(cli, "cal_space: %d\n", ret);
+ NV_PRINTK(error, cli, "cal_space: %d\n", ret);
goto out;
}
@@ -797,7 +757,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
- NV_ERROR(cli, "jmp_space: %d\n", ret);
+ NV_PRINTK(error, cli, "jmp_space: %d\n", ret);
goto out;
}
@@ -835,13 +795,13 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_fence_new(chan, false, &fence);
if (ret) {
- NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
+ NV_PRINTK(error, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
out:
- validate_fini(&op, fence);
+ validate_fini(&op, fence, bo);
nouveau_fence_unref(&fence);
out_prevalid:
@@ -853,7 +813,7 @@ out_next:
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (nv_device(drm->device)->chipset >= 0x25) {
+ if (drm->device.info.chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
@@ -886,17 +846,29 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
- int ret = -EINVAL;
+ bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
+ int ret;
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
return -ENOENT;
nvbo = nouveau_gem_object(gem);
- spin_lock(&nvbo->bo.bdev->fence_lock);
- ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
+ if (no_wait)
+ ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
+ else {
+ long lret;
+
+ lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
+ if (!lret)
+ ret = -EBUSY;
+ else if (lret > 0)
+ ret = 0;
+ else
+ ret = lret;
+ }
drm_gem_object_unreference_unlocked(gem);
+
return ret;
}