author		Thomas Hellstrom <thellstrom@vmware.com>	2015-08-11 22:31:17 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2015-09-13 09:07:42 -0700
commit		3f52099b9579b39c8e6bfbbd1a92edc303c6d9fd
tree		e7983830a1317d8d46e86ed95ab2227b5675b87f	/drivers/gpu
parent		39d202573c4d5ec3b3a3930b31dc0c621e771414
drm/vmwgfx: Fix execbuf locking issues
commit 3e04e2fe6d87807d27521ad6ebb9e7919d628f25 upstream.

This addresses two issues that cause problems with viewperf maya-03 in
situations with memory pressure.

The first issue causes attempts to unreserve buffers if batched reservation
fails due to, for example, a pending signal. While the ttm_eu API was
previously resistant to this type of error, it no longer is, and the lockdep
code will complain about attempting to unreserve buffers that are not
reserved. The issue is resolved by avoiding the call to
ttm_eu_backoff_reservation in the buffer reserve error path.

The second issue is that the binding_mutex may be held when user-space fence
objects are created, and hence during memory reclaim. This may cause
recursive attempts to grab the binding mutex. The issue is resolved by not
holding the binding mutex across fence creation and submission.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
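To make the first fix concrete, here is a minimal C sketch of the reservation
error path in vmw_execbuf_process(). The goto targets are the real labels
from the patch, but the label bodies are simplified assumptions for
illustration, not the exact driver code, and this is a fragment rather than a
complete function:

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;	/* nothing was reserved: skip the backoff */

	/* ... buffer validation and command submission ... */

out_err:
	/* Reached only after a successful batched reservation. */
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	/* Cleanup that is safe whether or not buffers were reserved,
	 * e.g. vmw_resource_list_unreserve() and dropping mutexes.
	 */
	return ret;

With the ww_mutex-based ttm_eu API, backing off a reservation that never
succeeded is no longer a harmless no-op, so the failure path has to branch
past ttm_eu_backoff_reservation(); jumping to out_err instead would trigger
the lockdep warning described above.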
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..97ad3bcb99a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
 				     true, NULL);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_validate_buffers(dev_priv, sw_context);
 	if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	vmw_resource_relocations_free(&sw_context->res_relocations);
 
 	vmw_fifo_commit(dev_priv, command_size);
+	mutex_unlock(&dev_priv->binding_mutex);
 	vmw_query_bo_switch_commit(dev_priv, sw_context);
 
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
-	mutex_unlock(&dev_priv->binding_mutex);
 
 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);
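The second and third hunks move the mutex_unlock() of binding_mutex up so
that the lock is released before the fence object is created. Below is a
minimal sketch of the resulting ordering, abridged from vmw_execbuf_process()
with the reasoning added as comments; the actual surrounding code is longer
than shown:

	/* Command submission that relies on the current bindings is done
	 * while binding_mutex is still held.
	 */
	vmw_fifo_commit(dev_priv, command_size);

	/* Release binding_mutex before anything that can allocate memory.
	 * Creating a user-space fence object may allocate and thus enter
	 * direct reclaim; if reclaim calls back into the driver and tries
	 * to take binding_mutex again, still holding it here could deadlock.
	 */
	mutex_unlock(&dev_priv->binding_mutex);

	vmw_query_bo_switch_commit(dev_priv, sw_context);

	/* Fence creation and submission (vmw_execbuf_fence_commands())
	 * now run without binding_mutex held; the unlock that used to sit
	 * after them (removed in the last hunk) came too late.
	 */

This is why the insertion in the second hunk and the deletion in the last
hunk belong together: the lock is not dropped twice, it is simply dropped
earlier, before fence creation can recurse into memory reclaim.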