Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            134
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c        4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c       9
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h           37
7 files changed, 120 insertions, 71 deletions
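
At a glance, this commit retires the object-granular i915_gem_object_unbind() in
favour of a per-VMA i915_vma_unbind(), keeping i915_gem_object_ggtt_unbind() as a
convenience wrapper for the common global-GTT case. A minimal sketch of the calling
convention before and after, using only functions this patch declares (the caller
itself is hypothetical):

    /* Before: one implicit GGTT binding per object. */
    ret = i915_gem_object_unbind(obj);

    /* After: unbind one specific VMA, or use the GGTT-only helper. */
    ret = i915_vma_unbind(vma);             /* any address space */
    ret = i915_gem_object_ggtt_unbind(obj); /* global GTT only */
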
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 748af58b0cea..d2935b4fd695 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1800,7 +1800,7 @@ i915_drop_caches_set(void *data, u64 val)
if (obj->pin_count)
continue;
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_ggtt_unbind(obj);
if (ret)
goto unlock;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4f93467fdfdd..8205b4b4f2be 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1737,7 +1737,8 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
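
With both entry points declared, a caller that must drop every mapping of an object
walks the object's VMA list; the same pattern appears in the shrinker and free paths
later in this patch. A self-contained sketch (the function name is made up for
illustration):

    static int example_unbind_all_vmas(struct drm_i915_gem_object *obj)
    {
            struct i915_vma *vma, *next;
            int ret;

            /* _safe variant: i915_vma_unbind() unlinks and frees the vma. */
            list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
                    ret = i915_vma_unbind(vma);
                    if (ret)
                            return ret;
            }
            return 0;
    }
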
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4ca8f9fad913..db9792c47827 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,10 +38,12 @@
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
- unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking);
+static __must_check int
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -1692,7 +1694,6 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
- struct i915_address_space *vm = &dev_priv->gtt.base;
long count = 0;
list_for_each_entry_safe(obj, next,
@@ -1706,13 +1707,16 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
+ global_list) {
+ struct i915_vma *vma, *v;
if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
continue;
- if (i915_gem_object_unbind(obj))
- continue;
+ list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
if (!i915_gem_object_put_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT;
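
The shrink walk moves from a single VM's inactive_list to the driver-wide
dev_priv->mm.bound_list (linked through global_list): a purgeable object must now
lose every VMA, not just its GGTT one, before its backing pages can be dropped.
The merged result of the hunk above, condensed (purgeable filtering elided):

    list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list, global_list) {
            struct i915_vma *vma, *v;

            /* Unbind every VMA; only then can the pages be released. */
            list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
                    if (i915_vma_unbind(vma))
                            break;

            if (!i915_gem_object_put_pages(obj))
                    count += obj->base.size >> PAGE_SHIFT;
    }
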
@@ -2596,17 +2600,13 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
-/**
- * Unbinds an object from the GTT aperture.
- */
-int
-i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_vma_unbind(struct i915_vma *vma)
{
+ struct drm_i915_gem_object *obj = vma->obj;
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
- struct i915_vma *vma;
int ret;
- if (!i915_gem_obj_ggtt_bound(obj))
+ if (list_empty(&vma->vma_link))
return 0;
if (obj->pin_count)
@@ -2629,7 +2629,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret)
return ret;
- trace_i915_gem_object_unbind(obj);
+ trace_i915_vma_unbind(vma);
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
@@ -2644,7 +2644,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
list_del(&vma->vma_link);
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
@@ -2659,6 +2658,26 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return 0;
}
+/**
+ * Unbinds an object from the global GTT aperture.
+ */
+int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
+
+ if (!i915_gem_obj_ggtt_bound(obj))
+ return 0;
+
+ if (obj->pin_count)
+ return -EBUSY;
+
+ BUG_ON(obj->pages == NULL);
+
+ return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
+}
+
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
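
The new wrapper leans on i915_gem_obj_to_vma(), which this patch does not define. A
plausible implementation (hypothetical here, shown only to make the wrapper
readable) walks the object's VMA list for the matching address space:

    struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                                         struct i915_address_space *vm)
    {
            struct i915_vma *vma;

            list_for_each_entry(vma, &obj->vma_list, vma_link)
                    if (vma->vm == vm)
                            return vma;

            return NULL;
    }
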
@@ -3076,18 +3095,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
* Finds free space in the GTT aperture and binds the object there.
*/
static int
-i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
- unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking)
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
- size_t gtt_max = map_and_fenceable ?
- dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+ size_t gtt_max =
+ map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@@ -3132,15 +3151,18 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_vma_create(obj, &dev_priv->gtt.base);
+ /* FIXME: For now we only ever use 1 VMA per object */
+ BUG_ON(!i915_is_ggtt(vm));
+ WARN_ON(!list_empty(&obj->vma_list));
+
+ vma = i915_gem_vma_create(obj, vm);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err_unpin;
}
search_free:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
- &vma->node,
+ ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, gtt_max);
if (ret) {
@@ -3165,18 +3187,25 @@ search_free:
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &vm->inactive_list);
- list_add(&vma->vma_link, &obj->vma_list);
+
+ /* Keep GGTT vmas first to make debug easier */
+ if (i915_is_ggtt(vm))
+ list_add(&vma->vma_link, &obj->vma_list);
+ else
+ list_add_tail(&vma->vma_link, &obj->vma_list);
fenceable =
+ i915_is_ggtt(vm) &&
i915_gem_obj_ggtt_size(obj) == fence_size &&
(i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
- mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
- dev_priv->gtt.mappable_end;
+ mappable =
+ i915_is_ggtt(vm) &&
+ vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
- trace_i915_gem_object_bind(obj, map_and_fenceable);
+ trace_i915_vma_bind(vma, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
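
Note the new i915_is_ggtt(vm) guards: fenceability and CPU mappability are
properties of the global GTT aperture only, so both must evaluate to false for a
future PPGTT VMA. The computation restated as a standalone predicate (a sketch; the
helper name is made up):

    static bool example_map_and_fenceable(struct i915_vma *vma,
                                          u32 fence_size, u32 fence_alignment)
    {
            struct drm_i915_gem_object *obj = vma->obj;
            drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
            bool fenceable, mappable;

            fenceable = i915_is_ggtt(vma->vm) &&
                    i915_gem_obj_ggtt_size(obj) == fence_size &&
                    (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;

            mappable = i915_is_ggtt(vma->vm) &&
                    vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;

            return fenceable && mappable;
    }
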
@@ -3345,7 +3374,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
list_for_each_entry(vma, &obj->vma_list, vma_link) {
if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(vma);
if (ret)
return ret;
@@ -3653,33 +3682,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
bool map_and_fenceable,
bool nonblocking)
{
+ struct i915_vma *vma;
int ret;
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if (i915_gem_obj_ggtt_bound(obj)) {
- if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
+ WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+
+ if (vma) {
+ if ((alignment &&
+ vma->node.start & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_ggtt_offset(obj), alignment,
+ i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable,
obj->map_and_fenceable);
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(vma);
if (ret)
return ret;
}
}
- if (!i915_gem_obj_ggtt_bound(obj)) {
+ if (!i915_gem_obj_bound(obj, vm)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- ret = i915_gem_object_bind_to_gtt(obj, alignment,
- map_and_fenceable,
- nonblocking);
+ ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
@@ -3975,6 +4010,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma, *next;
trace_i915_gem_object_destroy(obj);
@@ -3982,15 +4018,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_detach_phys_object(dev, obj);
obj->pin_count = 0;
- if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
- bool was_interruptible;
+ /* NB: 0 or 1 elements */
+ WARN_ON(!list_empty(&obj->vma_list) &&
+ !list_is_singular(&obj->vma_list));
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ if (WARN_ON(ret == -ERESTARTSYS)) {
+ bool was_interruptible;
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
- WARN_ON(i915_gem_object_unbind(obj));
+ WARN_ON(i915_vma_unbind(vma));
- dev_priv->mm.interruptible = was_interruptible;
+ dev_priv->mm.interruptible = was_interruptible;
+ }
}
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
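
Teardown must not leak a binding merely because a signal is pending, so an
-ERESTARTSYS from i915_vma_unbind() is retried with interruption disabled. The
idiom above, isolated into a sketch (function name made up):

    static void example_unbind_uninterruptible(struct drm_i915_private *dev_priv,
                                               struct i915_vma *vma)
    {
            bool was_interruptible = dev_priv->mm.interruptible;

            dev_priv->mm.interruptible = false;
            WARN_ON(i915_vma_unbind(vma)); /* must not fail this time */
            dev_priv->mm.interruptible = was_interruptible;
    }
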
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 33d85a4447a6..9205a4179b7e 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -147,7 +147,7 @@ found:
struct drm_i915_gem_object,
exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_ggtt_unbind(obj);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
@@ -185,7 +185,7 @@ i915_gem_evict_everything(struct drm_device *dev)
/* Having flushed everything, unbind() should never raise an error */
list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_unbind(obj));
+ WARN_ON(i915_gem_object_ggtt_unbind(obj));
return 0;
}
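
Both eviction paths can keep using the object-level helper because, per the FIXME in
i915_gem_object_bind_to_vm() above, an object still owns at most one VMA and it
lives in the global GTT. The invariant being relied on, written out as an explicit
check (a sketch; the helper name is made up):

    static void example_assert_single_ggtt_vma(struct drm_i915_gem_object *obj)
    {
            /* 0 or 1 elements; if 1, it is the GGTT VMA at the list head. */
            WARN_ON(!list_empty(&obj->vma_list) &&
                    !list_is_singular(&obj->vma_list));
    }
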
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9939d2ef3ea9..17be2e4bae6b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -556,7 +556,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if ((entry->alignment &&
obj_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
else
ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
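
Spelled out, the new execbuffer call is a lookup followed by the unbind (a sketch;
on this branch the object is expected to already be bound in vm, so the lookup
should not return NULL):

    struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);

    ret = i915_vma_unbind(vma);
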
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 92a8d279ca39..032e9ef9c896 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -360,17 +360,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->map_and_fenceable =
!i915_gem_obj_ggtt_bound(obj) ||
- (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
+ (i915_gem_obj_ggtt_offset(obj) +
+ obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
- u32 unfenced_alignment =
+ u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
- if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
- ret = i915_gem_object_unbind(obj);
+ if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
+ ret = i915_gem_object_ggtt_unbind(obj);
}
if (ret == 0) {
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 2933e2ffeaa4..e2c5ee6f6194 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create,
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
-TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
- TP_ARGS(obj, mappable),
+TRACE_EVENT(i915_vma_bind,
+ TP_PROTO(struct i915_vma *vma, bool mappable),
+ TP_ARGS(vma, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->offset = i915_gem_obj_ggtt_offset(obj);
- __entry->size = i915_gem_obj_ggtt_size(obj);
+ __entry->obj = vma->obj;
+ __entry->vm = vma->vm;
+ __entry->offset = vma->node.start;
+ __entry->size = vma->node.size;
__entry->mappable = mappable;
),
- TP_printk("obj=%p, offset=%08x size=%x%s",
+ TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
- __entry->mappable ? ", mappable" : "")
+ __entry->mappable ? ", mappable" : "",
+ __entry->vm)
);
-TRACE_EVENT(i915_gem_object_unbind,
- TP_PROTO(struct drm_i915_gem_object *obj),
- TP_ARGS(obj),
+TRACE_EVENT(i915_vma_unbind,
+ TP_PROTO(struct i915_vma *vma),
+ TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->offset = i915_gem_obj_ggtt_offset(obj);
- __entry->size = i915_gem_obj_ggtt_size(obj);
+ __entry->obj = vma->obj;
+ __entry->vm = vma->vm;
+ __entry->offset = vma->node.start;
+ __entry->size = vma->node.size;
),
- TP_printk("obj=%p, offset=%08x size=%x",
- __entry->obj, __entry->offset, __entry->size)
+ TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+ __entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_gem_object_change_domain,
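
Both renamed events now also record which address space the VMA lives in. They are
enabled through the standard ftrace interface; the sample lines below are rendered
from the TP_printk formats above, with made-up pointer values:

    # echo 1 > /sys/kernel/debug/tracing/events/i915/i915_vma_bind/enable
    # echo 1 > /sys/kernel/debug/tracing/events/i915/i915_vma_unbind/enable

    i915_vma_bind: obj=ffff880119e4a700, offset=00010000 size=4000, mappable vm=ffff880118f1c000
    i915_vma_unbind: obj=ffff880119e4a700, offset=00010000 size=4000 vm=ffff880118f1c000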