Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 302
1 file changed, 210 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 818576654092..37427e4016cb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -607,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	case -EAGAIN:
 		return VM_FAULT_OOM;
 	case -EFAULT:
-	case -EBUSY:
-		DRM_ERROR("can't insert pfn?? fault or busy...\n");
 		return VM_FAULT_SIGBUS;
 	default:
 		return VM_FAULT_NOPAGE;
@@ -684,6 +678,30 @@ out_free_list:
 	return ret;
 }
 
+static void
+i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+
+	list = &obj->map_list;
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+
+	if (list->file_offset_node) {
+		drm_mm_put_block(list->file_offset_node);
+		list->file_offset_node = NULL;
+	}
+
+	if (list->map) {
+		drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
+		list->map = NULL;
+	}
+
+	obj_priv->mmap_offset = 0;
+}
+
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
  * @obj: object to check
@@ -758,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	if (!obj_priv->mmap_offset) {
 		ret = i915_gem_create_mmap_offset(obj);
-		if (ret)
+		if (ret) {
+			drm_gem_object_unreference(obj);
+			mutex_unlock(&dev->struct_mutex);
 			return ret;
+		}
 	}
 
 	args->offset = obj_priv->mmap_offset;
@@ -1030,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
+	if (!dev_priv->hw_status_page)
+		return;
+
 	seqno = i915_get_gem_seqno(dev);
 
 	while (!list_empty(&dev_priv->mm.request_list)) {
@@ -1452,7 +1476,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	int regnum = obj_priv->fence_reg;
 	int tile_width;
-	uint32_t val;
+	uint32_t fence_reg, val;
 	uint32_t pitch_val;
 
 	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
@@ -1479,7 +1503,11 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
 	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 	val |= I830_FENCE_REG_VALID;
 
-	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+	if (regnum < 8)
+		fence_reg = FENCE_REG_830_0 + (regnum * 4);
+	else
+		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
+	I915_WRITE(fence_reg, val);
 }
 
 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
@@ -1533,7 +1561,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	struct drm_i915_fence_reg *reg = NULL;
-	int i, ret;
+	struct drm_i915_gem_object *old_obj_priv = NULL;
+	int i, ret, avail;
 
 	switch (obj_priv->tiling_mode) {
 	case I915_TILING_NONE:
@@ -1556,25 +1585,46 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
 	}
 
 	/* First try to find a free reg */
+try_again:
+	avail = 0;
 	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
 		reg = &dev_priv->fence_regs[i];
 		if (!reg->obj)
 			break;
+
+		old_obj_priv = reg->obj->driver_private;
+		if (!old_obj_priv->pin_count)
+			avail++;
 	}
 
 	/* None available, try to steal one or wait for a user to finish */
 	if (i == dev_priv->num_fence_regs) {
-		struct drm_i915_gem_object *old_obj_priv = NULL;
+		uint32_t seqno = dev_priv->mm.next_gem_seqno;
 		loff_t offset;
 
-try_again:
-		/* Could try to use LRU here instead... */
+		if (avail == 0)
+			return -ENOMEM;
+
 		for (i = dev_priv->fence_reg_start;
 		     i < dev_priv->num_fence_regs; i++) {
+			uint32_t this_seqno;
+
 			reg = &dev_priv->fence_regs[i];
 			old_obj_priv = reg->obj->driver_private;
-			if (!old_obj_priv->pin_count)
+
+			if (old_obj_priv->pin_count)
+				continue;
+
+			/* i915 uses fences for GPU access to tiled buffers */
+			if (IS_I965G(dev) || !old_obj_priv->active)
 				break;
+
+			/* find the seqno of the first available fence */
+			this_seqno = old_obj_priv->last_rendering_seqno;
+			if (this_seqno != 0 &&
+			    reg->obj->write_domain == 0 &&
+			    i915_seqno_passed(seqno, this_seqno))
+				seqno = this_seqno;
 		}
 
 		/*
@@ -1582,15 +1632,25 @@ try_again:
 		 * objects to finish before trying again.
 		 */
 		if (i == dev_priv->num_fence_regs) {
-			ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
-			if (ret) {
-				WARN(ret != -ERESTARTSYS,
-				     "switch to GTT domain failed: %d\n", ret);
-				return ret;
+			if (seqno == dev_priv->mm.next_gem_seqno) {
+				i915_gem_flush(dev,
+					       I915_GEM_GPU_DOMAINS,
+					       I915_GEM_GPU_DOMAINS);
+				seqno = i915_add_request(dev,
+							 I915_GEM_GPU_DOMAINS);
+				if (seqno == 0)
+					return -ENOMEM;
 			}
+
+			ret = i915_wait_request(dev, seqno);
+			if (ret)
+				return ret;
 			goto try_again;
 		}
 
+		BUG_ON(old_obj_priv->active ||
+		       (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
+
 		/*
 		 * Zap this virtual mapping so we can set up a fence again
 		 * for this object next time we need it.
@@ -1631,8 +1691,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 
 	if (IS_I965G(dev))
 		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
-	else
-		I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
+	else {
+		uint32_t fence_reg;
+
+		if (obj_priv->fence_reg < 8)
+			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+		else
+			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg -
+						       8) * 4;
+
+		I915_WRITE(fence_reg, 0);
+	}
 
 	dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
 	obj_priv->fence_reg = I915_FENCE_REG_NONE;
@@ -1996,30 +2065,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 *		drm_agp_chipset_flush
 */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 	uint32_t invalidate_domains = 0;
 	uint32_t flush_domains = 0;
 
-	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 		 __func__, obj,
-		 obj->read_domains, read_domains,
-		 obj->write_domain, write_domain);
+		 obj->read_domains, obj->pending_read_domains,
+		 obj->write_domain, obj->pending_write_domain);
 #endif
 
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
 	 */
-	if (write_domain == 0)
-		read_domains |= obj->read_domains;
+	if (obj->pending_write_domain == 0)
+		obj->pending_read_domains |= obj->read_domains;
 	else
 		obj_priv->dirty = 1;
@@ -2029,15 +2096,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * any read domains which differ from the old
 	 * write domain
 	 */
-	if (obj->write_domain && obj->write_domain != read_domains) {
+	if (obj->write_domain &&
+	    obj->write_domain != obj->pending_read_domains) {
 		flush_domains |= obj->write_domain;
-		invalidate_domains |= read_domains & ~obj->write_domain;
+		invalidate_domains |=
+			obj->pending_read_domains & ~obj->write_domain;
 	}
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= read_domains & ~obj->read_domains;
+	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2046,9 +2115,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((write_domain | flush_domains) != 0)
-		obj->write_domain = write_domain;
-	obj->read_domains = read_domains;
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains). So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->pending_write_domain == 0)
+		obj->pending_write_domain = obj->write_domain;
+	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
@@ -2251,6 +2326,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 				  (int) reloc.offset,
 				  reloc.read_domains,
 				  reloc.write_domain);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
 			return -EINVAL;
 		}
 
@@ -2437,6 +2514,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_gem_exec_object *exec_list = NULL;
 	struct drm_gem_object **object_list = NULL;
 	struct drm_gem_object *batch_obj;
+	struct drm_i915_gem_object *obj_priv;
 	int ret, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains;
@@ -2480,13 +2558,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	if (dev_priv->mm.wedged) {
 		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EIO;
+		ret = -EIO;
+		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
 		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
-		return -EBUSY;
+		ret = -EBUSY;
+		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
@@ -2499,6 +2579,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			ret = -EBADF;
 			goto err;
 		}
+
+		obj_priv = object_list[i]->driver_private;
+		if (obj_priv->in_execbuffer) {
+			DRM_ERROR("Object %p appears more than once in object list\n",
+				   object_list[i]);
+			ret = -EBADF;
+			goto err;
+		}
+		obj_priv->in_execbuffer = true;
 	}
 
 	/* Pin and relocate */
@@ -2554,9 +2643,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj,
-						  obj->pending_read_domains,
-						  obj->pending_write_domain);
+		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2575,6 +2662,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		(void)i915_add_request(dev, dev->flush_domains);
 	}
 
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+
+		obj->write_domain = obj->pending_write_domain;
+	}
+
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2632,24 +2725,32 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Copy the new buffer offsets back to the user's exec list. */
-	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-			   (uintptr_t) args->buffers_ptr,
-			   exec_list,
-			   sizeof(*exec_list) * args->buffer_count);
-	if (ret)
-		DRM_ERROR("failed to copy %d exec entries "
-			  "back to user (%d)\n",
-			  args->buffer_count, ret);
-
 err:
 	for (i = 0; i < pinned; i++)
 		i915_gem_object_unpin(object_list[i]);
 
-	for (i = 0; i < args->buffer_count; i++)
+	for (i = 0; i < args->buffer_count; i++) {
+		if (object_list[i]) {
+			obj_priv = object_list[i]->driver_private;
+			obj_priv->in_execbuffer = false;
+		}
 		drm_gem_object_unreference(object_list[i]);
+	}
 
 	mutex_unlock(&dev->struct_mutex);
 
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret)
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+	}
+
 pre_mutex_err:
 	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
 		 DRM_MEM_DRIVER);
@@ -2671,17 +2772,24 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 		ret = i915_gem_object_bind_to_gtt(obj, alignment);
 		if (ret != 0) {
 			if (ret != -EBUSY && ret != -ERESTARTSYS)
-				DRM_ERROR("Failure to bind: %d", ret);
+				DRM_ERROR("Failure to bind: %d\n", ret);
+			return ret;
+		}
+	}
+	/*
+	 * Pre-965 chips need a fence register set up in order to
+	 * properly handle tiled surfaces.
+	 */
+	if (!IS_I965G(dev) &&
+	    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
+	    obj_priv->tiling_mode != I915_TILING_NONE) {
+		ret = i915_gem_object_get_fence_reg(obj, true);
+		if (ret != 0) {
+			if (ret != -EBUSY && ret != -ERESTARTSYS)
+				DRM_ERROR("Failure to install fence: %d\n",
+					  ret);
 			return ret;
 		}
-		/*
-		 * Pre-965 chips need a fence register set up in order to
-		 * properly handle tiled surfaces.
-		 */
-		if (!IS_I965G(dev) &&
-		    obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-		    obj_priv->tiling_mode != I915_TILING_NONE)
-			i915_gem_object_get_fence_reg(obj, true);
 	}
 
 	obj_priv->pin_count++;
@@ -2753,6 +2861,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
+		drm_gem_object_unreference(obj);
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
 	}
@@ -2833,6 +2942,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
+	/* Update the active list for the hardware's current position.
+	 * Otherwise this only updates on a delayed timer or when irqs are
+	 * actually unmasked, and our working set ends up being larger than
+	 * required.
+	 */
+	i915_gem_retire_requests(dev);
+
 	obj_priv = obj->driver_private;
 	/* Don't count being on the flushing list against the object being
 	 * done. Otherwise, a buffer left on the flushing list but not getting
@@ -2885,9 +3001,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_map *map;
 	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	while (obj_priv->pin_count > 0)
@@ -2898,19 +3011,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 
 	i915_gem_object_unbind(obj);
 
-	list = &obj->map_list;
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
-
-	map = list->map;
-	if (map) {
-		drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
-		list->map = NULL;
-	}
+	i915_gem_free_mmap_offset(obj);
 
 	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
 	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2949,7 +3050,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
 	return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3095,6 +3196,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	if (dev_priv->hw_status_page == NULL) {
 		DRM_ERROR("Failed to map status page.\n");
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
 		return -EINVAL;
 	}
@@ -3107,6 +3209,31 @@ i915_gem_init_hws(struct drm_device *dev)
 	return 0;
 }
 
+static void
+i915_gem_cleanup_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	if (dev_priv->hws_obj == NULL)
+		return;
+
+	obj = dev_priv->hws_obj;
+	obj_priv = obj->driver_private;
+
+	kunmap(obj_priv->page_list[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	dev_priv->hws_obj = NULL;
+
+	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+	dev_priv->hw_status_page = NULL;
+
+	/* Write high address into HWS_PGA when disabling. */
+	I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
@@ -3124,6 +3251,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	obj = drm_gem_object_alloc(dev, 128 * 1024);
 	if (obj == NULL) {
 		DRM_ERROR("Failed to allocate ringbuffer\n");
+		i915_gem_cleanup_hws(dev);
 		return -ENOMEM;
 	}
 	obj_priv = obj->driver_private;
@@ -3131,6 +3259,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return ret;
 	}
 
@@ -3148,7 +3277,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+		i915_gem_object_unpin(obj);
 		drm_gem_object_unreference(obj);
+		i915_gem_cleanup_hws(dev);
 		return -EINVAL;
 	}
 	ring->ring_obj = obj;
@@ -3228,20 +3359,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 	dev_priv->ring.ring_obj = NULL;
 	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
 
-	if (dev_priv->hws_obj != NULL) {
-		struct drm_gem_object *obj = dev_priv->hws_obj;
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
-		kunmap(obj_priv->page_list[0]);
-		i915_gem_object_unpin(obj);
-		drm_gem_object_unreference(obj);
-		dev_priv->hws_obj = NULL;
-		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
-		dev_priv->hw_status_page = NULL;
-
-		/* Write high address into HWS_PGA when disabling. */
-		I915_WRITE(HWS_PGA, 0x1ffff000);
-	}
+	i915_gem_cleanup_hws(dev);
 }
 
 int
@@ -3497,7 +3615,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
 
-	DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
+	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
 	ret = copy_from_user(obj_addr, user_data, args->size);
 	if (ret)
 		return -EFAULT;