author    Ben Skeggs <bskeggs@redhat.com>    2011-02-16 08:41:56 +1000
committer Ben Skeggs <bskeggs@redhat.com>    2011-02-25 06:45:34 +1000
commit    d550c41e4ff11fe69b5f92868157253d27937d1f
tree      5c51d494f497d6cdffc822964bfeca0ba310ac3d /drivers/gpu/drm/nouveau/nouveau_bo.c
parent    2503c6fa3edf7c2bb001c7f7926786eed24cc06e
drm/nouveau: remove no_vm/mappable flags from nouveau_bo
'mappable' isn't really used at all, nor is it necessary anymore as the bo code is capable of moving buffers to mappable vram as required. 'no_vm' isn't necessary anymore either; any place that doesn't want to be mapped into a GPU address space should allocate its VRAM directly instead.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
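For illustration only (this snippet is not part of the patch): with the new prototype, a hypothetical caller just drops the two booleans from the argument list; nothing else changes. "dev", "chan" and "size" are assumed to be in scope, and the alignment, placement flag and tile arguments below are placeholder values.

	struct nouveau_bo *nvbo = NULL;
	int ret;

	/* Before this patch the call carried two extra flags:
	 *   nouveau_bo_new(dev, chan, size, 0, TTM_PL_FLAG_VRAM,
	 *                  0, 0x0000, false, true, &nvbo);
	 */
	ret = nouveau_bo_new(dev, chan, size, 0, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nvbo);
	if (ret)
		return ret;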
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  62
1 file changed, 24 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index bf260af18b31..897c55509a6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -98,8 +98,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
 int
 nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
-	       uint32_t tile_flags, bool no_vm, bool mappable,
-	       struct nouveau_bo **pnvbo)
+	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
@@ -110,8 +109,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 		return -ENOMEM;
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
-	nvbo->mappable = mappable;
-	nvbo->no_vm = no_vm;
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
@@ -119,7 +116,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
-	if (!nvbo->no_vm && dev_priv->chan_vm) {
+	if (dev_priv->chan_vm) {
 		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
 				     NV_MEM_ACCESS_RW, &nvbo->vma);
 		if (ret) {
@@ -504,14 +501,6 @@ static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
-
-	if (nvbo->no_vm) {
-		if (mem->mem_type == TTM_PL_TT)
-			return NvDmaGART;
-		return NvDmaVRAM;
-	}
-
 	if (mem->mem_type == TTM_PL_TT)
 		return chan->gart_handle;
 	return chan->vram_handle;
@@ -523,22 +512,21 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	u64 src_offset = old_mem->start << PAGE_SHIFT;
-	u64 dst_offset = new_mem->start << PAGE_SHIFT;
 	u32 page_count = new_mem->num_pages;
+	u64 src_offset, dst_offset;
 	int ret;
 
-	if (!nvbo->no_vm) {
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset = nvbo->vma.offset;
-		else
-			src_offset += dev_priv->gart_info.aper_base;
+	src_offset = old_mem->start << PAGE_SHIFT;
+	if (old_mem->mem_type == TTM_PL_VRAM)
+		src_offset = nvbo->vma.offset;
+	else
+		src_offset += dev_priv->gart_info.aper_base;
 
-		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset = nvbo->vma.offset;
-		else
-			dst_offset += dev_priv->gart_info.aper_base;
-	}
+	dst_offset = new_mem->start << PAGE_SHIFT;
+	if (new_mem->mem_type == TTM_PL_VRAM)
+		dst_offset = nvbo->vma.offset;
+	else
+		dst_offset += dev_priv->gart_info.aper_base;
 
 	page_count = new_mem->num_pages;
 	while (page_count) {
@@ -580,18 +568,16 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 	int ret;
 
 	src_offset = old_mem->start << PAGE_SHIFT;
-	dst_offset = new_mem->start << PAGE_SHIFT;
-	if (!nvbo->no_vm) {
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset = nvbo->vma.offset;
-		else
-			src_offset += dev_priv->gart_info.aper_base;
+	if (old_mem->mem_type == TTM_PL_VRAM)
+		src_offset = nvbo->vma.offset;
+	else
+		src_offset += dev_priv->gart_info.aper_base;
 
-		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset = nvbo->vma.offset;
-		else
-			dst_offset += dev_priv->gart_info.aper_base;
-	}
+	dst_offset = new_mem->start << PAGE_SHIFT;
+	if (new_mem->mem_type == TTM_PL_VRAM)
+		dst_offset = nvbo->vma.offset;
+	else
+		dst_offset += dev_priv->gart_info.aper_base;
 
 	ret = RING_SPACE(chan, 3);
 	if (ret)
@@ -737,7 +723,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	int ret;
 
 	chan = nvbo->channel;
-	if (!chan || nvbo->no_vm) {
+	if (!chan) {
 		chan = dev_priv->channel;
 		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 	}
@@ -836,7 +822,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	uint64_t offset;
 
-	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+	if (new_mem->mem_type != TTM_PL_VRAM) {
 		/* Nothing to do. */
 		*new_tile = NULL;
 		return 0;
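The nv50/nvc0 copy hunks above collapse to a single rule now that these paths can assume buffers are mapped into the channel VM: VRAM is addressed through the per-bo VMA, anything else through the GART aperture. A condensed restatement, not code from the patch (the helper name is invented, and it assumes the driver's internal headers for the types involved):

/* Offset selection used by nv50_bo_move_m2mf() and nvc0_bo_move_m2mf()
 * after this change; written as a standalone helper purely to illustrate
 * the rule, no such function exists in the driver. */
static u64
bo_copy_offset(struct drm_nouveau_private *dev_priv,
	       struct nouveau_bo *nvbo, struct ttm_mem_reg *mem)
{
	u64 offset = mem->start << PAGE_SHIFT;

	if (mem->mem_type == TTM_PL_VRAM)
		return nvbo->vma.offset;			/* via the channel VM */
	return offset + dev_priv->gart_info.aper_base;	/* via the GART aperture */
}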