author | Rebecca Schultz Zavin <rebecca@android.com> | 2012-05-07 16:06:32 -0700 |
---|---|---|
committer | Varun Wadekar <vwadekar@nvidia.com> | 2012-07-18 14:40:09 +0530 |
commit | 72fd968a507c1eb7e5a033501794a10872374f42 (patch) | |
tree | 99d8364325407806e02b00c8e4fc7f17296116e5 /drivers/gpu | |
parent | 242fe3b623f94bc7d62c8dba0271d5d7ae91bbe3 (diff) |
gpu: ion: Allocate the sg_table at creation time rather than dynamically
Rather than calling map_dma on the allocations dynamically, this patch
switches to creating the sg_table at the time the buffer is created.
This is necessary because in future updates the sg_table will be used
for cache maintenance.
Change-Id: I49aac7c6d3a5afc440d18b917ae0e73be5d3f56d
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
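The contract this change leans on is that every heap implements map_dma and that the returned table stays valid until unmap_dma is called at buffer destruction; the diff below accordingly adds a pr_err in ion_device_add_heap for heaps missing those ops. As a hedged sketch, this is roughly what the heap-side callback looks like for a physically contiguous heap. The function name and the buffer->priv_phys field are illustrative assumptions loosely modeled on the carveout heap of this era, not code from this patch:

```c
/*
 * Sketch (not part of this patch): a map_dma callback for a physically
 * contiguous heap. One contiguous chunk needs only a single-entry
 * scatterlist. phys_to_page() is used here as on ARM, where ION lived
 * at the time.
 */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "ion_priv.h"	/* struct ion_heap, struct ion_buffer */

static struct sg_table *example_contig_heap_map_dma(struct ion_heap *heap,
						    struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);

	/* One physically contiguous allocation -> one scatterlist entry. */
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, phys_to_page(buffer->priv_phys),
		    buffer->size, 0);
	return table;
}
```

With the table built once here, ion_buffer_create can cache it in buffer->sg_table and every later consumer simply reads that field, as the diff shows.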
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/ion/ion.c | 79 |
1 file changed, 41 insertions, 38 deletions
```diff
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 534b37620d0e..cc57ed383d7d 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -137,6 +137,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 				     unsigned long flags)
 {
 	struct ion_buffer *buffer;
+	struct sg_table *table;
 	int ret;
 
 	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
@@ -151,6 +152,15 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		kfree(buffer);
 		return ERR_PTR(ret);
 	}
+
+	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+	if (IS_ERR_OR_NULL(table)) {
+		heap->ops->free(buffer);
+		kfree(buffer);
+		return ERR_PTR(PTR_ERR(table));
+	}
+	buffer->sg_table = table;
+
 	buffer->dev = dev;
 	buffer->size = len;
 	mutex_init(&buffer->lock);
@@ -166,9 +176,7 @@ static void ion_buffer_destroy(struct kref *kref)
 
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-	if (WARN_ON(buffer->dmap_cnt > 0))
-		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
-
+	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
 	buffer->heap->ops->free(buffer);
 	mutex_lock(&dev->lock);
 	rb_erase(&buffer->node, &dev->buffers);
@@ -350,6 +358,7 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 		mutex_unlock(&client->lock);
 	}
 
+
 	return handle;
 }
 
@@ -635,53 +644,42 @@ void ion_client_destroy(struct ion_client *client)
 	kfree(client);
 }
 
-static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
-					enum dma_data_direction direction)
+struct sg_table *ion_map_dma(struct ion_client *client,
+			     struct ion_handle *handle)
 {
-	struct dma_buf *dmabuf = attachment->dmabuf;
-	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_buffer *buffer;
 	struct sg_table *table;
 
-	mutex_lock(&buffer->lock);
-
-	if (!buffer->heap->ops->map_dma) {
-		pr_err("%s: map_dma is not implemented by this heap.\n",
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_dma.\n",
 		       __func__);
-		mutex_unlock(&buffer->lock);
-		return ERR_PTR(-ENODEV);
-	}
-	/* if an sg list already exists for this buffer just return it */
-	if (buffer->dmap_cnt) {
-		table = buffer->sg_table;
-		goto end;
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
 	}
-
-	/* otherwise call into the heap to create one */
-	table = buffer->heap->ops->map_dma(buffer->heap, buffer);
-	if (IS_ERR_OR_NULL(table))
-		goto err;
-	buffer->sg_table = table;
-end:
-	buffer->dmap_cnt++;
-err:
-	mutex_unlock(&buffer->lock);
+	buffer = handle->buffer;
+	table = buffer->sg_table;
+	mutex_unlock(&client->lock);
 	return table;
 }
 
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
-			      struct sg_table *table,
-			      enum dma_data_direction direction)
+void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
+{
+}
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+					enum dma_data_direction direction)
 {
 	struct dma_buf *dmabuf = attachment->dmabuf;
 	struct ion_buffer *buffer = dmabuf->priv;
 
-	mutex_lock(&buffer->lock);
-	buffer->dmap_cnt--;
-	if (!buffer->dmap_cnt) {
-		buffer->heap->ops->unmap_dma(buffer->heap, buffer);
-		buffer->sg_table = NULL;
-	}
-	mutex_unlock(&buffer->lock);
+	return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			      struct sg_table *table,
+			      enum dma_data_direction direction)
+{
 }
 
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
@@ -1015,6 +1013,11 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 	struct rb_node *parent = NULL;
 	struct ion_heap *entry;
 
+	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+	    !heap->ops->unmap_dma)
+		pr_err("%s: can not add heap with invalid ops struct.\n",
+		       __func__);
+
 	heap->dev = dev;
 	mutex_lock(&dev->lock);
 	while (*p) {
```
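The payoff hinted at by "in future updates the sg_table will be used for cache maintenance" is that buffer->sg_table is now valid for the buffer's whole lifetime, so the dma_buf map/unmap callbacks shrink to trivial accessors and later code can walk the scatterlist at any point. A hedged sketch of what such a sync helper could look like; this helper is hypothetical and not part of the patch:

```c
/*
 * Hypothetical helper (not in this patch): with the sg_table created in
 * ion_buffer_create() and freed only in ion_buffer_destroy(), cache
 * maintenance can sync the buffer's pages without refcounting a
 * dynamically created mapping.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "ion_priv.h"	/* struct ion_buffer */

static void example_ion_buffer_sync_for_device(struct device *dev,
					       struct ion_buffer *buffer,
					       enum dma_data_direction dir)
{
	/* The table exists from creation until ion_buffer_destroy(). */
	dma_sync_sg_for_device(dev, buffer->sg_table->sgl,
			       buffer->sg_table->nents, dir);
}
```

The trade-off is visible in the diff itself: map_dma now runs for every buffer, even one that is never attached to a device, and ion_unmap_dma survives only as an empty stub so existing callers keep compiling.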