| author | Andrew F. Davis <afd@ti.com> | 2022-08-21 01:12:11 +0000 |
|---|---|---|
| committer | Praneeth Bajjuri <praneeth@ti.com> | 2022-09-07 18:17:48 -0500 |
| commit | 2b3eb01afb523f6ea2229596a2faf7e25b70b44a (patch) | |
| tree | 370569796a3f59c35fab58efedda1a824a437c67 | |
| parent | f9344bef79dc600e7c3651e4f4a3c5b196614f87 (diff) | |
dma-buf: heaps: carveout: Add DMA sync ops for cached buffers
When originally adding support for cached carveout buffers we made some
assumptions about the devices this would run on; the most important one
here is that devices with buffer-attachable DMA accelerators would all
be K3 devices with MSMC coherency support.

With the introduction of AM62a this no longer holds true. Let's do this
the right way here and add the needed sync operation handlers in the
caching path.
Fixes: 5b692a0c8157 ("HACK: dma-buf: heaps: carveout: Add support for cached carveout heaps")
Signed-off-by: Andrew Davis <afd@ti.com>
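For context, the handlers added below back the standard DMA-BUF sync uAPI: a userspace client that mmaps a cached carveout buffer is expected to bracket its CPU accesses with DMA_BUF_IOCTL_SYNC, which is what ends up calling begin_cpu_access/end_cpu_access. The following is only a minimal sketch using the generic <linux/dma-heap.h> and <linux/dma-buf.h> uAPI; the heap node name /dev/dma_heap/carveout, the buffer size, and the lack of error handling are illustrative assumptions, not part of this patch.

```c
/* Sketch only: allocate from a (hypothetically named) carveout heap and
 * bracket CPU access with the DMA-BUF sync ioctl. Error handling omitted. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>

int main(void)
{
	/* Heap node name depends on the reserved-memory region; assumption here. */
	int heap_fd = open("/dev/dma_heap/carveout", O_RDWR);

	struct dma_heap_allocation_data alloc = {
		.len = 4096,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &alloc);

	void *p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       alloc.fd, 0);

	/* Begin CPU access: with this patch, a cached buffer has its CPU
	 * caches synced in the heap's begin_cpu_access handler. */
	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };
	ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

	memset(p, 0xa5, alloc.len);	/* CPU writes through the cached mapping */

	/* End CPU access: dirty cache lines are written back so an attached
	 * DMA accelerator sees the data. */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	ioctl(alloc.fd, DMA_BUF_IOCTL_SYNC, &sync);

	munmap(p, alloc.len);
	close(alloc.fd);
	close(heap_fd);
	return 0;
}
```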
-rw-r--r-- | drivers/dma-buf/heaps/carveout-heap.c | 65 |
1 file changed, 61 insertions(+), 4 deletions(-)
diff --git a/drivers/dma-buf/heaps/carveout-heap.c b/drivers/dma-buf/heaps/carveout-heap.c
index 5947e8eda958..c41631f6017a 100644
--- a/drivers/dma-buf/heaps/carveout-heap.c
+++ b/drivers/dma-buf/heaps/carveout-heap.c
@@ -2,8 +2,8 @@
 /*
  * Carveout DMA-Heap userspace exporter
  *
- * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
- *      Andrew F. Davis <afd@ti.com>
+ * Copyright (C) 2019-2022 Texas Instruments Incorporated - https://www.ti.com/
+ *      Andrew Davis <afd@ti.com>
  */
 
 #include <linux/dma-mapping.h>
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
 
@@ -93,11 +94,14 @@ static void dma_heap_detatch(struct dma_buf *dmabuf,
 static struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
                                              enum dma_data_direction direction)
 {
+        struct carveout_dma_heap_buffer *buffer = attachment->dmabuf->priv;
         struct dma_heap_attachment *a = attachment->priv;
         struct sg_table *table = a->table;
+        unsigned long attrs = buffer->cached ? 0 : DMA_ATTR_SKIP_CPU_SYNC;
+
         if (!dma_map_sg_attrs(attachment->dev, table->sgl, table->nents,
-                              direction, DMA_ATTR_SKIP_CPU_SYNC))
+                              direction, attrs))
                 return ERR_PTR(-ENOMEM);
 
         return table;
@@ -107,8 +111,11 @@ static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                    struct sg_table *table,
                                    enum dma_data_direction direction)
 {
+        struct carveout_dma_heap_buffer *buffer = attachment->dmabuf->priv;
+        unsigned long attrs = buffer->cached ? 0 : DMA_ATTR_SKIP_CPU_SYNC;
+
         dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
-                           direction, DMA_ATTR_SKIP_CPU_SYNC);
+                           direction, attrs);
 }
 
 static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
@@ -124,6 +131,54 @@ static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
         kfree(buffer);
 }
 
+static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+                                             enum dma_data_direction direction)
+{
+        struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+        struct dma_heap_attachment *a;
+
+        if (!buffer->cached)
+                return 0;
+
+        mutex_lock(&buffer->vmap_lock);
+        if (buffer->vmap_cnt)
+                invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+        mutex_unlock(&buffer->vmap_lock);
+
+        mutex_lock(&buffer->attachments_lock);
+        list_for_each_entry(a, &buffer->attachments, list) {
+                dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+                                    direction);
+        }
+        mutex_unlock(&buffer->attachments_lock);
+
+        return 0;
+}
+
+static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+                                           enum dma_data_direction direction)
+{
+        struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
+        struct dma_heap_attachment *a;
+
+        if (!buffer->cached)
+                return 0;
+
+        mutex_lock(&buffer->vmap_lock);
+        if (buffer->vmap_cnt)
+                flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+        mutex_unlock(&buffer->vmap_lock);
+
+        mutex_lock(&buffer->attachments_lock);
+        list_for_each_entry(a, &buffer->attachments, list) {
+                dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
+                                       direction);
+        }
+        mutex_unlock(&buffer->attachments_lock);
+
+        return 0;
+}
+
 static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
         struct carveout_dma_heap_buffer *buffer = dmabuf->priv;
@@ -187,6 +242,8 @@ static const struct dma_buf_ops carveout_dma_heap_buf_ops = {
         .map_dma_buf = dma_heap_map_dma_buf,
         .unmap_dma_buf = dma_heap_unmap_dma_buf,
         .release = dma_heap_dma_buf_release,
+        .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
+        .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
         .mmap = dma_heap_mmap,
         .vmap = dma_heap_vmap,
         .vunmap = dma_heap_vunmap,
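On the in-kernel side, an importer that touches the buffer with the CPU is expected to call dma_buf_begin_cpu_access()/dma_buf_end_cpu_access(), which for cached buffers now dispatch into the handlers added above. A rough sketch follows; the function name, the vmap'd pointer argument, and the choice of DMA_BIDIRECTIONAL are illustrative assumptions, not part of this patch.

```c
/* Illustrative importer-side usage; not part of this patch. */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/string.h>

static int example_cpu_touch(struct dma_buf *dmabuf, void *vaddr, size_t len)
{
	int ret;

	/* For a cached carveout buffer this syncs every attachment's sg list
	 * for the CPU (dma_sync_sg_for_cpu in the heap's handler). */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	memset(vaddr, 0, len);	/* CPU access through a prior vmap of the buffer */

	/* Write dirty lines back so the device sees them
	 * (dma_sync_sg_for_device in the heap's handler). */
	return dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
}
```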