summaryrefslogtreecommitdiff
path: root/drivers/dma
diff options
context:
space:
mode:
authorFancy Fang <B47543@freescale.com>2013-12-25 18:04:56 +0800
committerNitin Garg <nitin.garg@freescale.com>2014-04-16 08:47:32 -0500
commit341d73a579f399dc1d583dfd669e14581524a4dd (patch)
tree1bf2bafeb8a25a2e97e53eef1c5e4ff8fa63ad00 /drivers/dma
parentf9d57e053215de61fc092eeeaac10b9dd0eca924 (diff)
ENGR00293323 PXP: add WC and cacheable dma buffer support for PXP device
This change adds support for new DMA buffer types (write-combine and cacheable), which gives user applications more choices for the buffer type. If the DMA buffer is cacheable, flush interfaces are also added to keep the cache coherent when necessary. Signed-off-by: Fancy Fang <B47543@freescale.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/pxp/pxp_device.c53
1 file changed, 52 insertions, 1 deletion
diff --git a/drivers/dma/pxp/pxp_device.c b/drivers/dma/pxp/pxp_device.c
index 1342ce64ab44..2b52ee1aad6e 100644
--- a/drivers/dma/pxp/pxp_device.c
+++ b/drivers/dma/pxp/pxp_device.c
@@ -447,7 +447,19 @@ static int pxp_device_mmap(struct file *file, struct vm_area_struct *vma)
(vma->vm_pgoff + vma_pages(vma)))
return -ENOMEM;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ switch (obj->mem_type) {
+ case MEMORY_TYPE_UNCACHED:
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ break;
+ case MEMORY_TYPE_WC:
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ break;
+ case MEMORY_TYPE_CACHED:
+ break;
+ default:
+ pr_err("%s: invalid memory type!\n", __func__);
+ return -EINVAL;
+ }
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
request_size, vma->vm_page_prot) ? -EAGAIN : 0;
@@ -579,6 +591,7 @@ static long pxp_device_ioctl(struct file *filp,
if (!obj)
return -ENOMEM;
obj->size = buffer.size;
+ obj->mem_type = buffer.mtype;
ret = pxp_alloc_dma_buffer(obj);
if (ret == -1) {
@@ -635,6 +648,44 @@ static long pxp_device_ioctl(struct file *filp,
break;
}
+ case PXP_IOC_FLUSH_PHYMEM:
+ {
+ int ret;
+ struct pxp_mem_flush flush;
+ struct pxp_buf_obj *obj;
+
+ ret = copy_from_user(&flush,
+ (struct pxp_mem_flush *)arg,
+ sizeof(struct pxp_mem_flush));
+ if (ret)
+ return -EACCES;
+
+ obj = pxp_buffer_object_lookup(file_priv, flush.handle);
+ if (!obj)
+ return -EINVAL;
+
+ switch (flush.type) {
+ case CACHE_CLEAN:
+ dma_sync_single_for_device(NULL, obj->offset,
+ obj->size, DMA_TO_DEVICE);
+ break;
+ case CACHE_INVALIDATE:
+ dma_sync_single_for_device(NULL, obj->offset,
+ obj->size, DMA_FROM_DEVICE);
+ break;
+ case CACHE_FLUSH:
+ dma_sync_single_for_device(NULL, obj->offset,
+ obj->size, DMA_TO_DEVICE);
+ dma_sync_single_for_device(NULL, obj->offset,
+ obj->size, DMA_FROM_DEVICE);
+ break;
+ default:
+ pr_err("%s: invalid cache flush type\n", __func__);
+ return -EINVAL;
+ }
+
+ break;
+ }
case PXP_IOC_WAIT4CMPLT:
{
struct pxp_chan_handle chan_handle;