-rw-r--r--  arch/arm/common/dmabounce.c    |  4
-rw-r--r--  arch/arm/mm/consistent.c       |  4
-rw-r--r--  include/asm-arm/dma-mapping.h  | 14
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index b36b1e8a105d..44ab0dad4035 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -263,7 +263,7 @@ map_single(struct device *dev, void *ptr, size_t size,
* We don't need to sync the DMA buffer since
* it was allocated via the coherent allocators.
*/
- consistent_sync(ptr, size, dir);
+ dma_cache_maint(ptr, size, dir);
}
return dma_addr;
@@ -383,7 +383,7 @@ sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
* via the coherent allocators.
*/
} else {
- consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
+ dma_cache_maint(dma_to_virt(dev, dma_addr), size, dir);
}
}
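Note: the two hunks above sit on the non-bounced paths of dmabounce's map_single() and sync_single(); when a bounce buffer is in use, the data is copied into a DMA-safe buffer instead and no cache maintenance is needed. A rough, illustrative sketch of that control flow follows; needs_bounce() and copy_to_safe_buffer() are made-up helper names, not the file's real identifiers.

#include <linux/dma-mapping.h>

/* Illustrative only: the real map_single() in dmabounce.c differs in detail. */
static dma_addr_t map_single_sketch(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	if (needs_bounce(dev, ptr, size)) {
		/* Bounce path: copy into a DMA-safe buffer that came from the
		 * coherent allocators, so no cache maintenance is required. */
		dma_addr = copy_to_safe_buffer(dev, ptr, size, dir);
	} else {
		/* Direct path: the buffer is used in place, so perform cache
		 * maintenance for the given transfer direction. */
		dma_cache_maint(ptr, size, dir);
		dma_addr = virt_to_dma(dev, (unsigned long)ptr);
	}

	return dma_addr;
}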
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 1f9f94f9af4b..cefdf2f9f26e 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -481,7 +481,7 @@ core_initcall(consistent_init);
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-void consistent_sync(const void *start, size_t size, int direction)
+void dma_cache_maint(const void *start, size_t size, int direction)
{
const void *end = start + size;
@@ -504,4 +504,4 @@ void consistent_sync(const void *start, size_t size, int direction)
BUG();
}
}
-EXPORT_SYMBOL(consistent_sync);
+EXPORT_SYMBOL(dma_cache_maint);
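The hunks above show only the head and tail of the renamed routine. As a reminder of what it does, here is a hedged sketch of the direction dispatch such a cache-maintenance helper performs, assuming the ARM dmac_*_range operations of this era; the actual body in consistent.c may differ.

#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

/* Sketch only: dispatch cache maintenance on the DMA direction. */
static void dma_cache_maint_sketch(const void *start, size_t size, int direction)
{
	const void *end = start + size;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* device writes memory: invalidate */
		dmac_inv_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* device reads memory: clean (writeback) */
		dmac_clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* both: clean and invalidate */
		dmac_flush_range(start, end);
		break;
	default:
		BUG();			/* matches the default: BUG() shown above */
	}
}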
diff --git a/include/asm-arm/dma-mapping.h b/include/asm-arm/dma-mapping.h
index c8b5d0db0cf0..678134bf2475 100644
--- a/include/asm-arm/dma-mapping.h
+++ b/include/asm-arm/dma-mapping.h
@@ -17,7 +17,7 @@
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-extern void consistent_sync(const void *kaddr, size_t size, int rw);
+extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
/*
* Return whether the given device DMA address mask can be supported
@@ -165,7 +165,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir)
{
if (!arch_is_coherent())
- consistent_sync(cpu_addr, size, dir);
+ dma_cache_maint(cpu_addr, size, dir);
return virt_to_dma(dev, (unsigned long)cpu_addr);
}
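For context, a minimal usage sketch of the streaming API that dma_map_single() belongs to; dev, buf and len are whatever the calling driver already has, and the transfer itself is elided.

#include <linux/dma-mapping.h>

/* Map a kernel buffer for a CPU-to-device transfer, then unmap it.
 * On non-coherent ARM the map step performs the cache maintenance
 * shown in the inline above. */
static void example_single_transfer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... hand "handle" to the device and let the transfer complete ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}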
@@ -278,7 +278,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
virt = page_address(sg->page) + sg->offset;
if (!arch_is_coherent())
- consistent_sync(virt, sg->length, dir);
+ dma_cache_maint(virt, sg->length, dir);
}
return nents;
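Similarly, a usage sketch for the scatterlist variant; the dev_dbg() line stands in for programming one device descriptor per mapped segment.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map a scatterlist for a device-to-memory transfer. dma_map_sg() may
 * return fewer entries than it was given; only the returned count is
 * valid for sg_dma_address()/sg_dma_len(). */
static void example_sg_transfer(struct device *dev, struct scatterlist *sgl,
				int nents)
{
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	for (i = 0; i < count; i++)
		dev_dbg(dev, "segment %d: %#lx + %u\n", i,
			(unsigned long)sg_dma_address(&sgl[i]),
			sg_dma_len(&sgl[i]));

	/* ... run the transfer ... */

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
}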
@@ -334,7 +334,7 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (!arch_is_coherent())
- consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+ dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
}
static inline void
@@ -342,7 +342,7 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir)
{
if (!arch_is_coherent())
- consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
+ dma_cache_maint((void *)dma_to_virt(dev, handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
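The two sync_single inlines above exist so a long-lived mapping can be reused: ownership of the buffer is handed to the device before it writes and back to the CPU before the data is read. A hedged usage sketch, with illustrative parameter names:

#include <linux/dma-mapping.h>

/* Reuse an existing mapping ("handle") across transfers. */
static void example_reuse_mapping(struct device *dev, dma_addr_t handle,
				  size_t len)
{
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* ... the device DMAs into the buffer ... */

	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* The CPU may now safely read the freshly written data. */
}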
@@ -373,7 +373,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
if (!arch_is_coherent())
- consistent_sync(virt, sg->length, dir);
+ dma_cache_maint(virt, sg->length, dir);
}
}
@@ -386,7 +386,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset;
if (!arch_is_coherent())
- consistent_sync(virt, sg->length, dir);
+ dma_cache_maint(virt, sg->length, dir);
}
}
#else
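And the equivalent ownership hand-off for a scatterlist mapping, again as a usage sketch with illustrative parameter names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Reuse a mapped scatterlist across transfers. */
static void example_reuse_sg(struct device *dev, struct scatterlist *sgl,
			     int nents)
{
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);

	/* ... the device fills the segments ... */

	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
}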