author     Stephen Rothwell <sfr@canb.auug.org.au>    2005-11-19 01:40:46 +1100
committer  Stephen Rothwell <sfr@canb.auug.org.au>    2005-11-19 01:48:52 +1100
commit     78b09735a2f42f32c4611d92ea51755e1faae385 (patch)
tree       7123a78093d45454aff1350e95457b129383366d /include
parent     78baa2f8ad53968ff82ad9827b7793b3f46cba0e (diff)
powerpc: merge dma-mapping.h
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-powerpc/dma-mapping.h (renamed from include/asm-ppc/dma-mapping.h)  128
-rw-r--r--  include/asm-ppc64/dma-mapping.h                                                  136
2 files changed, 93 insertions, 171 deletions
diff --git a/include/asm-ppc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 798602620e87..59a80163f75f 100644
--- a/include/asm-ppc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -1,15 +1,22 @@
 /*
- * This is based on both include/asm-sh/dma-mapping.h and
- * include/asm-ppc/pci.h
+ * Copyright (C) 2004 IBM
+ *
+ * Implements the generic device dma API for powerpc.
+ * the pci and vio busses
  */
-#ifndef __ASM_PPC_DMA_MAPPING_H
-#define __ASM_PPC_DMA_MAPPING_H
+#ifndef _ASM_DMA_MAPPING_H
+#define _ASM_DMA_MAPPING_H
 
 #include <linux/config.h>
+#include <linux/types.h>
+#include <linux/cache.h>
 /* need struct page definitions */
 #include <linux/mm.h>
 #include <asm/scatterlist.h>
 #include <asm/io.h>
+#include <asm/bug.h>
+
+#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
 /*
@@ -37,6 +44,30 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,
 
 #endif /* ! CONFIG_NOT_COHERENT_CACHE */
 
+#ifdef CONFIG_PPC64
+
+extern int dma_supported(struct device *dev, u64 mask);
+extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern void *dma_alloc_coherent(struct device *dev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flag);
+extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_handle);
+extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+		size_t size, enum dma_data_direction direction);
+extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+		size_t size, enum dma_data_direction direction);
+extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size,
+		enum dma_data_direction direction);
+extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+		size_t size, enum dma_data_direction direction);
+extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction direction);
+extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int nhwentries, enum dma_data_direction direction);
+
+#else /* CONFIG_PPC64 */
+
 #define dma_supported(dev, mask)	(1)
 
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
@@ -134,29 +165,27 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 /* We don't do anything here. */
 #define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)
 
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			size_t size,
-			enum dma_data_direction direction)
+#endif /* CONFIG_PPC64 */
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
-
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
-			   size_t size,
-			   enum dma_data_direction direction)
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
-
 	__dma_sync(bus_to_virt(dma_handle), size, direction);
 }
 
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-		    enum dma_data_direction direction)
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sg, int nents,
+		enum dma_data_direction direction)
 {
 	int i;
 
@@ -166,9 +195,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-		       enum dma_data_direction direction)
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sg, int nents,
+		enum dma_data_direction direction)
 {
 	int i;
 
@@ -178,6 +207,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
 }
 
+static inline int dma_mapping_error(dma_addr_t dma_addr)
+{
+#ifdef CONFIG_PPC64
+	return (dma_addr == DMA_ERROR_CODE);
+#else
+	return 0;
+#endif
+}
+
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -188,40 +226,60 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
 
 static inline int dma_get_cache_alignment(void)
 {
+#ifdef CONFIG_PPC64
+	/* no easy way to get cache size on all processors, so return
+	 * the maximum possible, to be safe */
+	return (1 << L1_CACHE_SHIFT_MAX);
+#else
 	/*
 	 * Each processor family will define its own L1_CACHE_SHIFT,
 	 * L1_CACHE_BYTES wraps to this, so this is always safe.
 	 */
 	return L1_CACHE_BYTES;
+#endif
 }
 
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-			      unsigned long offset, size_t size,
-			      enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
 	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
 }
 
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-				 unsigned long offset, size_t size,
-				 enum dma_data_direction direction)
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
 {
 	/* just sync everything for now */
 	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
 static inline void dma_cache_sync(void *vaddr, size_t size,
-				  enum dma_data_direction direction)
+		enum dma_data_direction direction)
 {
+	BUG_ON(direction == DMA_NONE);
 	__dma_sync(vaddr, size, (int)direction);
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-#endif /* __ASM_PPC_DMA_MAPPING_H */
+/*
+ * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
+ */
+struct dma_mapping_ops {
+	void *		(*alloc_coherent)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag);
+	void		(*free_coherent)(struct device *dev, size_t size,
+				void *vaddr, dma_addr_t dma_handle);
+	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
+				size_t size, enum dma_data_direction direction);
+	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
+				size_t size, enum dma_data_direction direction);
+	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction);
+	int		(*dma_supported)(struct device *dev, u64 mask);
+	int		(*dac_dma_supported)(struct device *dev, u64 mask);
+};
+
+#endif /* _ASM_DMA_MAPPING_H */
diff --git a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h
deleted file mode 100644
index fb68fa23bea8..000000000000
--- a/include/asm-ppc64/dma-mapping.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (C) 2004 IBM
- *
- * Implements the generic device dma API for ppc64. Handles
- * the pci and vio busses
- */
-
-#ifndef _ASM_DMA_MAPPING_H
-#define _ASM_DMA_MAPPING_H
-
-#include <linux/types.h>
-#include <linux/cache.h>
-/* need struct page definitions */
-#include <linux/mm.h>
-#include <asm/scatterlist.h>
-#include <asm/bug.h>
-
-#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
-
-extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 dma_mask);
-extern void *dma_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flag);
-extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		dma_addr_t dma_handle);
-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
-		size_t size, enum dma_data_direction direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
-		size_t size, enum dma_data_direction direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction);
-
-static inline void
-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline int dma_mapping_error(dma_addr_t dma_addr)
-{
-	return (dma_addr == DMA_ERROR_CODE);
-}
-
-/* Now for the API extensions over the pci_ one */
-
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_is_consistent(d)	(1)
-
-static inline int
-dma_get_cache_alignment(void)
-{
-	/* no easy way to get cache size on all processors, so return
-	 * the maximum possible, to be safe */
-	return (1 << L1_CACHE_SHIFT_MAX);
-}
-
-static inline void
-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
-		unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-static inline void
-dma_cache_sync(void *vaddr, size_t size,
-		enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-	/* nothing to do */
-}
-
-/*
- * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
- */
-struct dma_mapping_ops {
-	void *		(*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag);
-	void		(*free_coherent)(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction);
-	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
-				int nents, enum dma_data_direction direction);
-	int		(*dma_supported)(struct device *dev, u64 mask);
-	int		(*dac_dma_supported)(struct device *dev, u64 mask);
-};
-
-#endif /* _ASM_DMA_MAPPING_H */
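
For readers coming to the merged header fresh, the sketch below shows how a driver would typically consume this API. It is an illustrative fragment only, not part of the commit: the function example_do_dma() and its dev/buf/len parameters are hypothetical. Note that in this era dma_mapping_error() takes only the dma_addr_t, and its ppc32 branch always reports success.

/*
 * Illustrative sketch only -- not part of this commit.  Shows typical
 * driver-side use of the merged API: map a buffer for device-to-memory
 * DMA, check the mapping, let the device run, then unmap.
 */
static int example_do_dma(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* On ppc64 this dispatches through the bus-specific
	 * dma_mapping_ops; on ppc32 it is an inline bus-address
	 * translation plus a cache sync when the cache is not
	 * DMA-coherent (CONFIG_NOT_COHERENT_CACHE). */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	/* Only ppc64 can actually fail here, by returning
	 * DMA_ERROR_CODE; the ppc32 branch always returns 0. */
	if (dma_mapping_error(handle))
		return -ENOMEM;

	/* ... program the device with `handle' and wait for the
	 * transfer to complete ... */

	/* Unmapping makes the buffer safe for the CPU to read again;
	 * use dma_sync_single_for_cpu() instead to peek at the data
	 * while keeping the mapping alive. */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}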