author      Christoph Hellwig <hch@lst.de>    2018-11-04 17:47:44 +0100
committer   Christoph Hellwig <hch@lst.de>    2018-12-01 18:07:16 +0100
commit      f04b951f6c7eccd85ea7750a5fafa68fb98d6bfa (patch)
tree        4c371d7195aee8298ec34120951d3f28eec47ffe /arch/csky/mm
parent      576d0d552be803b22867ed98a8619d68b1f78bbe (diff)
csky: use the generic remapping dma alloc implementation
The csky code was largely copied from arm/arm64, so switch to the
generic arm64-based implementation instead.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Guo Ren <ren_guo@c-sky.com>
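
For context, the switch means arch/csky/mm/dma-mapping.c no longer implements its own coherent allocator: the architecture only seeds the common atomic pool and provides a cache-maintenance hook, while the generic remapping code supplies the actual alloc/free entry points. Below is a rough sketch of what the csky glue reduces to, taken from the `+` lines of the diff further down; the Kconfig change that enables the generic code (presumably a DMA_DIRECT_REMAP select in arch/csky/Kconfig) falls outside the arch/csky/mm diffstat shown here.

/*
 * Sketch only, not the literal post-patch file; the existing #includes and
 * the cache_op()-based streaming-DMA sync helpers in dma-mapping.c stay
 * as they are.
 */

/* Seed the common atomic pool used for non-blocking coherent allocations. */
static int __init atomic_pool_init(void)
{
        return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);

/*
 * Hook the generic allocator calls to prepare freshly allocated pages;
 * the body is the old __dma_clear_buffer(), which this patch renames.
 */
void arch_dma_prep_coherent(struct page *page, size_t size)
{
        /* body unchanged from __dma_clear_buffer(), see the diff below */
}

Whether an allocation is then served from remapped pages or from this pool is decided entirely in common code, which is what allows the csky-specific arch_dma_alloc()/arch_dma_free() pair to be deleted in the diff below.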
Diffstat (limited to 'arch/csky/mm')
-rw-r--r--   arch/csky/mm/dma-mapping.c   142
1 file changed, 2 insertions(+), 140 deletions(-)
diff --git a/arch/csky/mm/dma-mapping.c b/arch/csky/mm/dma-mapping.c
index ad4046939713..80783bb71c5c 100644
--- a/arch/csky/mm/dma-mapping.c
+++ b/arch/csky/mm/dma-mapping.c
@@ -14,73 +14,13 @@
 #include <linux/version.h>
 #include <asm/cache.h>
 
-static struct gen_pool *atomic_pool;
-static size_t atomic_pool_size __initdata = SZ_256K;
-
-static int __init early_coherent_pool(char *p)
-{
-        atomic_pool_size = memparse(p, &p);
-        return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
 static int __init atomic_pool_init(void)
 {
-        struct page *page;
-        size_t size = atomic_pool_size;
-        void *ptr;
-        int ret;
-
-        atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
-        if (!atomic_pool)
-                BUG();
-
-        page = alloc_pages(GFP_KERNEL, get_order(size));
-        if (!page)
-                BUG();
-
-        ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
-                                          pgprot_noncached(PAGE_KERNEL),
-                                          __builtin_return_address(0));
-        if (!ptr)
-                BUG();
-
-        ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
-                                page_to_phys(page), atomic_pool_size, -1);
-        if (ret)
-                BUG();
-
-        gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
-
-        pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
-                atomic_pool_size / 1024);
-
-        pr_info("DMA: vaddr: 0x%x phy: 0x%lx,\n", (unsigned int)ptr,
-                page_to_phys(page));
-
-        return 0;
+        return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
 }
 postcore_initcall(atomic_pool_init);
 
-static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
-                                   dma_addr_t *dma_handle)
-{
-        unsigned long addr;
-
-        addr = gen_pool_alloc(atomic_pool, size);
-        if (addr)
-                *dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);
-
-        return (void *)addr;
-}
-
-static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
-                                 dma_addr_t dma_handle, unsigned long attrs)
-{
-        gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
-}
-
-static void __dma_clear_buffer(struct page *page, size_t size)
+void arch_dma_prep_coherent(struct page *page, size_t size)
 {
         if (PageHighMem(page)) {
                 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -107,84 +47,6 @@ static void __dma_clear_buffer(struct page *page, size_t size)
         }
 }
 
-static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
-                                      dma_addr_t *dma_handle, gfp_t gfp,
-                                      unsigned long attrs)
-{
-        void *vaddr;
-        struct page *page;
-        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-        if (DMA_ATTR_NON_CONSISTENT & attrs) {
-                pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n", __func__);
-                return NULL;
-        }
-
-        if (IS_ENABLED(CONFIG_DMA_CMA))
-                page = dma_alloc_from_contiguous(dev, count, get_order(size),
-                                                 gfp);
-        else
-                page = alloc_pages(gfp, get_order(size));
-
-        if (!page) {
-                pr_err("csky %s no more free pages.\n", __func__);
-                return NULL;
-        }
-
-        *dma_handle = page_to_phys(page);
-
-        __dma_clear_buffer(page, size);
-
-        if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-                return page;
-
-        vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
-                pgprot_noncached(PAGE_KERNEL), __builtin_return_address(0));
-        if (!vaddr)
-                BUG();
-
-        return vaddr;
-}
-
-static void csky_dma_free_nonatomic(
-        struct device *dev,
-        size_t size,
-        void *vaddr,
-        dma_addr_t dma_handle,
-        unsigned long attrs
-        )
-{
-        struct page *page = phys_to_page(dma_handle);
-        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-        if ((unsigned int)vaddr >= VMALLOC_START)
-                dma_common_free_remap(vaddr, size, VM_USERMAP);
-
-        if (IS_ENABLED(CONFIG_DMA_CMA))
-                dma_release_from_contiguous(dev, page, count);
-        else
-                __free_pages(page, get_order(size));
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-                     gfp_t gfp, unsigned long attrs)
-{
-        if (gfpflags_allow_blocking(gfp))
-                return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
-                                                attrs);
-        else
-                return csky_dma_alloc_atomic(dev, size, dma_handle);
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-                   dma_addr_t dma_handle, unsigned long attrs)
-{
-        if (!addr_in_gen_pool(atomic_pool, (unsigned int) vaddr, size))
-                csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
-        else
-                csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
-}
-
 static inline void cache_op(phys_addr_t paddr, size_t size,
                             void (*fn)(unsigned long start, unsigned long end))
 {