author    | Linus Torvalds <torvalds@linux-foundation.org> | 2013-12-13 16:16:03 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-12-13 16:16:03 -0800
commit    | b2077ebc19a4792d1b4749ab9fe013c893588989 (patch)
tree      | dcf2cec138264c79e1ac0d85fca320145ab987a6 /arch/arm/mm/dma-mapping.c
parent    | 2430cdd0fe14884cb533b13020a0ff000144af6d (diff)
parent    | b713aa0b15015a65ad5421543b80df86de043d62 (diff)
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
"This resolves some further issues with the dma mask changes on ARM
which have been found by TI and others, and also some corner cases
with the updates to the virtual to physical address translations.
Konstantin also found some problems with the unwinder, which now
performs tighter verification that the stack is valid while unwinding"
* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: fix asm/memory.h build error
ARM: 7917/1: cacheflush: correctly limit range of memory region being flushed
ARM: 7913/1: fix framepointer check in unwind_frame
ARM: 7912/1: check stack pointer in get_wchan
ARM: 7909/1: mm: Call setup_dma_zone() post early_paging_init()
ARM: 7908/1: mm: Fix the arm_dma_limit calculation
ARM: another fix for the DMA mapping checks
Diffstat (limited to 'arch/arm/mm/dma-mapping.c')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 91
1 file changed, 40 insertions, 51 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f6b6bfa88ecf..f61a5707823a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+	unsigned long max_dma_pfn;
+
+	/*
+	 * If the mask allows for more memory than we can address,
+	 * and we actually have that much memory, then we must
+	 * indicate that DMA to this device is not supported.
+	 */
+	if (sizeof(mask) != sizeof(dma_addr_t) &&
+	    mask > (dma_addr_t)~0 &&
+	    dma_to_pfn(dev, ~0) < max_pfn) {
+		if (warn) {
+			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+				 mask);
+			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+		}
+		return 0;
+	}
+
+	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit.  This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+		if (warn)
+			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+				 mask,
+				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+				 max_dma_pfn + 1);
+		return 0;
+	}
+
+	return 1;
+}
+
 static u64 get_coherent_dma_mask(struct device *dev)
 {
 	u64 mask = (u64)DMA_BIT_MASK(32);
 
 	if (dev) {
-		unsigned long max_dma_pfn;
-
 		mask = dev->coherent_dma_mask;
 
 		/*
@@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
 			return 0;
 		}
 
-		max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
-		/*
-		 * If the mask allows for more memory than we can address,
-		 * and we actually have that much memory, then fail the
-		 * allocation.
-		 */
-		if (sizeof(mask) != sizeof(dma_addr_t) &&
-		    mask > (dma_addr_t)~0 &&
-		    dma_to_pfn(dev, ~0) > max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
-				 mask);
-			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
-			return 0;
-		}
-
-		/*
-		 * Now check that the mask, when translated to a PFN,
-		 * fits within the allowable addresses which we can
-		 * allocate.
-		 */
-		if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 arm_dma_pfn_limit + 1);
+		if (!__dma_supported(dev, mask, true))
 			return 0;
-		}
 	}
 
 	return mask;
@@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
  */
 int dma_supported(struct device *dev, u64 mask)
 {
-	unsigned long limit;
-
-	/*
-	 * If the mask allows for more memory than we can address,
-	 * and we actually have that much memory, then we must
-	 * indicate that DMA to this device is not supported.
-	 */
-	if (sizeof(mask) != sizeof(dma_addr_t) &&
-	    mask > (dma_addr_t)~0 &&
-	    dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
-		return 0;
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit.  This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	limit = dma_to_pfn(dev, mask);
-
-	if (limit < arm_dma_pfn_limit)
-		return 0;
-
-	return 1;
+	return __dma_supported(dev, mask, false);
 }
 EXPORT_SYMBOL(dma_supported);
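For context, the second dev_warn() in __dma_supported() is aimed at drivers that ignore the return value of dma_set_coherent_mask(). A minimal sketch of the driver-side pattern the warning asks for follows; example_probe() and its device are hypothetical, not part of this patch, while dma_set_coherent_mask(), DMA_BIT_MASK() and dev_err() are the standard kernel APIs.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe routine: check the result of dma_set_coherent_mask()
 * instead of silently assuming it succeeded.
 */
static int example_probe(struct device *dev)
{
	int ret;

	/* Ask for a 32-bit coherent DMA mask; the core may refuse it,
	 * e.g. when dma_supported()/__dma_supported() returns 0.
	 */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "no usable coherent DMA mask\n");
		return ret;
	}

	/* Coherent allocations (dma_alloc_coherent() etc.) are only
	 * safe once the mask has been accepted.
	 */
	return 0;
}

Checking the return value matters on ARM because the mask is validated against the actual memory layout (max_pfn and arm_dma_pfn_limit), so a mask that looks reasonable on paper can still be rejected at runtime.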