author    Marek Szyprowski <m.szyprowski@samsung.com>  2013-02-26 07:46:24 +0100
committer Marek Szyprowski <m.szyprowski@samsung.com>  2013-03-14 09:25:19 +0100
commit    9d1400cf79afb49584b4873eb22cd5130cb341db (patch)
tree      9590b2041122620f4f70cdf0922a39cc67ba9021
parent    6dbe51c251a327e012439c4772097a13df43c5b8 (diff)
ARM: DMA-mapping: add missing GFP_DMA flag for atomic buffer allocation
The atomic pool should always be allocated from the DMA zone if such a zone
is available in the system, to avoid issues caused by the limited DMA mask
of any of the devices used for making an atomic allocation.

Reported-by: Krzysztof Halasa <khc@pm.waw.pl>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Stable <stable@vger.kernel.org> [v3.6+]
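For background (not part of the commit itself): GFP_DMA directs the page
allocator to satisfy the request from ZONE_DMA, the low region of memory that
devices with a restricted DMA mask can still address. A minimal, hypothetical
sketch of such an allocation:

	#include <linux/gfp.h>

	/*
	 * Illustrative only: allocate a single page constrained to ZONE_DMA,
	 * so the resulting physical address is reachable by devices whose
	 * dma_mask covers only the low DMA zone.
	 */
	static struct page *example_alloc_dma_zone_page(void)
	{
		return alloc_pages(GFP_KERNEL | GFP_DMA, 0);
	}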
-rw-r--r--  arch/arm/mm/dma-mapping.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c7e3759f16d3..e9db6b4bf65a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -342,6 +342,7 @@ static int __init atomic_pool_init(void)
 {
 	struct dma_pool *pool = &atomic_pool;
 	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+	gfp_t gfp = GFP_KERNEL | GFP_DMA;
 	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
 	unsigned long *bitmap;
 	struct page *page;
@@ -361,8 +362,8 @@ static int __init atomic_pool_init(void)
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
 					      atomic_pool_init);
 	else
-		ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
-					   &page, atomic_pool_init);
+		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
+					   atomic_pool_init);
 	if (ptr) {
 		int i;
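For context, a hedged sketch (not from this commit) of the consumer side: on
ARM, a coherent allocation made in atomic context is carved out of this
pre-allocated pool, so a device whose dma_mask only reaches ZONE_DMA depends
on the pool itself being placed there. The device pointer and buffer size
below are placeholders.

	#include <linux/dma-mapping.h>

	/*
	 * Hypothetical driver path: with GFP_ATOMIC, dma_alloc_coherent()
	 * on ARM cannot sleep and is served from the atomic pool, so the
	 * pool must be addressable under the device's dma_mask.
	 */
	static void *example_atomic_coherent_alloc(struct device *dev,
						   dma_addr_t *handle)
	{
		return dma_alloc_coherent(dev, 4096, handle, GFP_ATOMIC);
	}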