author	Linus Torvalds <torvalds@linux-foundation.org>	2026-01-20 10:16:18 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2026-01-20 10:16:18 -0800
commit	c03e9c42ae8f9be76a0cf55ef3f88663f0f6a63a (patch)
tree	2cd0a78dfac34afb1d534f18d5d23df7665b033c
parent	8f7537efbe5636a798cf885ea2fa0e4889995fa9 (diff)
parent	c6ccd098807483762ccd726e1498bac5a71d0005 (diff)
Merge tag 'dma-mapping-6.19-2026-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux
Pull dma-mapping fixes from Marek Szyprowski:

 - minor fixes for the corner cases of the SWIOTLB pool management
   (Robin Murphy)

* tag 'dma-mapping-6.19-2026-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
  dma/pool: Avoid allocating redundant pools
  mm_zone: Generalise has_managed_dma()
  dma/pool: Improve pool lookup
-rw-r--r--	include/linux/mmzone.h	9
-rw-r--r--	kernel/dma/pool.c	27
-rw-r--r--	mm/page_alloc.c	8
3 files changed, 25 insertions, 19 deletions
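
In short, the series gates atomic pool creation on whether the corresponding zone actually has managed pages, so configurations where all memory sits in the DMA zone(s) no longer allocate a redundant GFP_KERNEL pool. The gist, condensed from the kernel/dma/pool.c hunk below (a sketch of the post-patch code, not an additional change):

	/* All memory might be in the DMA zone(s) to begin with */
	if (has_managed_zone(ZONE_NORMAL)) {
		atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
							    GFP_KERNEL);
		if (!atomic_pool_kernel)
			ret = -ENOMEM;
	}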
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 75ef7c9f9307..fc5d6c88d2f0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1648,14 +1648,15 @@ static inline int is_highmem(const struct zone *zone)
return is_highmem_idx(zone_idx(zone));
}
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void);
-#else
+bool has_managed_zone(enum zone_type zone);
static inline bool has_managed_dma(void)
{
+#ifdef CONFIG_ZONE_DMA
+ return has_managed_zone(ZONE_DMA);
+#else
return false;
-}
#endif
+}
#ifndef CONFIG_NUMA
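
For reference, the declaration block in include/linux/mmzone.h as it reads once this hunk is applied (reassembled from the +/- lines above; has_managed_dma() stays available but becomes a trivial wrapper around the generic per-zone check):

bool has_managed_zone(enum zone_type zone);

static inline bool has_managed_dma(void)
{
#ifdef CONFIG_ZONE_DMA
	return has_managed_zone(ZONE_DMA);
#else
	return false;
#endif
}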
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 26392badc36b..c5da29ad010c 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -184,6 +184,12 @@ static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
return pool;
}
+#ifdef CONFIG_ZONE_DMA32
+#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
+#else
+#define has_managed_dma32 false
+#endif
+
static int __init dma_atomic_pool_init(void)
{
int ret = 0;
@@ -199,17 +205,20 @@ static int __init dma_atomic_pool_init(void)
}
INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
- atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
+ /* All memory might be in the DMA zone(s) to begin with */
+ if (has_managed_zone(ZONE_NORMAL)) {
+ atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
GFP_KERNEL);
- if (!atomic_pool_kernel)
- ret = -ENOMEM;
+ if (!atomic_pool_kernel)
+ ret = -ENOMEM;
+ }
if (has_managed_dma()) {
atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
GFP_KERNEL | GFP_DMA);
if (!atomic_pool_dma)
ret = -ENOMEM;
}
- if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+ if (has_managed_dma32) {
atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
GFP_KERNEL | GFP_DMA32);
if (!atomic_pool_dma32)
@@ -224,11 +233,11 @@ postcore_initcall(dma_atomic_pool_init);
static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
if (prev == NULL) {
- if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
- return atomic_pool_dma32;
- if (atomic_pool_dma && (gfp & GFP_DMA))
- return atomic_pool_dma;
- return atomic_pool_kernel;
+ if (gfp & GFP_DMA)
+ return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
+ if (gfp & GFP_DMA32)
+ return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
+ return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
}
if (prev == atomic_pool_kernel)
return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
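
The initial pool selection in dma_guess_pool() after this hunk, reassembled from the + lines above: each request now falls back along the chain of pools that were actually created, rather than assuming a pool exists for the requested zone. (The `a ?: b` GNU C extension evaluates to `a` when it is non-NULL, otherwise `b`.)

	if (prev == NULL) {
		if (gfp & GFP_DMA)
			return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
		if (gfp & GFP_DMA32)
			return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
		return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
	}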
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f65c4edf199d..cbf758e27aa2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7457,20 +7457,16 @@ bool put_page_back_buddy(struct page *page)
}
#endif
-#ifdef CONFIG_ZONE_DMA
-bool has_managed_dma(void)
+bool has_managed_zone(enum zone_type zone)
{
struct pglist_data *pgdat;
for_each_online_pgdat(pgdat) {
- struct zone *zone = &pgdat->node_zones[ZONE_DMA];
-
- if (managed_zone(zone))
+ if (managed_zone(&pgdat->node_zones[zone]))
return true;
}
return false;
}
-#endif /* CONFIG_ZONE_DMA */
#ifdef CONFIG_UNACCEPTED_MEMORY
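
The generalised helper in mm/page_alloc.c as it reads after this hunk (reassembled from the +/- lines above): it walks every online node and reports whether any of them has managed pages in the requested zone, which is what both has_managed_dma() and the new has_managed_dma32 check build on.

bool has_managed_zone(enum zone_type zone)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat) {
		if (managed_zone(&pgdat->node_zones[zone]))
			return true;
	}
	return false;
}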