Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dma-mapping-6.19-2026-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping fixes from Marek Szyprowski:

- minor fixes for corner cases of the DMA atomic pool management
  (Robin Murphy)

* tag 'dma-mapping-6.19-2026-01-20' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
dma/pool: Avoid allocating redundant pools
mm_zone: Generalise has_managed_dma()
dma/pool: Improve pool lookup

+25 -19
+5 -4
include/linux/mmzone.h
···
1648 1648  	return is_highmem_idx(zone_idx(zone));
1649 1649  }
1650 1650  
1651      -#ifdef CONFIG_ZONE_DMA
1652      -bool has_managed_dma(void);
1653      -#else
     1651 +bool has_managed_zone(enum zone_type zone);
1654 1652  static inline bool has_managed_dma(void)
1655 1653  {
     1654 +#ifdef CONFIG_ZONE_DMA
     1655 +	return has_managed_zone(ZONE_DMA);
     1656 +#else
1656 1657  	return false;
1657      -}
1658 1658  #endif
     1659 +}
1659 1660  
1660 1661  
1661 1662  #ifndef CONFIG_NUMA
+18 -9
kernel/dma/pool.c
···
184  184   	return pool;
185  185   }
186  186   
     187   +#ifdef CONFIG_ZONE_DMA32
     188   +#define has_managed_dma32 has_managed_zone(ZONE_DMA32)
     189   +#else
     190   +#define has_managed_dma32 false
     191   +#endif
     192   +
187  193   static int __init dma_atomic_pool_init(void)
188  194   {
189  195   	int ret = 0;
···
205  199   	}
206  200   	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
207  201   
208        -	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
     202   +	/* All memory might be in the DMA zone(s) to begin with */
     203   +	if (has_managed_zone(ZONE_NORMAL)) {
     204   +		atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
209  205   				GFP_KERNEL);
210        -	if (!atomic_pool_kernel)
211        -		ret = -ENOMEM;
     206   +		if (!atomic_pool_kernel)
     207   +			ret = -ENOMEM;
     208   +	}
212  209   	if (has_managed_dma()) {
213  210   		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
214  211   				GFP_KERNEL | GFP_DMA);
215  212   		if (!atomic_pool_dma)
216  213   			ret = -ENOMEM;
217  214   	}
218        -	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
     215   +	if (has_managed_dma32) {
219  216   		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
220  217   				GFP_KERNEL | GFP_DMA32);
221  218   		if (!atomic_pool_dma32)
···
233  224   static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
234  225   {
235  226   	if (prev == NULL) {
236        -		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
237        -			return atomic_pool_dma32;
238        -		if (atomic_pool_dma && (gfp & GFP_DMA))
239        -			return atomic_pool_dma;
240        -		return atomic_pool_kernel;
     227   +		if (gfp & GFP_DMA)
     228   +			return atomic_pool_dma ?: atomic_pool_dma32 ?: atomic_pool_kernel;
     229   +		if (gfp & GFP_DMA32)
     230   +			return atomic_pool_dma32 ?: atomic_pool_dma ?: atomic_pool_kernel;
     231   +		return atomic_pool_kernel ?: atomic_pool_dma32 ?: atomic_pool_dma;
241  232   	}
242  233   	if (prev == atomic_pool_kernel)
243  234   		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
+2 -6
mm/page_alloc.c
···
7457 7457  }
7458 7458  #endif
7459 7459  
7460       -#ifdef CONFIG_ZONE_DMA
7461       -bool has_managed_dma(void)
     7460  +bool has_managed_zone(enum zone_type zone)
7462 7461  {
7463 7462  	struct pglist_data *pgdat;
7464 7463  
7465 7464  	for_each_online_pgdat(pgdat) {
7466       -		struct zone *zone = &pgdat->node_zones[ZONE_DMA];
7467       -
7468       -		if (managed_zone(zone))
     7465  +		if (managed_zone(&pgdat->node_zones[zone]))
7469 7466  			return true;
7470 7467  	}
7471 7468  	return false;
7472 7469  }
7473       -#endif /* CONFIG_ZONE_DMA */
7474 7470  
7475 7471  #ifdef CONFIG_UNACCEPTED_MEMORY
7476 7472  