Merge tag 'dma-mapping-6.19-2026-01-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping fixes from Marek Szyprowski:

- an important fix for ARM 32-bit based systems using the "cma=" kernel
parameter (Oreoluwa Babatunde)

- a fix for a corner case in DMA atomic pool based allocations
(Sai Sree Kartheek Adivi)

* tag 'dma-mapping-6.19-2026-01-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
dma/pool: distinguish between missing and exhausted atomic pools
of: reserved_mem: Allow reserved_mem framework detect "cma=" kernel param

+42 -9
+17 -2
drivers/of/of_reserved_mem.c
···
157 157 	phys_addr_t base, size;
158 158 	int i, len;
159 159 	const __be32 *prop;
160     -	bool nomap;
    160 +	bool nomap, default_cma;
161 161 
162 162 	prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
163 163 	if (!prop)
164 164 		return -ENOENT;
165 165 
166 166 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
    167 +	default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
    168 +
    169 +	if (default_cma && cma_skip_dt_default_reserved_mem()) {
    170 +		pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
    171 +		return -EINVAL;
    172 +	}
167 173 
168 174 	for (i = 0; i < len; i++) {
169 175 		u64 b, s;
···
254 248 
255 249 	fdt_for_each_subnode(child, fdt, node) {
256 250 		const char *uname;
    251 +		bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL);
257 252 		u64 b, s;
258 253 
259 254 		if (!of_fdt_device_is_available(fdt, child))
    255 +			continue;
    256 +		if (default_cma && cma_skip_dt_default_reserved_mem())
260 257 			continue;
261 258 
262 259 		if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
···
398 389 	phys_addr_t base = 0, align = 0, size;
399 390 	int i, len;
400 391 	const __be32 *prop;
401     -	bool nomap;
    392 +	bool nomap, default_cma;
402 393 	int ret;
403 394 
404 395 	prop = of_get_flat_dt_prop(node, "size", &len);
···
422 413 	}
423 414 
424 415 	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
    416 +	default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
    417 +
    418 +	if (default_cma && cma_skip_dt_default_reserved_mem()) {
    419 +		pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
    420 +		return -EINVAL;
    421 +	}
425 422 
426 423 	/* Need adjust the alignment to satisfy the CMA requirement */
427 424 	if (IS_ENABLED(CONFIG_CMA)
+9
include/linux/cma.h
···
57 57 
58 58 extern void cma_reserve_pages_on_error(struct cma *cma);
59 59 
   60 +#ifdef CONFIG_DMA_CMA
   61 +extern bool cma_skip_dt_default_reserved_mem(void);
   62 +#else
   63 +static inline bool cma_skip_dt_default_reserved_mem(void)
   64 +{
   65 +	return false;
   66 +}
   67 +#endif
   68 +
60 69 #ifdef CONFIG_CMA
61 70 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
62 71 bool cma_free_folio(struct cma *cma, const struct folio *folio);
+10 -6
kernel/dma/contiguous.c
···
91 91 }
92 92 early_param("cma", early_cma);
93 93 
    94 +/*
    95 + * cma_skip_dt_default_reserved_mem - This is called from the
    96 + * reserved_mem framework to detect if the default cma region is being
    97 + * set by the "cma=" kernel parameter.
    98 + */
    99 +bool __init cma_skip_dt_default_reserved_mem(void)
   100 +{
   101 +	return size_cmdline != -1;
   102 +}
   103 +
94 104 #ifdef CONFIG_DMA_NUMA_CMA
95 105 
96 106 static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
···
479 469 	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
480 470 	struct cma *cma;
481 471 	int err;
482     -
483     -	if (size_cmdline != -1 && default_cma) {
484     -		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
485     -			rmem->name);
486     -		return -EBUSY;
487     -	}
488 472 
489 473 	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
490 474 	    of_get_flat_dt_prop(node, "no-map", NULL))
+6 -1
kernel/dma/pool.c
···
277 277 {
278 278 	struct gen_pool *pool = NULL;
279 279 	struct page *page;
    280 +	bool pool_found = false;
280 281 
281 282 	while ((pool = dma_guess_pool(pool, gfp))) {
    283 +		pool_found = true;
282 284 		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
283 285 					     phys_addr_ok);
284 286 		if (page)
285 287 			return page;
286 288 	}
287 289 
288     -	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
    290 +	if (pool_found)
    291 +		WARN(!(gfp & __GFP_NOWARN), "DMA pool exhausted for %s\n", dev_name(dev));
    292 +	else
    293 +		WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
289 294 	return NULL;
290 295 }
291 296 