Merge tag 'dma-mapping-6.19-2026-01-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping fixes from Marek Szyprowski:

- an important fix for 32-bit ARM systems using the "cma=" kernel
  parameter: the devicetree's linux,cma-default region is now skipped
  early in the reserved-memory scan, before it gets reserved
  (Oreoluwa Babatunde)

- a fix for a corner case in DMA atomic pool based allocations:
  distinguish a missing pool from an exhausted one, and honor
  __GFP_NOWARN in the exhausted case (Sai Sree Kartheek Adivi)

* tag 'dma-mapping-6.19-2026-01-30' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
dma/pool: distinguish between missing and exhausted atomic pools
of: reserved_mem: Allow reserved_mem framework detect "cma=" kernel param

 drivers/of/of_reserved_mem.c | 19 +++++++++++++++++--
 include/linux/cma.h          |  9 +++++++++
 kernel/dma/contiguous.c      | 16 ++++++++++------
 kernel/dma/pool.c            |  7 ++++++-
 4 files changed, 42 insertions(+), 9 deletions(-)

diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -157,13 +157,19 @@
         phys_addr_t base, size;
         int i, len;
         const __be32 *prop;
-        bool nomap;
+        bool nomap, default_cma;
 
         prop = of_flat_dt_get_addr_size_prop(node, "reg", &len);
         if (!prop)
                 return -ENOENT;
 
         nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+        default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
+
+        if (default_cma && cma_skip_dt_default_reserved_mem()) {
+                pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
+                return -EINVAL;
+        }
 
         for (i = 0; i < len; i++) {
                 u64 b, s;
@@ -254,9 +260,12 @@
 
         fdt_for_each_subnode(child, fdt, node) {
                 const char *uname;
+                bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL);
                 u64 b, s;
 
                 if (!of_fdt_device_is_available(fdt, child))
                         continue;
+                if (default_cma && cma_skip_dt_default_reserved_mem())
+                        continue;
 
                 if (!of_flat_dt_get_addr_size(child, "reg", &b, &s))
@@ -398,7 +407,7 @@
         phys_addr_t base = 0, align = 0, size;
         int i, len;
         const __be32 *prop;
-        bool nomap;
+        bool nomap, default_cma;
         int ret;
 
         prop = of_get_flat_dt_prop(node, "size", &len);
@@ -422,6 +431,12 @@
         }
 
         nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+        default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
+
+        if (default_cma && cma_skip_dt_default_reserved_mem()) {
+                pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n");
+                return -EINVAL;
+        }
 
         /* Need adjust the alignment to satisfy the CMA requirement */
         if (IS_ENABLED(CONFIG_CMA)
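
A side note on the idiom above: boolean devicetree properties such as
"no-map" and "linux,cma-default" are presence-only, so a non-NULL
return from of_get_flat_dt_prop() is the flag itself. A minimal,
illustrative sketch of the pattern in an of_scan_flat_dt() callback
(the callback name and pr_debug are mine, not from the patch):

#include <linux/of_fdt.h>
#include <linux/printk.h>

/*
 * Illustrative of_scan_flat_dt() callback, not part of the patch:
 * of_get_flat_dt_prop() returns a non-NULL pointer when the property
 * is present, so the implicit pointer-to-bool conversion (or an
 * explicit != NULL) yields the flag directly.
 */
static int __init my_scan_cb(unsigned long node, const char *uname,
                             int depth, void *data)
{
        bool nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
        bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);

        pr_debug("%s: nomap=%d default_cma=%d\n", uname, nomap, default_cma);
        return 0;       /* 0 means keep scanning */
}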

diff --git a/include/linux/cma.h b/include/linux/cma.h
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -57,6 +57,15 @@
 
 extern void cma_reserve_pages_on_error(struct cma *cma);
 
+#ifdef CONFIG_DMA_CMA
+extern bool cma_skip_dt_default_reserved_mem(void);
+#else
+static inline bool cma_skip_dt_default_reserved_mem(void)
+{
+        return false;
+}
+#endif
+
 #ifdef CONFIG_CMA
 struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp);
 bool cma_free_folio(struct cma *cma, const struct folio *folio);
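
The header pairs the real prototype with a static inline stub, the
usual idiom for keeping call sites free of #ifdef CONFIG_DMA_CMA; the
constant false also lets the compiler drop the skip paths when DMA_CMA
is not built. A generic sketch of the idiom, with a hypothetical
feature name:

/*
 * Hypothetical illustration of the config-stub idiom used above:
 * CONFIG_MY_FEATURE and my_feature_enabled() are made up for the
 * example. Call sites compile unchanged either way.
 */
#ifdef CONFIG_MY_FEATURE
extern bool my_feature_enabled(void);
#else
static inline bool my_feature_enabled(void)
{
        return false;
}
#endif

static void do_feature_work(void);

static void caller(void)
{
        if (my_feature_enabled())       /* no #ifdef at the call site */
                do_feature_work();
}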

diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -91,6 +91,16 @@
 }
 early_param("cma", early_cma);
 
+/*
+ * cma_skip_dt_default_reserved_mem - This is called from the
+ * reserved_mem framework to detect if the default cma region is being
+ * set by the "cma=" kernel parameter.
+ */
+bool __init cma_skip_dt_default_reserved_mem(void)
+{
+        return size_cmdline != -1;
+}
+
 #ifdef CONFIG_DMA_NUMA_CMA
 
 static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
@@ -479,12 +489,6 @@
         bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
         struct cma *cma;
         int err;
-
-        if (size_cmdline != -1 && default_cma) {
-                pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
-                        rmem->name);
-                return -EBUSY;
-        }
 
         if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
             of_get_flat_dt_prop(node, "no-map", NULL))
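
For context, size_cmdline is the sentinel the new helper reads: it
stays at -1 unless the "cma=" early parameter was parsed. A trimmed,
illustrative sketch of that hook (the real early_cma() also handles
optional base/limit arguments):

static phys_addr_t size_cmdline __initdata = -1;

/*
 * Trimmed sketch of the "cma=" hook: once memparse() runs,
 * size_cmdline != -1, which is exactly the condition
 * cma_skip_dt_default_reserved_mem() reports.
 */
static int __init early_cma(char *p)
{
        if (!p)
                return -EINVAL;

        size_cmdline = memparse(p, &p); /* e.g. "cma=128M" */
        return 0;
}
early_param("cma", early_cma);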

diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -277,14 +277,19 @@
 {
         struct gen_pool *pool = NULL;
         struct page *page;
+        bool pool_found = false;
 
         while ((pool = dma_guess_pool(pool, gfp))) {
+                pool_found = true;
                 page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
                                              phys_addr_ok);
                 if (page)
                         return page;
         }
 
-        WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
+        if (pool_found)
+                WARN(!(gfp & __GFP_NOWARN), "DMA pool exhausted for %s\n", dev_name(dev));
+        else
+                WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
         return NULL;
 }
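
From a driver's perspective the distinction is actionable: an atomic
allocation that has its own fallback can now pass __GFP_NOWARN to
silence the exhaustion warning. A hedged usage sketch (the helper is
illustrative, not from the patch):

#include <linux/dma-mapping.h>

/*
 * Illustrative caller, not from the patch: with GFP_ATOMIC the request
 * may be served from the DMA atomic pool. __GFP_NOWARN now suppresses
 * the "pool exhausted" warning, leaving the driver to retry or defer
 * on its own terms.
 */
static void *alloc_atomic_dma(struct device *dev, size_t size,
                              dma_addr_t *handle)
{
        return dma_alloc_coherent(dev, size, handle,
                                  GFP_ATOMIC | __GFP_NOWARN);
}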