Merge tag 'dma-mapping-6.19-2026-02-06' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux

Pull dma-mapping fixes from Marek Szyprowski:
"Two minor fixes for the DMA-mapping subsystem:

 - check for the rare case where allocation of the global CMA pool
   fails (Shanker Donthineni)

 - avoid perf buffer overflow when tracing large scatter-gather lists
   (Deepanshu Kartikey)"

* tag 'dma-mapping-6.19-2026-02-06' of git://git.kernel.org/pub/scm/linux/kernel/git/mszyprowski/linux:
dma: contiguous: Check return value of dma_contiguous_reserve_area()
tracing/dma: Cap dma_map_sg tracepoint arrays to prevent buffer overflow

 include/trace/events/dma.h | 25 +++++++++++++++++++------
 kernel/dma/contiguous.c    | 10 ++++++----
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h
--- a/include/trace/events/dma.h
+++ b/include/trace/events/dma.h
@@ -275,6 +275,8 @@
                                        sizeof(u64), sizeof(u64)))
 );
 
+#define DMA_TRACE_MAX_ENTRIES 128
+
 TRACE_EVENT(dma_map_sg,
         TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents,
                  int ents, enum dma_data_direction dir, unsigned long attrs),
@@ -282,9 +284,12 @@
 
         TP_STRUCT__entry(
                 __string(device, dev_name(dev))
-                __dynamic_array(u64, phys_addrs, nents)
-                __dynamic_array(u64, dma_addrs, ents)
-                __dynamic_array(unsigned int, lengths, ents)
+                __field(int, full_nents)
+                __field(int, full_ents)
+                __field(bool, truncated)
+                __dynamic_array(u64, phys_addrs, min(nents, DMA_TRACE_MAX_ENTRIES))
+                __dynamic_array(u64, dma_addrs, min(ents, DMA_TRACE_MAX_ENTRIES))
+                __dynamic_array(unsigned int, lengths, min(ents, DMA_TRACE_MAX_ENTRIES))
                 __field(enum dma_data_direction, dir)
                 __field(unsigned long, attrs)
         ),
@@ -292,11 +297,16 @@
         TP_fast_assign(
                 struct scatterlist *sg;
                 int i;
+                int traced_nents = min_t(int, nents, DMA_TRACE_MAX_ENTRIES);
+                int traced_ents = min_t(int, ents, DMA_TRACE_MAX_ENTRIES);
 
                 __assign_str(device);
-                for_each_sg(sgl, sg, nents, i)
+                __entry->full_nents = nents;
+                __entry->full_ents = ents;
+                __entry->truncated = (nents > DMA_TRACE_MAX_ENTRIES) || (ents > DMA_TRACE_MAX_ENTRIES);
+                for_each_sg(sgl, sg, traced_nents, i)
                         ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg);
-                for_each_sg(sgl, sg, ents, i) {
+                for_each_sg(sgl, sg, traced_ents, i) {
                         ((u64 *)__get_dynamic_array(dma_addrs))[i] =
                                         sg_dma_address(sg);
                         ((unsigned int *)__get_dynamic_array(lengths))[i] =
@@ -306,9 +316,12 @@
                 __entry->attrs = attrs;
         ),
 
-        TP_printk("%s dir=%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
+        TP_printk("%s dir=%s nents=%d/%d ents=%d/%d%s dma_addrs=%s sizes=%s phys_addrs=%s attrs=%s",
                   __get_str(device),
                   decode_dma_data_direction(__entry->dir),
+                  min_t(int, __entry->full_nents, DMA_TRACE_MAX_ENTRIES), __entry->full_nents,
+                  min_t(int, __entry->full_ents, DMA_TRACE_MAX_ENTRIES), __entry->full_ents,
+                  __entry->truncated ? " [TRUNCATED]" : "",
                   __print_array(__get_dynamic_array(dma_addrs),
                                 __get_dynamic_array_len(dma_addrs) /
                                         sizeof(u64), sizeof(u64)),
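
For illustration only, a minimal userspace sketch of the capping scheme
applied above: size the recorded arrays by min(count, cap) and flag the
event as truncated, rather than sizing them by an unbounded nents. The
names MAX_TRACE_ENTRIES_DEMO, struct sg_trace_record and cap_record()
are invented for this sketch and are not part of the kernel patch.

#include <stdbool.h>
#include <stdio.h>

#define MAX_TRACE_ENTRIES_DEMO 128

struct sg_trace_record {
        int full_nents;         /* what the caller actually mapped */
        int traced_nents;       /* how many entries the record keeps */
        bool truncated;         /* set when full_nents exceeds the cap */
};

static struct sg_trace_record cap_record(int nents)
{
        struct sg_trace_record rec = {
                .full_nents   = nents,
                .traced_nents = nents < MAX_TRACE_ENTRIES_DEMO ?
                                nents : MAX_TRACE_ENTRIES_DEMO,
                .truncated    = nents > MAX_TRACE_ENTRIES_DEMO,
        };

        return rec;
}

int main(void)
{
        struct sg_trace_record rec = cap_record(4096);

        /* mirrors the "nents=<traced>/<full> [TRUNCATED]" output idea */
        printf("nents=%d/%d%s\n", rec.traced_nents, rec.full_nents,
               rec.truncated ? " [TRUNCATED]" : "");
        return 0;
}

Keeping both the capped and the full counts lets the trace consumer see
at a glance that an event was clipped, without the record size ever
scaling with the scatter-gather list length.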

diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -257,10 +257,12 @@
                 pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                          (unsigned long)selected_size / SZ_1M);
 
-                dma_contiguous_reserve_area(selected_size, selected_base,
-                                            selected_limit,
-                                            &dma_contiguous_default_area,
-                                            fixed);
+                ret = dma_contiguous_reserve_area(selected_size, selected_base,
+                                                  selected_limit,
+                                                  &dma_contiguous_default_area,
+                                                  fixed);
+                if (ret)
+                        return;
 
                 ret = dma_heap_cma_register_heap(dma_contiguous_default_area);
                 if (ret)
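
Along the same lines, a small standalone sketch of the error-handling
pattern the contiguous.c change above introduces: if the reservation
step fails, return early so that the heap registration never runs
against an area that was never created. reserve_area() and
register_heap() are hypothetical stand-ins, not the real
dma_contiguous_reserve_area() / dma_heap_cma_register_heap()
implementations.

#include <stdio.h>

/* stand-in reservation step: may fail and leave *area NULL */
static int reserve_area(long size, void **area)
{
        if (size <= 0)
                return -22;             /* mimic -EINVAL */
        *area = (void *)0x1;            /* pretend an area was set up */
        return 0;
}

/* stand-in registration step: must never be reached with a NULL area */
static int register_heap(void *area)
{
        return area ? 0 : -22;
}

static void reserve_and_register(long size)
{
        void *area = NULL;
        int ret;

        ret = reserve_area(size, &area);
        if (ret) {
                printf("reservation failed (%d), skipping registration\n", ret);
                return;                 /* the fix: bail out before touching 'area' */
        }

        if (register_heap(area))
                printf("heap registration failed\n");
        else
                printf("heap registered\n");
}

int main(void)
{
        reserve_and_register(0);        /* failure path: registration skipped */
        reserve_and_register(64);       /* success path */
        return 0;
}

Running the failure path first shows that registration is simply
skipped, which is what the early return in the patch guarantees.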