Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-debug: refactor to use physical addresses for page mapping

Convert the DMA debug infrastructure from page-based to physical address-based
mapping, in preparation for relying on physical addresses in the DMA mapping routines.

The refactoring renames debug_dma_map_page() to debug_dma_map_phys() and
changes its signature to accept a phys_addr_t parameter instead of struct page
and offset. Similarly, debug_dma_unmap_page() becomes debug_dma_unmap_phys().
A new dma_debug_phy type is introduced to distinguish physical address mappings
from other debug entry types. All callers throughout the codebase are updated
to pass physical addresses directly, eliminating the need for page-to-physical
conversion in the debug layer.

This refactoring eliminates the need to convert between page pointers and
physical addresses in the debug layer, making the code more efficient and
consistent with the DMA mapping API's physical address focus.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
[mszyprow: added a fixup]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/56d1a6769b68dfcbf8b26a75a7329aeb8e3c3b6a.1757423202.git.leonro@nvidia.com
Link: https://lore.kernel.org/all/20250910052618.GH341237@unreal/

Authored by Leon Romanovsky
Committed by Marek Szyprowski
e9e81d86 c288d657

+35 -35
+2 -2
Documentation/core-api/dma-api.rst
··· 761 761 [<ffffffff80235177>] find_busiest_group+0x207/0x8a0 762 762 [<ffffffff8064784f>] _spin_lock_irqsave+0x1f/0x50 763 763 [<ffffffff803c7ea3>] check_unmap+0x203/0x490 764 - [<ffffffff803c8259>] debug_dma_unmap_page+0x49/0x50 764 + [<ffffffff803c8259>] debug_dma_unmap_phys+0x49/0x50 765 765 [<ffffffff80485f26>] nv_tx_done_optimized+0xc6/0x2c0 766 766 [<ffffffff80486c13>] nv_nic_irq_optimized+0x73/0x2b0 767 767 [<ffffffff8026df84>] handle_IRQ_event+0x34/0x70 ··· 855 855 dma-debug interface debug_dma_mapping_error() to debug drivers that fail 856 856 to check DMA mapping errors on addresses returned by dma_map_single() and 857 857 dma_map_page() interfaces. This interface clears a flag set by 858 - debug_dma_map_page() to indicate that dma_mapping_error() has been called by 858 + debug_dma_map_phys() to indicate that dma_mapping_error() has been called by 859 859 the driver. When driver does unmap, debug_dma_unmap() checks the flag and if 860 860 this flag is still set, prints warning message that includes call trace that 861 861 leads up to the unmap. This interface can be called from dma_mapping_error()
+1
include/linux/page-flags.h
··· 618 618 #else 619 619 PAGEFLAG_FALSE(HighMem, highmem) 620 620 #endif 621 + #define PhysHighMem(__p) (PageHighMem(phys_to_page(__p))) 621 622 622 623 /* Does kmap_local_folio() only allow access to one page of the folio? */ 623 624 #ifdef CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
+20 -19
kernel/dma/debug.c
··· 40 40 dma_debug_coherent, 41 41 dma_debug_resource, 42 42 dma_debug_noncoherent, 43 + dma_debug_phy, 43 44 }; 44 45 45 46 enum map_err_types { ··· 144 143 [dma_debug_coherent] = "coherent", 145 144 [dma_debug_resource] = "resource", 146 145 [dma_debug_noncoherent] = "noncoherent", 146 + [dma_debug_phy] = "phy", 147 147 }; 148 148 149 149 static const char *dir2name[] = { ··· 1056 1054 dma_entry_free(entry); 1057 1055 } 1058 1056 1059 - static void check_for_stack(struct device *dev, 1060 - struct page *page, size_t offset) 1057 + static void check_for_stack(struct device *dev, phys_addr_t phys) 1061 1058 { 1062 1059 void *addr; 1063 1060 struct vm_struct *stack_vm_area = task_stack_vm_area(current); 1064 1061 1065 1062 if (!stack_vm_area) { 1066 1063 /* Stack is direct-mapped. */ 1067 - if (PageHighMem(page)) 1064 + if (PhysHighMem(phys)) 1068 1065 return; 1069 - addr = page_address(page) + offset; 1066 + addr = phys_to_virt(phys); 1070 1067 if (object_is_on_stack(addr)) 1071 1068 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr); 1072 1069 } else { ··· 1073 1072 int i; 1074 1073 1075 1074 for (i = 0; i < stack_vm_area->nr_pages; i++) { 1076 - if (page != stack_vm_area->pages[i]) 1075 + if (__phys_to_pfn(phys) != 1076 + page_to_pfn(stack_vm_area->pages[i])) 1077 1077 continue; 1078 1078 1079 - addr = (u8 *)current->stack + i * PAGE_SIZE + offset; 1079 + addr = (u8 *)current->stack + i * PAGE_SIZE + 1080 + (phys % PAGE_SIZE); 1080 1081 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr); 1081 1082 break; 1082 1083 } ··· 1207 1204 } 1208 1205 EXPORT_SYMBOL(debug_dma_map_single); 1209 1206 1210 - void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, 1211 - size_t size, int direction, dma_addr_t dma_addr, 1212 - unsigned long attrs) 1207 + void debug_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size, 1208 + int direction, dma_addr_t dma_addr, unsigned long attrs) 
1213 1209 { 1214 1210 struct dma_debug_entry *entry; 1215 1211 ··· 1223 1221 return; 1224 1222 1225 1223 entry->dev = dev; 1226 - entry->type = dma_debug_single; 1227 - entry->paddr = page_to_phys(page) + offset; 1224 + entry->type = dma_debug_phy; 1225 + entry->paddr = phys; 1228 1226 entry->dev_addr = dma_addr; 1229 1227 entry->size = size; 1230 1228 entry->direction = direction; 1231 1229 entry->map_err_type = MAP_ERR_NOT_CHECKED; 1232 1230 1233 - check_for_stack(dev, page, offset); 1231 + if (!(attrs & DMA_ATTR_MMIO)) { 1232 + check_for_stack(dev, phys); 1234 1233 1235 - if (!PageHighMem(page)) { 1236 - void *addr = page_address(page) + offset; 1237 - 1238 - check_for_illegal_area(dev, addr, size); 1234 + if (!PhysHighMem(phys)) 1235 + check_for_illegal_area(dev, phys_to_virt(phys), size); 1239 1236 } 1240 1237 1241 1238 add_dma_entry(entry, attrs); ··· 1278 1277 } 1279 1278 EXPORT_SYMBOL(debug_dma_mapping_error); 1280 1279 1281 - void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, 1280 + void debug_dma_unmap_phys(struct device *dev, dma_addr_t dma_addr, 1282 1281 size_t size, int direction) 1283 1282 { 1284 1283 struct dma_debug_entry ref = { 1285 - .type = dma_debug_single, 1284 + .type = dma_debug_phy, 1286 1285 .dev = dev, 1287 1286 .dev_addr = dma_addr, 1288 1287 .size = size, ··· 1306 1305 return; 1307 1306 1308 1307 for_each_sg(sg, s, nents, i) { 1309 - check_for_stack(dev, sg_page(s), s->offset); 1308 + check_for_stack(dev, sg_phys(s)); 1310 1309 if (!PageHighMem(sg_page(s))) 1311 1310 check_for_illegal_area(dev, sg_virt(s), s->length); 1312 1311 }
+7 -9
kernel/dma/debug.h
··· 9 9 #define _KERNEL_DMA_DEBUG_H 10 10 11 11 #ifdef CONFIG_DMA_API_DEBUG 12 - extern void debug_dma_map_page(struct device *dev, struct page *page, 13 - size_t offset, size_t size, 14 - int direction, dma_addr_t dma_addr, 12 + extern void debug_dma_map_phys(struct device *dev, phys_addr_t phys, 13 + size_t size, int direction, dma_addr_t dma_addr, 15 14 unsigned long attrs); 16 15 17 - extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, 16 + extern void debug_dma_unmap_phys(struct device *dev, dma_addr_t addr, 18 17 size_t size, int direction); 19 18 20 19 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, ··· 61 62 size_t size, int direction, 62 63 dma_addr_t dma_addr); 63 64 #else /* CONFIG_DMA_API_DEBUG */ 64 - static inline void debug_dma_map_page(struct device *dev, struct page *page, 65 - size_t offset, size_t size, 66 - int direction, dma_addr_t dma_addr, 67 - unsigned long attrs) 65 + static inline void debug_dma_map_phys(struct device *dev, phys_addr_t phys, 66 + size_t size, int direction, 67 + dma_addr_t dma_addr, unsigned long attrs) 68 68 { 69 69 } 70 70 71 - static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, 71 + static inline void debug_dma_unmap_phys(struct device *dev, dma_addr_t addr, 72 72 size_t size, int direction) 73 73 { 74 74 }
+5 -5
kernel/dma/mapping.c
··· 157 157 unsigned long attrs) 158 158 { 159 159 const struct dma_map_ops *ops = get_dma_ops(dev); 160 + phys_addr_t phys = page_to_phys(page) + offset; 160 161 dma_addr_t addr; 161 162 162 163 BUG_ON(!valid_dma_direction(dir)); ··· 166 165 return DMA_MAPPING_ERROR; 167 166 168 167 if (dma_map_direct(dev, ops) || 169 - arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size)) 168 + arch_dma_map_page_direct(dev, phys + size)) 170 169 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); 171 170 else if (use_dma_iommu(dev)) 172 171 addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs); 173 172 else 174 173 addr = ops->map_page(dev, page, offset, size, dir, attrs); 175 174 kmsan_handle_dma(page, offset, size, dir); 176 - trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir, 177 - attrs); 178 - debug_dma_map_page(dev, page, offset, size, dir, addr, attrs); 175 + trace_dma_map_page(dev, phys, addr, size, dir, attrs); 176 + debug_dma_map_phys(dev, phys, size, dir, addr, attrs); 179 177 180 178 return addr; 181 179 } ··· 194 194 else 195 195 ops->unmap_page(dev, addr, size, dir, attrs); 196 196 trace_dma_unmap_page(dev, addr, size, dir, attrs); 197 - debug_dma_unmap_page(dev, addr, size, dir); 197 + debug_dma_unmap_phys(dev, addr, size, dir); 198 198 } 199 199 EXPORT_SYMBOL(dma_unmap_page_attrs); 200 200