Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: export new dma_*map_phys() interface

Introduce new DMA mapping functions dma_map_phys() and dma_unmap_phys()
that operate directly on physical addresses instead of page+offset
parameters. This provides a more efficient interface for drivers that
already have physical addresses available.

The new functions are implemented as the primary mapping layer, with
the existing dma_map_page_attrs()/dma_map_resource() and
dma_unmap_page_attrs()/dma_unmap_resource() functions converted to simple
wrappers around the phys-based implementations.

In the case of dma_map_page_attrs(), the struct page is converted to a
physical address with the help of the page_to_phys() function, while
dma_map_resource() passes the physical address through as-is, with the
addition of the DMA_ATTR_MMIO attribute.

The old page-based API is preserved in mapping.c to ensure that existing
code won't be affected by changing EXPORT_SYMBOL to EXPORT_SYMBOL_GPL
variant for dma_*map_phys().

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/54cc52af91777906bbe4a386113437ba0bcfba9c.1757423202.git.leonro@nvidia.com

authored by

Leon Romanovsky and committed by
Marek Szyprowski
f7326196 bf0ecb3c

+50 -134
-14
drivers/iommu/dma-iommu.c
··· 1556 1556 __iommu_dma_unmap(dev, start, end - start); 1557 1557 } 1558 1558 1559 - dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, 1560 - size_t size, enum dma_data_direction dir, unsigned long attrs) 1561 - { 1562 - return __iommu_dma_map(dev, phys, size, 1563 - dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, 1564 - dma_get_mask(dev)); 1565 - } 1566 - 1567 - void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, 1568 - size_t size, enum dma_data_direction dir, unsigned long attrs) 1569 - { 1570 - __iommu_dma_unmap(dev, handle, size); 1571 - } 1572 - 1573 1559 static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) 1574 1560 { 1575 1561 size_t alloc_size = PAGE_ALIGN(size);
-2
include/linux/dma-direct.h
··· 149 149 struct page *page, dma_addr_t dma_addr, 150 150 enum dma_data_direction dir); 151 151 int dma_direct_supported(struct device *dev, u64 mask); 152 - dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, 153 - size_t size, enum dma_data_direction dir, unsigned long attrs); 154 152 155 153 #endif /* _LINUX_DMA_DIRECT_H */
+13
include/linux/dma-mapping.h
··· 138 138 unsigned long attrs); 139 139 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, 140 140 enum dma_data_direction dir, unsigned long attrs); 141 + dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size, 142 + enum dma_data_direction dir, unsigned long attrs); 143 + void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size, 144 + enum dma_data_direction dir, unsigned long attrs); 141 145 unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, 142 146 int nents, enum dma_data_direction dir, unsigned long attrs); 143 147 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, ··· 193 189 return DMA_MAPPING_ERROR; 194 190 } 195 191 static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, 192 + size_t size, enum dma_data_direction dir, unsigned long attrs) 193 + { 194 + } 195 + static inline dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, 196 + size_t size, enum dma_data_direction dir, unsigned long attrs) 197 + { 198 + return DMA_MAPPING_ERROR; 199 + } 200 + static inline void dma_unmap_phys(struct device *dev, dma_addr_t addr, 196 201 size_t size, enum dma_data_direction dir, unsigned long attrs) 197 202 { 198 203 }
-4
include/linux/iommu-dma.h
··· 42 42 size_t iommu_dma_max_mapping_size(struct device *dev); 43 43 void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, 44 44 dma_addr_t handle, unsigned long attrs); 45 - dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, 46 - size_t size, enum dma_data_direction dir, unsigned long attrs); 47 - void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, 48 - size_t size, enum dma_data_direction dir, unsigned long attrs); 49 45 struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size, 50 46 enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); 51 47 void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
-2
include/trace/events/dma.h
··· 73 73 TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs)) 74 74 75 75 DEFINE_MAP_EVENT(dma_map_phys); 76 - DEFINE_MAP_EVENT(dma_map_resource); 77 76 78 77 DECLARE_EVENT_CLASS(dma_unmap, 79 78 TP_PROTO(struct device *dev, dma_addr_t addr, size_t size, ··· 110 111 TP_ARGS(dev, addr, size, dir, attrs)) 111 112 112 113 DEFINE_UNMAP_EVENT(dma_unmap_phys); 113 - DEFINE_UNMAP_EVENT(dma_unmap_resource); 114 114 115 115 DECLARE_EVENT_CLASS(dma_alloc_class, 116 116 TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr,
-43
kernel/dma/debug.c
··· 38 38 dma_debug_single, 39 39 dma_debug_sg, 40 40 dma_debug_coherent, 41 - dma_debug_resource, 42 41 dma_debug_noncoherent, 43 42 dma_debug_phy, 44 43 }; ··· 141 142 [dma_debug_single] = "single", 142 143 [dma_debug_sg] = "scatter-gather", 143 144 [dma_debug_coherent] = "coherent", 144 - [dma_debug_resource] = "resource", 145 145 [dma_debug_noncoherent] = "noncoherent", 146 146 [dma_debug_phy] = "phy", 147 147 }; ··· 1437 1439 return; 1438 1440 1439 1441 ref.paddr = virt_to_paddr(virt); 1440 - 1441 - if (unlikely(dma_debug_disabled())) 1442 - return; 1443 - 1444 - check_unmap(&ref); 1445 - } 1446 - 1447 - void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size, 1448 - int direction, dma_addr_t dma_addr, 1449 - unsigned long attrs) 1450 - { 1451 - struct dma_debug_entry *entry; 1452 - 1453 - if (unlikely(dma_debug_disabled())) 1454 - return; 1455 - 1456 - entry = dma_entry_alloc(); 1457 - if (!entry) 1458 - return; 1459 - 1460 - entry->type = dma_debug_resource; 1461 - entry->dev = dev; 1462 - entry->paddr = addr; 1463 - entry->size = size; 1464 - entry->dev_addr = dma_addr; 1465 - entry->direction = direction; 1466 - entry->map_err_type = MAP_ERR_NOT_CHECKED; 1467 - 1468 - add_dma_entry(entry, attrs); 1469 - } 1470 - 1471 - void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, 1472 - size_t size, int direction) 1473 - { 1474 - struct dma_debug_entry ref = { 1475 - .type = dma_debug_resource, 1476 - .dev = dev, 1477 - .dev_addr = dma_addr, 1478 - .size = size, 1479 - .direction = direction, 1480 - }; 1481 1442 1482 1443 if (unlikely(dma_debug_disabled())) 1483 1444 return;
-21
kernel/dma/debug.h
··· 30 30 extern void debug_dma_free_coherent(struct device *dev, size_t size, 31 31 void *virt, dma_addr_t addr); 32 32 33 - extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr, 34 - size_t size, int direction, 35 - dma_addr_t dma_addr, 36 - unsigned long attrs); 37 - 38 - extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr, 39 - size_t size, int direction); 40 - 41 33 extern void debug_dma_sync_single_for_cpu(struct device *dev, 42 34 dma_addr_t dma_handle, size_t size, 43 35 int direction); ··· 84 92 85 93 static inline void debug_dma_free_coherent(struct device *dev, size_t size, 86 94 void *virt, dma_addr_t addr) 87 - { 88 - } 89 - 90 - static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr, 91 - size_t size, int direction, 92 - dma_addr_t dma_addr, 93 - unsigned long attrs) 94 - { 95 - } 96 - 97 - static inline void debug_dma_unmap_resource(struct device *dev, 98 - dma_addr_t dma_addr, size_t size, 99 - int direction) 100 95 { 101 96 } 102 97
-16
kernel/dma/direct.c
··· 497 497 return ret; 498 498 } 499 499 500 - dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, 501 - size_t size, enum dma_data_direction dir, unsigned long attrs) 502 - { 503 - dma_addr_t dma_addr = paddr; 504 - 505 - if (unlikely(!dma_capable(dev, dma_addr, size, false))) { 506 - dev_err_once(dev, 507 - "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n", 508 - &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit); 509 - WARN_ON_ONCE(1); 510 - return DMA_MAPPING_ERROR; 511 - } 512 - 513 - return dma_addr; 514 - } 515 - 516 500 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt, 517 501 void *cpu_addr, dma_addr_t dma_addr, size_t size, 518 502 unsigned long attrs)
+37 -32
kernel/dma/mapping.c
··· 152 152 return dma_go_direct(dev, *dev->dma_mask, ops); 153 153 } 154 154 155 - dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, 156 - size_t offset, size_t size, enum dma_data_direction dir, 157 - unsigned long attrs) 155 + dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size, 156 + enum dma_data_direction dir, unsigned long attrs) 158 157 { 159 158 const struct dma_map_ops *ops = get_dma_ops(dev); 160 - phys_addr_t phys = page_to_phys(page) + offset; 161 159 bool is_mmio = attrs & DMA_ATTR_MMIO; 162 160 dma_addr_t addr; 163 161 ··· 175 177 176 178 addr = ops->map_resource(dev, phys, size, dir, attrs); 177 179 } else { 180 + struct page *page = phys_to_page(phys); 181 + size_t offset = offset_in_page(phys); 182 + 178 183 /* 179 184 * The dma_ops API contract for ops->map_page() requires 180 185 * kmappable memory, while ops->map_resource() does not. ··· 192 191 193 192 return addr; 194 193 } 194 + EXPORT_SYMBOL_GPL(dma_map_phys); 195 + 196 + dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, 197 + size_t offset, size_t size, enum dma_data_direction dir, 198 + unsigned long attrs) 199 + { 200 + phys_addr_t phys = page_to_phys(page) + offset; 201 + 202 + if (unlikely(attrs & DMA_ATTR_MMIO)) 203 + return DMA_MAPPING_ERROR; 204 + 205 + if (IS_ENABLED(CONFIG_DMA_API_DEBUG) && 206 + WARN_ON_ONCE(is_zone_device_page(page))) 207 + return DMA_MAPPING_ERROR; 208 + 209 + return dma_map_phys(dev, phys, size, dir, attrs); 210 + } 195 211 EXPORT_SYMBOL(dma_map_page_attrs); 196 212 197 - void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, 213 + void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size, 198 214 enum dma_data_direction dir, unsigned long attrs) 199 215 { 200 216 const struct dma_map_ops *ops = get_dma_ops(dev); ··· 230 212 ops->unmap_page(dev, addr, size, dir, attrs); 231 213 trace_dma_unmap_phys(dev, addr, size, dir, attrs); 232 214 debug_dma_unmap_phys(dev, addr, 
size, dir); 215 + } 216 + EXPORT_SYMBOL_GPL(dma_unmap_phys); 217 + 218 + void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, 219 + enum dma_data_direction dir, unsigned long attrs) 220 + { 221 + if (unlikely(attrs & DMA_ATTR_MMIO)) 222 + return; 223 + 224 + dma_unmap_phys(dev, addr, size, dir, attrs); 233 225 } 234 226 EXPORT_SYMBOL(dma_unmap_page_attrs); 235 227 ··· 366 338 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, 367 339 size_t size, enum dma_data_direction dir, unsigned long attrs) 368 340 { 369 - const struct dma_map_ops *ops = get_dma_ops(dev); 370 - dma_addr_t addr = DMA_MAPPING_ERROR; 371 - 372 - BUG_ON(!valid_dma_direction(dir)); 373 - 374 - if (WARN_ON_ONCE(!dev->dma_mask)) 341 + if (IS_ENABLED(CONFIG_DMA_API_DEBUG) && 342 + WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) 375 343 return DMA_MAPPING_ERROR; 376 344 377 - if (dma_map_direct(dev, ops)) 378 - addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); 379 - else if (use_dma_iommu(dev)) 380 - addr = iommu_dma_map_resource(dev, phys_addr, size, dir, attrs); 381 - else if (ops->map_resource) 382 - addr = ops->map_resource(dev, phys_addr, size, dir, attrs); 383 - 384 - trace_dma_map_resource(dev, phys_addr, addr, size, dir, attrs); 385 - debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs); 386 - return addr; 345 + return dma_map_phys(dev, phys_addr, size, dir, attrs | DMA_ATTR_MMIO); 387 346 } 388 347 EXPORT_SYMBOL(dma_map_resource); 389 348 390 349 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, 391 350 enum dma_data_direction dir, unsigned long attrs) 392 351 { 393 - const struct dma_map_ops *ops = get_dma_ops(dev); 394 - 395 - BUG_ON(!valid_dma_direction(dir)); 396 - if (dma_map_direct(dev, ops)) 397 - ; /* nothing to do: uncached and no swiotlb */ 398 - else if (use_dma_iommu(dev)) 399 - iommu_dma_unmap_resource(dev, addr, size, dir, attrs); 400 - else if (ops->unmap_resource) 401 - 
ops->unmap_resource(dev, addr, size, dir, attrs); 402 - trace_dma_unmap_resource(dev, addr, size, dir, attrs); 403 - debug_dma_unmap_resource(dev, addr, size, dir); 352 + dma_unmap_phys(dev, addr, size, dir, attrs | DMA_ATTR_MMIO); 404 353 } 405 354 EXPORT_SYMBOL(dma_unmap_resource); 406 355