Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: Add a new dma_need_sync API

Add a new API to check if calls to dma_sync_single_for_{device,cpu} are
required for a given DMA streaming mapping.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20200629130359.2690853-2-hch@lst.de

Authored by Christoph Hellwig and committed by Daniel Borkmann
3aa91625 2bdeb3ed

+30
+8
Documentation/core-api/dma-api.rst
··· 206 206 207 207 :: 208 208 209 + bool 210 + dma_need_sync(struct device *dev, dma_addr_t dma_addr); 211 + 212 + Returns %true if dma_sync_single_for_{device,cpu} calls are required to 213 + transfer memory ownership. Returns %false if those calls can be skipped. 214 + 215 + :: 216 + 209 217 unsigned long 210 218 dma_get_merge_boundary(struct device *dev); 211 219
+1
include/linux/dma-direct.h
··· 87 87 void *cpu_addr, dma_addr_t dma_addr, size_t size, 88 88 unsigned long attrs); 89 89 int dma_direct_supported(struct device *dev, u64 mask); 90 + bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr); 90 91 #endif /* _LINUX_DMA_DIRECT_H */
+5
include/linux/dma-mapping.h
··· 461 461 int dma_set_coherent_mask(struct device *dev, u64 mask); 462 462 u64 dma_get_required_mask(struct device *dev); 463 463 size_t dma_max_mapping_size(struct device *dev); 464 + bool dma_need_sync(struct device *dev, dma_addr_t dma_addr); 464 465 unsigned long dma_get_merge_boundary(struct device *dev); 465 466 #else /* CONFIG_HAS_DMA */ 466 467 static inline dma_addr_t dma_map_page_attrs(struct device *dev, ··· 571 570 static inline size_t dma_max_mapping_size(struct device *dev) 572 571 { 573 572 return 0; 573 + } 574 + static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) 575 + { 576 + return false; 574 577 } 575 578 static inline unsigned long dma_get_merge_boundary(struct device *dev) 576 579 {
+6
kernel/dma/direct.c
··· 530 530 return swiotlb_max_mapping_size(dev); 531 531 return SIZE_MAX; 532 532 } 533 + 534 + bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr) 535 + { 536 + return !dev_is_dma_coherent(dev) || 537 + is_swiotlb_buffer(dma_to_phys(dev, dma_addr)); 538 + }
+10
kernel/dma/mapping.c
··· 397 397 } 398 398 EXPORT_SYMBOL_GPL(dma_max_mapping_size); 399 399 400 + bool dma_need_sync(struct device *dev, dma_addr_t dma_addr) 401 + { 402 + const struct dma_map_ops *ops = get_dma_ops(dev); 403 + 404 + if (dma_is_direct(ops)) 405 + return dma_direct_need_sync(dev, dma_addr); 406 + return ops->sync_single_for_cpu || ops->sync_single_for_device; 407 + } 408 + EXPORT_SYMBOL_GPL(dma_need_sync); 409 + 400 410 unsigned long dma_get_merge_boundary(struct device *dev) 401 411 { 402 412 const struct dma_map_ops *ops = get_dma_ops(dev);