Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: Allow mixing bypass and mapped DMA operation

At the moment we allow bypassing DMA ops only when we can do this for
the entire RAM. However there are configs with mixed type memory
where we could still allow bypassing IOMMU in most cases;
POWERPC with persistent memory is one example.

This adds an arch hook to determine where bypass can still work and
we invoke direct DMA API. The following patch checks the bus limit
on POWERPC to allow or disallow direct mapping.

This adds an ARCH_HAS_DMA_MAP_DIRECT config option to make the arch_xxxx
hooks no-ops by default.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Christoph Hellwig <hch@lst.de>

Authored by Alexey Kardashevskiy; committed by Christoph Hellwig.
8d8d53cf 418baf2c

+26 -4
+14
include/linux/dma-map-ops.h
··· 314 314 void *arch_dma_set_uncached(void *addr, size_t size); 315 315 void arch_dma_clear_uncached(void *addr, size_t size); 316 316 317 + #ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT 318 + bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr); 319 + bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle); 320 + bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg, 321 + int nents); 322 + bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg, 323 + int nents); 324 + #else 325 + #define arch_dma_map_page_direct(d, a) (false) 326 + #define arch_dma_unmap_page_direct(d, a) (false) 327 + #define arch_dma_map_sg_direct(d, s, n) (false) 328 + #define arch_dma_unmap_sg_direct(d, s, n) (false) 329 + #endif 330 + 317 331 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS 318 332 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 319 333 const struct iommu_ops *iommu, bool coherent);
+4
kernel/dma/Kconfig
··· 20 20 config DMA_OPS_BYPASS 21 21 bool 22 22 23 + # Lets platform IOMMU driver choose between bypass and IOMMU 24 + config ARCH_HAS_DMA_MAP_DIRECT 25 + bool 26 + 23 27 config NEED_SG_DMA_LENGTH 24 28 bool 25 29
+8 -4
kernel/dma/mapping.c
··· 149 149 if (WARN_ON_ONCE(!dev->dma_mask)) 150 150 return DMA_MAPPING_ERROR; 151 151 152 - if (dma_map_direct(dev, ops)) 152 + if (dma_map_direct(dev, ops) || 153 + arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size)) 153 154 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); 154 155 else 155 156 addr = ops->map_page(dev, page, offset, size, dir, attrs); ··· 166 165 const struct dma_map_ops *ops = get_dma_ops(dev); 167 166 168 167 BUG_ON(!valid_dma_direction(dir)); 169 - if (dma_map_direct(dev, ops)) 168 + if (dma_map_direct(dev, ops) || 169 + arch_dma_unmap_page_direct(dev, addr + size)) 170 170 dma_direct_unmap_page(dev, addr, size, dir, attrs); 171 171 else if (ops->unmap_page) 172 172 ops->unmap_page(dev, addr, size, dir, attrs); ··· 190 188 if (WARN_ON_ONCE(!dev->dma_mask)) 191 189 return 0; 192 190 193 - if (dma_map_direct(dev, ops)) 191 + if (dma_map_direct(dev, ops) || 192 + arch_dma_map_sg_direct(dev, sg, nents)) 194 193 ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); 195 194 else 196 195 ents = ops->map_sg(dev, sg, nents, dir, attrs); ··· 210 207 211 208 BUG_ON(!valid_dma_direction(dir)); 212 209 debug_dma_unmap_sg(dev, sg, nents, dir); 213 - if (dma_map_direct(dev, ops)) 210 + if (dma_map_direct(dev, ops) || 211 + arch_dma_unmap_sg_direct(dev, sg, nents)) 214 212 dma_direct_unmap_sg(dev, sg, nents, dir, attrs); 215 213 else if (ops->unmap_sg) 216 214 ops->unmap_sg(dev, sg, nents, dir, attrs);