/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Internals of the DMA direct mapping implementation.  Only for use by the
 * DMA mapping code and IOMMU drivers.
 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
#include <linux/swiotlb.h>

extern unsigned int zone_dma_bits;

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it.  The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
		bool is_ram)
{
	dma_addr_t end = addr + size - 1;

	if (!dev->dma_mask)
		return false;

	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
		return false;

	return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
}
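
/*
 * Illustrative sketch (not part of the upstream header): a worked example of
 * the generic __phys_to_dma()/__dma_to_phys() translation above, for a
 * hypothetical device whose bus addresses are offset from CPU physical
 * addresses.  Assume PAGE_SHIFT == 12 and dev->dma_pfn_offset == 0x80000,
 * i.e. a 2 GiB offset:
 *
 *	phys_addr_t paddr = 0x90000000;
 *	dma_addr_t daddr = __phys_to_dma(dev, paddr);
 *	// daddr == 0x90000000 - (0x80000UL << 12) == 0x10000000,
 *	// and __dma_to_phys(dev, daddr) round-trips back to 0x90000000.
 *
 * dma_capable() then validates such a bus address against the device's
 * addressing limits: a 32-bit device (*dev->dma_mask == 0xffffffff) accepts
 * the mapping only if daddr + size - 1 still fits below 4 GiB (and below
 * dev->bus_dma_limit, when that is non-zero).
 */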

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (swiotlb_force != SWIOTLB_NO_FORCE)
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
#endif /* _LINUX_DMA_DIRECT_H */
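
/*
 * Illustrative call flow (a sketch, not part of the upstream v5.9 header):
 * how the generic mapping code typically drives the streaming helpers above
 * for a one-shot device-to-memory transfer; error handling beyond the
 * mapping check is elided.  Note that dma_direct_unmap_page() performs the
 * CPU sync itself unless DMA_ATTR_SKIP_CPU_SYNC is passed, so no explicit
 * dma_direct_sync_single_for_cpu() call is needed here.
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_direct_map_page(dev, page, offset, size,
 *				   DMA_FROM_DEVICE, 0);
 *	if (addr == DMA_MAPPING_ERROR)
 *		return -ENOMEM;
 *	// ... the device DMAs into the buffer ...
 *	dma_direct_unmap_page(dev, addr, size, DMA_FROM_DEVICE, 0);
 */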