Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v4.0, 235 lines, 7.6 kB
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
                                              struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);
        return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
        struct scatterlist *s;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}
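/*
 * Usage sketch (editor's illustration, not part of the upstream file):
 * a long-lived streaming mapping whose ownership bounces between the
 * device and the CPU via the sync helpers above.  'dev', 'buf' and
 * 'len' are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *
 *	... let the device DMA into the buffer ...
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now safely read buf ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 *	... hand the buffer back for more device DMA; when done ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */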
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t addr,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t addr,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
                                  unsigned long vm_flags,
                                  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                             unsigned long vm_flags, pgprot_t prot,
                             const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
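/*
 * Usage sketch (editor's illustration, not part of the upstream file):
 * mapping a scatter-gather list with the dma_map_sg() wrapper above.
 * 'dev', 'sglist', 'nents' and program_hw_entry() are hypothetical.
 * Note that dma_map_sg() may coalesce entries, so iterate over the
 * returned count but unmap with the original nents.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 *	... device DMA runs ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */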
/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
               dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#endif
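/*
 * Usage sketch (editor's illustration, not part of the upstream file):
 * a driver exposing a coherent DMA buffer to user space from its
 * file_operations .mmap handler via dma_mmap_coherent() above.
 * 'struct mydev' and its fields are hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *md = file->private_data;
 *
 *		return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
 *					 md->dma_handle, md->size);
 *	}
 */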