Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

convert the DMA API to use dma_ops

This rewrites asm/dma-mapping.h to convert the DMA API to use dma_ops.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by FUJITA Tomonori and committed by Ingo Molnar.
b7ea6e95 4d9b977c

+77 -36
+77 -36
arch/ia64/include/asm/dma-mapping.h
··· 65 65 extern struct ia64_machine_vector ia64_mv; 66 66 extern void set_iommu_machvec(void); 67 67 68 - #define dma_alloc_coherent(dev, size, handle, gfp) \ 69 - platform_dma_alloc_coherent(dev, size, handle, (gfp) | GFP_DMA) 68 + static inline void *dma_alloc_coherent(struct device *dev, size_t size, 69 + dma_addr_t *daddr, gfp_t gfp) 70 + { 71 + return dma_ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA); 72 + } 70 73 71 - /* coherent mem. is cheap */ 72 - static inline void * 73 - dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 74 - gfp_t flag) 74 + static inline void dma_free_coherent(struct device *dev, size_t size, 75 + void *caddr, dma_addr_t daddr) 75 76 { 76 - return dma_alloc_coherent(dev, size, dma_handle, flag); 77 + dma_ops->free_coherent(dev, size, caddr, daddr); 77 78 } 78 - #define dma_free_coherent platform_dma_free_coherent 79 - static inline void 80 - dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, 81 - dma_addr_t dma_handle) 79 + 80 + #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 81 + #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 82 + 83 + static inline dma_addr_t dma_map_single_attrs(struct device *dev, 84 + void *caddr, size_t size, 85 + enum dma_data_direction dir, 86 + struct dma_attrs *attrs) 82 87 { 83 - dma_free_coherent(dev, size, cpu_addr, dma_handle); 88 + return dma_ops->map_single_attrs(dev, caddr, size, dir, attrs); 84 89 } 85 - #define dma_map_single_attrs platform_dma_map_single_attrs 86 - static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, 87 - size_t size, int dir) 90 + 91 + static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, 92 + size_t size, 93 + enum dma_data_direction dir, 94 + struct dma_attrs *attrs) 88 95 { 89 - return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL); 96 + dma_ops->unmap_single_attrs(dev, daddr, size, dir, attrs); 90 97 } 91 - #define 
dma_map_sg_attrs platform_dma_map_sg_attrs 92 - static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl, 93 - int nents, int dir) 98 + 99 + #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) 100 + #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) 101 + 102 + static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, 103 + int nents, enum dma_data_direction dir, 104 + struct dma_attrs *attrs) 94 105 { 95 - return dma_map_sg_attrs(dev, sgl, nents, dir, NULL); 106 + return dma_ops->map_sg_attrs(dev, sgl, nents, dir, attrs); 96 107 } 97 - #define dma_unmap_single_attrs platform_dma_unmap_single_attrs 98 - static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr, 99 - size_t size, int dir) 108 + 109 + static inline void dma_unmap_sg_attrs(struct device *dev, 110 + struct scatterlist *sgl, int nents, 111 + enum dma_data_direction dir, 112 + struct dma_attrs *attrs) 100 113 { 101 - return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL); 114 + dma_ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs); 102 115 } 103 - #define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs 104 - static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl, 105 - int nents, int dir) 116 + 117 + #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) 118 + #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) 119 + 120 + static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t daddr, 121 + size_t size, 122 + enum dma_data_direction dir) 106 123 { 107 - return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL); 124 + dma_ops->sync_single_for_cpu(dev, daddr, size, dir); 108 125 } 109 - #define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu 110 - #define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu 111 - #define dma_sync_single_for_device platform_dma_sync_single_for_device 112 - #define dma_sync_sg_for_device 
platform_dma_sync_sg_for_device 113 - #define dma_mapping_error platform_dma_mapping_error 126 + 127 + static inline void dma_sync_sg_for_cpu(struct device *dev, 128 + struct scatterlist *sgl, 129 + int nents, enum dma_data_direction dir) 130 + { 131 + dma_ops->sync_sg_for_cpu(dev, sgl, nents, dir); 132 + } 133 + 134 + static inline void dma_sync_single_for_device(struct device *dev, 135 + dma_addr_t daddr, 136 + size_t size, 137 + enum dma_data_direction dir) 138 + { 139 + dma_ops->sync_single_for_device(dev, daddr, size, dir); 140 + } 141 + 142 + static inline void dma_sync_sg_for_device(struct device *dev, 143 + struct scatterlist *sgl, 144 + int nents, 145 + enum dma_data_direction dir) 146 + { 147 + dma_ops->sync_sg_for_device(dev, sgl, nents, dir); 148 + } 149 + 150 + static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) 151 + { 152 + return dma_ops->mapping_error(dev, daddr); 153 + } 114 154 115 155 #define dma_map_page(dev, pg, off, size, dir) \ 116 156 dma_map_single(dev, page_address(pg) + (off), (size), (dir)) ··· 167 127 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ 168 128 dma_sync_single_for_device(dev, dma_handle, size, dir) 169 129 170 - #define dma_supported platform_dma_supported 130 + static inline int dma_supported(struct device *dev, u64 mask) 131 + { 132 + return dma_ops->dma_supported_op(dev, mask); 133 + } 171 134 172 135 static inline int 173 136 dma_set_mask (struct device *dev, u64 mask) ··· 200 157 { 201 158 return dma_ops; 202 159 } 203 - 204 - 205 160 206 161 #endif /* _ASM_IA64_DMA_MAPPING_H */