Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

microblaze: Added DMA sync operations

Added support for dma_direct_sync_single_for_*() and dma_direct_sync_sg_for_*()

Signed-off-by: Eli Billauer <eli.billauer@gmail.com>
Signed-off-by: Michal Simek <monstr@monstr.eu>

authored by

Eli Billauer and committed by
Michal Simek
0fb2a6f2 cf560c18

+60
+60
arch/microblaze/kernel/dma.c
··· 118 118 __dma_sync(dma_address, size, direction); 119 119 } 120 120 121 + static inline void 122 + dma_direct_sync_single_for_cpu(struct device *dev, 123 + dma_addr_t dma_handle, size_t size, 124 + enum dma_data_direction direction) 125 + { 126 + /* 127 + * It's pointless to flush the cache as the memory segment 128 + * is given to the CPU 129 + */ 130 + 131 + if (direction == DMA_FROM_DEVICE) 132 + __dma_sync(dma_handle, size, direction); 133 + } 134 + 135 + static inline void 136 + dma_direct_sync_single_for_device(struct device *dev, 137 + dma_addr_t dma_handle, size_t size, 138 + enum dma_data_direction direction) 139 + { 140 + /* 141 + * It's pointless to invalidate the cache if the device isn't 142 + * supposed to write to the relevant region 143 + */ 144 + 145 + if (direction == DMA_TO_DEVICE) 146 + __dma_sync(dma_handle, size, direction); 147 + } 148 + 149 + static inline void 150 + dma_direct_sync_sg_for_cpu(struct device *dev, 151 + struct scatterlist *sgl, int nents, 152 + enum dma_data_direction direction) 153 + { 154 + struct scatterlist *sg; 155 + int i; 156 + 157 + /* FIXME this part of code is untested */ 158 + if (direction == DMA_FROM_DEVICE) 159 + for_each_sg(sgl, sg, nents, i) 160 + __dma_sync(sg->dma_address, sg->length, direction); 161 + } 162 + 163 + static inline void 164 + dma_direct_sync_sg_for_device(struct device *dev, 165 + struct scatterlist *sgl, int nents, 166 + enum dma_data_direction direction) 167 + { 168 + struct scatterlist *sg; 169 + int i; 170 + 171 + /* FIXME this part of code is untested */ 172 + if (direction == DMA_TO_DEVICE) 173 + for_each_sg(sgl, sg, nents, i) 174 + __dma_sync(sg->dma_address, sg->length, direction); 175 + } 176 + 121 177 struct dma_map_ops dma_direct_ops = { 122 178 .alloc_coherent = dma_direct_alloc_coherent, 123 179 .free_coherent = dma_direct_free_coherent, ··· 182 126 .dma_supported = dma_direct_dma_supported, 183 127 .map_page = dma_direct_map_page, 184 128 .unmap_page = dma_direct_unmap_page, 
129 + .sync_single_for_cpu = dma_direct_sync_single_for_cpu, 130 + .sync_single_for_device = dma_direct_sync_single_for_device, 131 + .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, 132 + .sync_sg_for_device = dma_direct_sync_sg_for_device, 185 133 }; 186 134 EXPORT_SYMBOL(dma_direct_ops); 187 135