Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

nios2: convert to dma_map_ops

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Hellwig and committed by Linus Torvalds.
5a1a67f1 e20dd889

+87 -186
+1
arch/nios2/Kconfig
··· 16 16 select SOC_BUS 17 17 select SPARSE_IRQ 18 18 select USB_ARCH_HAS_HCD if USB_SUPPORT 19 + select HAVE_DMA_ATTRS 19 20 20 21 config GENERIC_CSUM 21 22 def_bool y
+6 -117
arch/nios2/include/asm/dma-mapping.h
··· 10 10 #ifndef _ASM_NIOS2_DMA_MAPPING_H 11 11 #define _ASM_NIOS2_DMA_MAPPING_H 12 12 13 - #include <linux/scatterlist.h> 14 - #include <linux/cache.h> 15 - #include <asm/cacheflush.h> 13 + extern struct dma_map_ops nios2_dma_ops; 16 14 17 - static inline void __dma_sync_for_device(void *vaddr, size_t size, 18 - enum dma_data_direction direction) 15 + static inline struct dma_map_ops *get_dma_ops(struct device *dev) 19 16 { 20 - switch (direction) { 21 - case DMA_FROM_DEVICE: 22 - invalidate_dcache_range((unsigned long)vaddr, 23 - (unsigned long)(vaddr + size)); 24 - break; 25 - case DMA_TO_DEVICE: 26 - /* 27 - * We just need to flush the caches here , but Nios2 flush 28 - * instruction will do both writeback and invalidate. 29 - */ 30 - case DMA_BIDIRECTIONAL: /* flush and invalidate */ 31 - flush_dcache_range((unsigned long)vaddr, 32 - (unsigned long)(vaddr + size)); 33 - break; 34 - default: 35 - BUG(); 36 - } 37 - } 38 - 39 - static inline void __dma_sync_for_cpu(void *vaddr, size_t size, 40 - enum dma_data_direction direction) 41 - { 42 - switch (direction) { 43 - case DMA_BIDIRECTIONAL: 44 - case DMA_FROM_DEVICE: 45 - invalidate_dcache_range((unsigned long)vaddr, 46 - (unsigned long)(vaddr + size)); 47 - break; 48 - case DMA_TO_DEVICE: 49 - break; 50 - default: 51 - BUG(); 52 - } 53 - } 54 - 55 - #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 56 - #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 57 - 58 - void *dma_alloc_coherent(struct device *dev, size_t size, 59 - dma_addr_t *dma_handle, gfp_t flag); 60 - 61 - void dma_free_coherent(struct device *dev, size_t size, 62 - void *vaddr, dma_addr_t dma_handle); 63 - 64 - static inline dma_addr_t dma_map_single(struct device *dev, void *ptr, 65 - size_t size, 66 - enum dma_data_direction direction) 67 - { 68 - BUG_ON(!valid_dma_direction(direction)); 69 - __dma_sync_for_device(ptr, size, direction); 70 - return virt_to_phys(ptr); 71 - } 72 - 73 - static inline 
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, 74 - size_t size, enum dma_data_direction direction) 75 - { 76 - } 77 - 78 - extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 79 - enum dma_data_direction direction); 80 - extern dma_addr_t dma_map_page(struct device *dev, struct page *page, 81 - unsigned long offset, size_t size, enum dma_data_direction direction); 82 - extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, 83 - size_t size, enum dma_data_direction direction); 84 - extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, 85 - int nhwentries, enum dma_data_direction direction); 86 - extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 87 - size_t size, enum dma_data_direction direction); 88 - extern void dma_sync_single_for_device(struct device *dev, 89 - dma_addr_t dma_handle, size_t size, enum dma_data_direction direction); 90 - extern void dma_sync_single_range_for_cpu(struct device *dev, 91 - dma_addr_t dma_handle, unsigned long offset, size_t size, 92 - enum dma_data_direction direction); 93 - extern void dma_sync_single_range_for_device(struct device *dev, 94 - dma_addr_t dma_handle, unsigned long offset, size_t size, 95 - enum dma_data_direction direction); 96 - extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 97 - int nelems, enum dma_data_direction direction); 98 - extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 99 - int nelems, enum dma_data_direction direction); 100 - 101 - static inline int dma_supported(struct device *dev, u64 mask) 102 - { 103 - return 1; 104 - } 105 - 106 - static inline int dma_set_mask(struct device *dev, u64 mask) 107 - { 108 - if (!dev->dma_mask || !dma_supported(dev, mask)) 109 - return -EIO; 110 - 111 - *dev->dma_mask = mask; 112 - 113 - return 0; 114 - } 115 - 116 - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 117 - { 118 - 
return 0; 17 + return &nios2_dma_ops; 119 18 } 120 19 121 20 /* 122 - * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to 123 - * do any flushing here. 124 - */ 21 + * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to 22 + * do any flushing here. 23 + */ 125 24 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 126 25 enum dma_data_direction direction) 127 26 { 128 27 } 129 - 130 - /* drivers/base/dma-mapping.c */ 131 - extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 132 - void *cpu_addr, dma_addr_t dma_addr, size_t size); 133 - extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 134 - void *cpu_addr, dma_addr_t dma_addr, 135 - size_t size); 136 - 137 - #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 138 - #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 139 28 140 29 #endif /* _ASM_NIOS2_DMA_MAPPING_H */
+80 -69
arch/nios2/mm/dma-mapping.c
··· 20 20 #include <linux/cache.h> 21 21 #include <asm/cacheflush.h> 22 22 23 + static inline void __dma_sync_for_device(void *vaddr, size_t size, 24 + enum dma_data_direction direction) 25 + { 26 + switch (direction) { 27 + case DMA_FROM_DEVICE: 28 + invalidate_dcache_range((unsigned long)vaddr, 29 + (unsigned long)(vaddr + size)); 30 + break; 31 + case DMA_TO_DEVICE: 32 + /* 33 + * We just need to flush the caches here , but Nios2 flush 34 + * instruction will do both writeback and invalidate. 35 + */ 36 + case DMA_BIDIRECTIONAL: /* flush and invalidate */ 37 + flush_dcache_range((unsigned long)vaddr, 38 + (unsigned long)(vaddr + size)); 39 + break; 40 + default: 41 + BUG(); 42 + } 43 + } 23 44 24 - void *dma_alloc_coherent(struct device *dev, size_t size, 25 - dma_addr_t *dma_handle, gfp_t gfp) 45 + static inline void __dma_sync_for_cpu(void *vaddr, size_t size, 46 + enum dma_data_direction direction) 47 + { 48 + switch (direction) { 49 + case DMA_BIDIRECTIONAL: 50 + case DMA_FROM_DEVICE: 51 + invalidate_dcache_range((unsigned long)vaddr, 52 + (unsigned long)(vaddr + size)); 53 + break; 54 + case DMA_TO_DEVICE: 55 + break; 56 + default: 57 + BUG(); 58 + } 59 + } 60 + 61 + static void *nios2_dma_alloc(struct device *dev, size_t size, 62 + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 26 63 { 27 64 void *ret; 28 65 ··· 82 45 83 46 return ret; 84 47 } 85 - EXPORT_SYMBOL(dma_alloc_coherent); 86 48 87 - void dma_free_coherent(struct device *dev, size_t size, void *vaddr, 88 - dma_addr_t dma_handle) 49 + static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, 50 + dma_addr_t dma_handle, struct dma_attrs *attrs) 89 51 { 90 52 unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); 91 53 92 54 free_pages(addr, get_order(size)); 93 55 } 94 - EXPORT_SYMBOL(dma_free_coherent); 95 56 96 - int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 97 - enum dma_data_direction direction) 57 + static int 
nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, 58 + int nents, enum dma_data_direction direction, 59 + struct dma_attrs *attrs) 98 60 { 99 61 int i; 100 - 101 - BUG_ON(!valid_dma_direction(direction)); 102 62 103 63 for_each_sg(sg, sg, nents, i) { 104 64 void *addr; ··· 109 75 110 76 return nents; 111 77 } 112 - EXPORT_SYMBOL(dma_map_sg); 113 78 114 - dma_addr_t dma_map_page(struct device *dev, struct page *page, 79 + static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, 115 80 unsigned long offset, size_t size, 116 - enum dma_data_direction direction) 81 + enum dma_data_direction direction, 82 + struct dma_attrs *attrs) 117 83 { 118 - void *addr; 84 + void *addr = page_address(page) + offset; 119 85 120 - BUG_ON(!valid_dma_direction(direction)); 121 - 122 - addr = page_address(page) + offset; 123 86 __dma_sync_for_device(addr, size, direction); 124 - 125 87 return page_to_phys(page) + offset; 126 88 } 127 - EXPORT_SYMBOL(dma_map_page); 128 89 129 - void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, 130 - enum dma_data_direction direction) 90 + static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, 91 + size_t size, enum dma_data_direction direction, 92 + struct dma_attrs *attrs) 131 93 { 132 - BUG_ON(!valid_dma_direction(direction)); 133 - 134 94 __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); 135 95 } 136 - EXPORT_SYMBOL(dma_unmap_page); 137 96 138 - void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, 139 - enum dma_data_direction direction) 97 + static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, 98 + int nhwentries, enum dma_data_direction direction, 99 + struct dma_attrs *attrs) 140 100 { 141 101 void *addr; 142 102 int i; 143 - 144 - BUG_ON(!valid_dma_direction(direction)); 145 103 146 104 if (direction == DMA_TO_DEVICE) 147 105 return; ··· 144 118 __dma_sync_for_cpu(addr, sg->length, direction); 145 119 } 146 
120 } 147 - EXPORT_SYMBOL(dma_unmap_sg); 148 121 149 - void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 150 - size_t size, enum dma_data_direction direction) 122 + static void nios2_dma_sync_single_for_cpu(struct device *dev, 123 + dma_addr_t dma_handle, size_t size, 124 + enum dma_data_direction direction) 151 125 { 152 - BUG_ON(!valid_dma_direction(direction)); 153 - 154 126 __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); 155 127 } 156 - EXPORT_SYMBOL(dma_sync_single_for_cpu); 157 128 158 - void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 159 - size_t size, enum dma_data_direction direction) 129 + static void nios2_dma_sync_single_for_device(struct device *dev, 130 + dma_addr_t dma_handle, size_t size, 131 + enum dma_data_direction direction) 160 132 { 161 - BUG_ON(!valid_dma_direction(direction)); 162 - 163 133 __dma_sync_for_device(phys_to_virt(dma_handle), size, direction); 164 134 } 165 - EXPORT_SYMBOL(dma_sync_single_for_device); 166 135 167 - void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, 168 - unsigned long offset, size_t size, 169 - enum dma_data_direction direction) 170 - { 171 - BUG_ON(!valid_dma_direction(direction)); 172 - 173 - __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction); 174 - } 175 - EXPORT_SYMBOL(dma_sync_single_range_for_cpu); 176 - 177 - void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, 178 - unsigned long offset, size_t size, 179 - enum dma_data_direction direction) 180 - { 181 - BUG_ON(!valid_dma_direction(direction)); 182 - 183 - __dma_sync_for_device(phys_to_virt(dma_handle), size, direction); 184 - } 185 - EXPORT_SYMBOL(dma_sync_single_range_for_device); 186 - 187 - void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, 188 - enum dma_data_direction direction) 136 + static void nios2_dma_sync_sg_for_cpu(struct device *dev, 137 + struct scatterlist *sg, int nelems, 138 + 
enum dma_data_direction direction) 189 139 { 190 140 int i; 191 - 192 - BUG_ON(!valid_dma_direction(direction)); 193 141 194 142 /* Make sure that gcc doesn't leave the empty loop body. */ 195 143 for_each_sg(sg, sg, nelems, i) 196 144 __dma_sync_for_cpu(sg_virt(sg), sg->length, direction); 197 145 } 198 - EXPORT_SYMBOL(dma_sync_sg_for_cpu); 199 146 200 - void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 201 - int nelems, enum dma_data_direction direction) 147 + static void nios2_dma_sync_sg_for_device(struct device *dev, 148 + struct scatterlist *sg, int nelems, 149 + enum dma_data_direction direction) 202 150 { 203 151 int i; 204 - 205 - BUG_ON(!valid_dma_direction(direction)); 206 152 207 153 /* Make sure that gcc doesn't leave the empty loop body. */ 208 154 for_each_sg(sg, sg, nelems, i) 209 155 __dma_sync_for_device(sg_virt(sg), sg->length, direction); 210 156 211 157 } 212 - EXPORT_SYMBOL(dma_sync_sg_for_device); 158 + 159 + struct dma_map_ops nios2_dma_ops = { 160 + .alloc = nios2_dma_alloc, 161 + .free = nios2_dma_free, 162 + .map_page = nios2_dma_map_page, 163 + .unmap_page = nios2_dma_unmap_page, 164 + .map_sg = nios2_dma_map_sg, 165 + .unmap_sg = nios2_dma_unmap_sg, 166 + .sync_single_for_device = nios2_dma_sync_single_for_device, 167 + .sync_single_for_cpu = nios2_dma_sync_single_for_cpu, 168 + .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu, 169 + .sync_sg_for_device = nios2_dma_sync_sg_for_device, 170 + }; 171 + EXPORT_SYMBOL(nios2_dma_ops);