Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arc: convert to dma_map_ops

[vgupta@synopsys.com: ARC: dma mapping fixes #2]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Cc: Carlos Palminha <CARLOS.PALMINHA@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Hellwig; committed by Linus Torvalds.
052c96db 0d4a619b

+110 -230
+1
arch/arc/Kconfig
··· 38 38 select OF_EARLY_FLATTREE 39 39 select PERF_USE_VMALLOC 40 40 select HAVE_DEBUG_STACKOVERFLOW 41 + select HAVE_DMA_ATTRS 41 42 42 43 config TRACE_IRQFLAGS_SUPPORT 43 44 def_bool y
+4 -183
arch/arc/include/asm/dma-mapping.h
··· 11 11 #ifndef ASM_ARC_DMA_MAPPING_H 12 12 #define ASM_ARC_DMA_MAPPING_H 13 13 14 - #include <asm-generic/dma-coherent.h> 15 - #include <asm/cacheflush.h> 14 + extern struct dma_map_ops arc_dma_ops; 16 15 17 - void *dma_alloc_noncoherent(struct device *dev, size_t size, 18 - dma_addr_t *dma_handle, gfp_t gfp); 19 - 20 - void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, 21 - dma_addr_t dma_handle); 22 - 23 - void *dma_alloc_coherent(struct device *dev, size_t size, 24 - dma_addr_t *dma_handle, gfp_t gfp); 25 - 26 - void dma_free_coherent(struct device *dev, size_t size, void *kvaddr, 27 - dma_addr_t dma_handle); 28 - 29 - /* drivers/base/dma-mapping.c */ 30 - extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 31 - void *cpu_addr, dma_addr_t dma_addr, size_t size); 32 - extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 33 - void *cpu_addr, dma_addr_t dma_addr, 34 - size_t size); 35 - 36 - #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 37 - #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 38 - 39 - /* 40 - * streaming DMA Mapping API... 
41 - * CPU accesses page via normal paddr, thus needs to explicitly made 42 - * consistent before each use 43 - */ 44 - 45 - static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size, 46 - enum dma_data_direction dir) 16 + static inline struct dma_map_ops *get_dma_ops(struct device *dev) 47 17 { 48 - switch (dir) { 49 - case DMA_FROM_DEVICE: 50 - dma_cache_inv(paddr, size); 51 - break; 52 - case DMA_TO_DEVICE: 53 - dma_cache_wback(paddr, size); 54 - break; 55 - case DMA_BIDIRECTIONAL: 56 - dma_cache_wback_inv(paddr, size); 57 - break; 58 - default: 59 - pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr); 60 - } 18 + return &arc_dma_ops; 61 19 } 62 20 63 - void __arc_dma_cache_sync(unsigned long paddr, size_t size, 64 - enum dma_data_direction dir); 65 - 66 - #define _dma_cache_sync(addr, sz, dir) \ 67 - do { \ 68 - if (__builtin_constant_p(dir)) \ 69 - __inline_dma_cache_sync(addr, sz, dir); \ 70 - else \ 71 - __arc_dma_cache_sync(addr, sz, dir); \ 72 - } \ 73 - while (0); 74 - 75 - static inline dma_addr_t 76 - dma_map_single(struct device *dev, void *cpu_addr, size_t size, 77 - enum dma_data_direction dir) 78 - { 79 - _dma_cache_sync((unsigned long)cpu_addr, size, dir); 80 - return (dma_addr_t)cpu_addr; 81 - } 82 - 83 - static inline void 84 - dma_unmap_single(struct device *dev, dma_addr_t dma_addr, 85 - size_t size, enum dma_data_direction dir) 86 - { 87 - } 88 - 89 - static inline dma_addr_t 90 - dma_map_page(struct device *dev, struct page *page, 91 - unsigned long offset, size_t size, 92 - enum dma_data_direction dir) 93 - { 94 - unsigned long paddr = page_to_phys(page) + offset; 95 - return dma_map_single(dev, (void *)paddr, size, dir); 96 - } 97 - 98 - static inline void 99 - dma_unmap_page(struct device *dev, dma_addr_t dma_handle, 100 - size_t size, enum dma_data_direction dir) 101 - { 102 - } 103 - 104 - static inline int 105 - dma_map_sg(struct device *dev, struct scatterlist *sg, 106 - int nents, enum dma_data_direction dir) 
107 - { 108 - struct scatterlist *s; 109 - int i; 110 - 111 - for_each_sg(sg, s, nents, i) 112 - s->dma_address = dma_map_page(dev, sg_page(s), s->offset, 113 - s->length, dir); 114 - 115 - return nents; 116 - } 117 - 118 - static inline void 119 - dma_unmap_sg(struct device *dev, struct scatterlist *sg, 120 - int nents, enum dma_data_direction dir) 121 - { 122 - struct scatterlist *s; 123 - int i; 124 - 125 - for_each_sg(sg, s, nents, i) 126 - dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); 127 - } 128 - 129 - static inline void 130 - dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, 131 - size_t size, enum dma_data_direction dir) 132 - { 133 - _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); 134 - } 135 - 136 - static inline void 137 - dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, 138 - size_t size, enum dma_data_direction dir) 139 - { 140 - _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); 141 - } 142 - 143 - static inline void 144 - dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, 145 - unsigned long offset, size_t size, 146 - enum dma_data_direction direction) 147 - { 148 - _dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE); 149 - } 150 - 151 - static inline void 152 - dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, 153 - unsigned long offset, size_t size, 154 - enum dma_data_direction direction) 155 - { 156 - _dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE); 157 - } 158 - 159 - static inline void 160 - dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, 161 - enum dma_data_direction dir) 162 - { 163 - int i; 164 - struct scatterlist *sg; 165 - 166 - for_each_sg(sglist, sg, nelems, i) 167 - _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); 168 - } 169 - 170 - static inline void 171 - dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, 172 - int nelems, enum dma_data_direction 
dir) 173 - { 174 - int i; 175 - struct scatterlist *sg; 176 - 177 - for_each_sg(sglist, sg, nelems, i) 178 - _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); 179 - } 180 - 181 - static inline int dma_supported(struct device *dev, u64 dma_mask) 182 - { 183 - /* Support 32 bit DMA mask exclusively */ 184 - return dma_mask == DMA_BIT_MASK(32); 185 - } 186 - 187 - static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 188 - { 189 - return 0; 190 - } 191 - 192 - static inline int dma_set_mask(struct device *dev, u64 dma_mask) 193 - { 194 - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 195 - return -EIO; 196 - 197 - *dev->dma_mask = dma_mask; 198 - 199 - return 0; 200 - } 21 + #include <asm-generic/dma-mapping-common.h> 201 22 202 23 #endif
+105 -47
arch/arc/mm/dma.c
··· 17 17 */ 18 18 19 19 #include <linux/dma-mapping.h> 20 - #include <linux/dma-debug.h> 21 - #include <linux/export.h> 22 20 #include <asm/cache.h> 23 21 #include <asm/cacheflush.h> 24 22 25 - /* 26 - * Helpers for Coherent DMA API. 27 - */ 28 - void *dma_alloc_noncoherent(struct device *dev, size_t size, 29 - dma_addr_t *dma_handle, gfp_t gfp) 23 + 24 + static void *arc_dma_alloc(struct device *dev, size_t size, 25 + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 30 26 { 31 - void *paddr; 27 + void *paddr, *kvaddr; 32 28 33 29 /* This is linear addr (0x8000_0000 based) */ 34 30 paddr = alloc_pages_exact(size, gfp); ··· 33 37 34 38 /* This is bus address, platform dependent */ 35 39 *dma_handle = (dma_addr_t)paddr; 36 - 37 - return paddr; 38 - } 39 - EXPORT_SYMBOL(dma_alloc_noncoherent); 40 - 41 - void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, 42 - dma_addr_t dma_handle) 43 - { 44 - free_pages_exact((void *)dma_handle, size); 45 - } 46 - EXPORT_SYMBOL(dma_free_noncoherent); 47 - 48 - void *dma_alloc_coherent(struct device *dev, size_t size, 49 - dma_addr_t *dma_handle, gfp_t gfp) 50 - { 51 - void *paddr, *kvaddr; 52 40 53 41 /* 54 42 * IOC relies on all data (even coherent DMA data) being in cache ··· 45 65 * -For coherent data, Read/Write to buffers terminate early in cache 46 66 * (vs. 
always going to memory - thus are faster) 47 67 */ 48 - if (is_isa_arcv2() && ioc_exists) 49 - return dma_alloc_noncoherent(dev, size, dma_handle, gfp); 50 - 51 - /* This is linear addr (0x8000_0000 based) */ 52 - paddr = alloc_pages_exact(size, gfp); 53 - if (!paddr) 54 - return NULL; 68 + if ((is_isa_arcv2() && ioc_exists) || 69 + dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) 70 + return paddr; 55 71 56 72 /* This is kernel Virtual address (0x7000_0000 based) */ 57 73 kvaddr = ioremap_nocache((unsigned long)paddr, size); 58 74 if (kvaddr == NULL) 59 75 return NULL; 60 - 61 - /* This is bus address, platform dependent */ 62 - *dma_handle = (dma_addr_t)paddr; 63 76 64 77 /* 65 78 * Evict any existing L1 and/or L2 lines for the backing page ··· 68 95 69 96 return kvaddr; 70 97 } 71 - EXPORT_SYMBOL(dma_alloc_coherent); 72 98 73 - void dma_free_coherent(struct device *dev, size_t size, void *kvaddr, 74 - dma_addr_t dma_handle) 99 + static void arc_dma_free(struct device *dev, size_t size, void *vaddr, 100 + dma_addr_t dma_handle, struct dma_attrs *attrs) 75 101 { 76 - if (is_isa_arcv2() && ioc_exists) 77 - return dma_free_noncoherent(dev, size, kvaddr, dma_handle); 78 - 79 - iounmap((void __force __iomem *)kvaddr); 102 + if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) && 103 + !(is_isa_arcv2() && ioc_exists)) 104 + iounmap((void __force __iomem *)vaddr); 80 105 81 106 free_pages_exact((void *)dma_handle, size); 82 107 } 83 - EXPORT_SYMBOL(dma_free_coherent); 84 108 85 109 /* 86 - * Helper for streaming DMA... 110 + * streaming DMA Mapping API... 
111 + * CPU accesses page via normal paddr, thus needs to explicitly made 112 + * consistent before each use 87 113 */ 88 - void __arc_dma_cache_sync(unsigned long paddr, size_t size, 89 - enum dma_data_direction dir) 114 + static void _dma_cache_sync(unsigned long paddr, size_t size, 115 + enum dma_data_direction dir) 90 116 { 91 - __inline_dma_cache_sync(paddr, size, dir); 117 + switch (dir) { 118 + case DMA_FROM_DEVICE: 119 + dma_cache_inv(paddr, size); 120 + break; 121 + case DMA_TO_DEVICE: 122 + dma_cache_wback(paddr, size); 123 + break; 124 + case DMA_BIDIRECTIONAL: 125 + dma_cache_wback_inv(paddr, size); 126 + break; 127 + default: 128 + pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr); 129 + } 92 130 } 93 - EXPORT_SYMBOL(__arc_dma_cache_sync); 131 + 132 + static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, 133 + unsigned long offset, size_t size, enum dma_data_direction dir, 134 + struct dma_attrs *attrs) 135 + { 136 + unsigned long paddr = page_to_phys(page) + offset; 137 + _dma_cache_sync(paddr, size, dir); 138 + return (dma_addr_t)paddr; 139 + } 140 + 141 + static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, 142 + int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 143 + { 144 + struct scatterlist *s; 145 + int i; 146 + 147 + for_each_sg(sg, s, nents, i) 148 + s->dma_address = dma_map_page(dev, sg_page(s), s->offset, 149 + s->length, dir); 150 + 151 + return nents; 152 + } 153 + 154 + static void arc_dma_sync_single_for_cpu(struct device *dev, 155 + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) 156 + { 157 + _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); 158 + } 159 + 160 + static void arc_dma_sync_single_for_device(struct device *dev, 161 + dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) 162 + { 163 + _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); 164 + } 165 + 166 + static void arc_dma_sync_sg_for_cpu(struct device *dev, 167 + struct scatterlist 
*sglist, int nelems, 168 + enum dma_data_direction dir) 169 + { 170 + int i; 171 + struct scatterlist *sg; 172 + 173 + for_each_sg(sglist, sg, nelems, i) 174 + _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); 175 + } 176 + 177 + static void arc_dma_sync_sg_for_device(struct device *dev, 178 + struct scatterlist *sglist, int nelems, 179 + enum dma_data_direction dir) 180 + { 181 + int i; 182 + struct scatterlist *sg; 183 + 184 + for_each_sg(sglist, sg, nelems, i) 185 + _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); 186 + } 187 + 188 + static int arc_dma_supported(struct device *dev, u64 dma_mask) 189 + { 190 + /* Support 32 bit DMA mask exclusively */ 191 + return dma_mask == DMA_BIT_MASK(32); 192 + } 193 + 194 + struct dma_map_ops arc_dma_ops = { 195 + .alloc = arc_dma_alloc, 196 + .free = arc_dma_free, 197 + .map_page = arc_dma_map_page, 198 + .map_sg = arc_dma_map_sg, 199 + .sync_single_for_device = arc_dma_sync_single_for_device, 200 + .sync_single_for_cpu = arc_dma_sync_single_for_cpu, 201 + .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu, 202 + .sync_sg_for_device = arc_dma_sync_sg_for_device, 203 + .dma_supported = arc_dma_supported, 204 + }; 205 + EXPORT_SYMBOL(arc_dma_ops);