Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping

Pull DMA mapping branch from Marek Szyprowski:
"Short summary for the whole series:

A few limitations have been identified in the current dma-mapping
design and its implementations for various architectures. There exist
more than one function for allocating and freeing the buffers:
currently these 3 are used: dma_{alloc,free}_coherent,
dma_{alloc,free}_writecombine, dma_{alloc,free}_noncoherent.

For most of the systems these calls are almost equivalent and can be
interchanged. For others, especially the truly non-coherent ones
(like ARM), the difference can be easily noticed in overall driver
performance. Sadly not all architectures provide implementations for
all of them, so the drivers might need to be adapted and cannot be
easily shared between different architectures. The provided patches
unify all these functions and hide the differences under the already
existing dma attributes concept. The thread with more references is
available here:

http://www.spinics.net/lists/linux-sh/msg09777.html

These patches are also a prerequisite for unifying DMA-mapping
implementation on ARM architecture with the common one provided by
dma_map_ops structure and extending it with IOMMU support. More
information is available in the following thread:

http://thread.gmane.org/gmane.linux.kernel.cross-arch/12819

More works on dma-mapping framework are planned, especially in the
area of buffer sharing and managing the shared mappings (together with
the recently introduced dma_buf interface: commit d15bd7ee445d
"dma-buf: Introduce dma buffer sharing mechanism").

The patches in the current set introduce new alloc/free methods
(with support for memory attributes) in the dma_map_ops structure, which
will later replace the dma_alloc_coherent and dma_alloc_writecombine
functions."

People finally started piping up with support for merging this, so I'm
merging it as the last of the pending stuff from the merge window.
Looks like pohmelfs is going to wait for 3.5 and more external support
for merging.

* 'for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
common: DMA-mapping: add NON-CONSISTENT attribute
common: DMA-mapping: add WRITE_COMBINE attribute
common: dma-mapping: introduce mmap method
common: dma-mapping: remove old alloc_coherent and free_coherent methods
Hexagon: adapt for dma_map_ops changes
Unicore32: adapt for dma_map_ops changes
Microblaze: adapt for dma_map_ops changes
SH: adapt for dma_map_ops changes
Alpha: adapt for dma_map_ops changes
SPARC: adapt for dma_map_ops changes
PowerPC: adapt for dma_map_ops changes
MIPS: adapt for dma_map_ops changes
X86 & IA64: adapt for dma_map_ops changes
common: dma-mapping: introduce generic alloc() and free() methods

+354 -198
+18
Documentation/DMA-attributes.txt
··· 31 31 Since it is optional for platforms to implement DMA_ATTR_WEAK_ORDERING, 32 32 those that do not will simply ignore the attribute and exhibit default 33 33 behavior. 34 + 35 + DMA_ATTR_WRITE_COMBINE 36 + ---------------------- 37 + 38 + DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be 39 + buffered to improve performance. 40 + 41 + Since it is optional for platforms to implement DMA_ATTR_WRITE_COMBINE, 42 + those that do not will simply ignore the attribute and exhibit default 43 + behavior. 44 + 45 + DMA_ATTR_NON_CONSISTENT 46 + ----------------------- 47 + 48 + DMA_ATTR_NON_CONSISTENT lets the platform to choose to return either 49 + consistent or non-consistent memory as it sees fit. By using this API, 50 + you are guaranteeing to the platform that you have all the correct and 51 + necessary sync points for this memory in the driver.
+12 -6
arch/alpha/include/asm/dma-mapping.h
··· 12 12 13 13 #include <asm-generic/dma-mapping-common.h> 14 14 15 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 16 - dma_addr_t *dma_handle, gfp_t gfp) 15 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 16 + 17 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 18 + dma_addr_t *dma_handle, gfp_t gfp, 19 + struct dma_attrs *attrs) 17 20 { 18 - return get_dma_ops(dev)->alloc_coherent(dev, size, dma_handle, gfp); 21 + return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs); 19 22 } 20 23 21 - static inline void dma_free_coherent(struct device *dev, size_t size, 22 - void *vaddr, dma_addr_t dma_handle) 24 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 25 + 26 + static inline void dma_free_attrs(struct device *dev, size_t size, 27 + void *vaddr, dma_addr_t dma_handle, 28 + struct dma_attrs *attrs) 23 29 { 24 - get_dma_ops(dev)->free_coherent(dev, size, vaddr, dma_handle); 30 + get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs); 25 31 } 26 32 27 33 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+6 -4
arch/alpha/kernel/pci-noop.c
··· 108 108 } 109 109 110 110 static void *alpha_noop_alloc_coherent(struct device *dev, size_t size, 111 - dma_addr_t *dma_handle, gfp_t gfp) 111 + dma_addr_t *dma_handle, gfp_t gfp, 112 + struct dma_attrs *attrs) 112 113 { 113 114 void *ret; 114 115 ··· 124 123 } 125 124 126 125 static void alpha_noop_free_coherent(struct device *dev, size_t size, 127 - void *cpu_addr, dma_addr_t dma_addr) 126 + void *cpu_addr, dma_addr_t dma_addr, 127 + struct dma_attrs *attrs) 128 128 { 129 129 free_pages((unsigned long)cpu_addr, get_order(size)); 130 130 } ··· 176 174 } 177 175 178 176 struct dma_map_ops alpha_noop_ops = { 179 - .alloc_coherent = alpha_noop_alloc_coherent, 180 - .free_coherent = alpha_noop_free_coherent, 177 + .alloc = alpha_noop_alloc_coherent, 178 + .free = alpha_noop_free_coherent, 181 179 .map_page = alpha_noop_map_page, 182 180 .map_sg = alpha_noop_map_sg, 183 181 .mapping_error = alpha_noop_mapping_error,
+6 -4
arch/alpha/kernel/pci_iommu.c
··· 434 434 else DMA_ADDRP is undefined. */ 435 435 436 436 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, 437 - dma_addr_t *dma_addrp, gfp_t gfp) 437 + dma_addr_t *dma_addrp, gfp_t gfp, 438 + struct dma_attrs *attrs) 438 439 { 439 440 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 440 441 void *cpu_addr; ··· 479 478 DMA_ADDR past this call are illegal. */ 480 479 481 480 static void alpha_pci_free_coherent(struct device *dev, size_t size, 482 - void *cpu_addr, dma_addr_t dma_addr) 481 + void *cpu_addr, dma_addr_t dma_addr, 482 + struct dma_attrs *attrs) 483 483 { 484 484 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 485 485 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); ··· 954 952 } 955 953 956 954 struct dma_map_ops alpha_pci_ops = { 957 - .alloc_coherent = alpha_pci_alloc_coherent, 958 - .free_coherent = alpha_pci_free_coherent, 955 + .alloc = alpha_pci_alloc_coherent, 956 + .free = alpha_pci_free_coherent, 959 957 .map_page = alpha_pci_map_page, 960 958 .unmap_page = alpha_pci_unmap_page, 961 959 .map_sg = alpha_pci_map_sg,
+12 -6
arch/hexagon/include/asm/dma-mapping.h
··· 71 71 return (dma_addr == bad_dma_address); 72 72 } 73 73 74 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 75 - dma_addr_t *dma_handle, gfp_t flag) 74 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 75 + 76 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 77 + dma_addr_t *dma_handle, gfp_t flag, 78 + struct dma_attrs *attrs) 76 79 { 77 80 void *ret; 78 81 struct dma_map_ops *ops = get_dma_ops(dev); 79 82 80 83 BUG_ON(!dma_ops); 81 84 82 - ret = ops->alloc_coherent(dev, size, dma_handle, flag); 85 + ret = ops->alloc(dev, size, dma_handle, flag, attrs); 83 86 84 87 debug_dma_alloc_coherent(dev, size, *dma_handle, ret); 85 88 86 89 return ret; 87 90 } 88 91 89 - static inline void dma_free_coherent(struct device *dev, size_t size, 90 - void *cpu_addr, dma_addr_t dma_handle) 92 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 93 + 94 + static inline void dma_free_attrs(struct device *dev, size_t size, 95 + void *cpu_addr, dma_addr_t dma_handle, 96 + struct dma_attrs *attrs) 91 97 { 92 98 struct dma_map_ops *dma_ops = get_dma_ops(dev); 93 99 94 100 BUG_ON(!dma_ops); 95 101 96 - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 102 + dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); 97 103 98 104 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 99 105 }
+5 -4
arch/hexagon/kernel/dma.c
··· 54 54 /* Allocates from a pool of uncached memory that was reserved at boot time */ 55 55 56 56 void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, 57 - dma_addr_t *dma_addr, gfp_t flag) 57 + dma_addr_t *dma_addr, gfp_t flag, 58 + struct dma_attrs *attrs) 58 59 { 59 60 void *ret; 60 61 ··· 82 81 } 83 82 84 83 static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr, 85 - dma_addr_t dma_addr) 84 + dma_addr_t dma_addr, struct dma_attrs *attrs) 86 85 { 87 86 gen_pool_free(coherent_pool, (unsigned long) vaddr, size); 88 87 } ··· 203 202 } 204 203 205 204 struct dma_map_ops hexagon_dma_ops = { 206 - .alloc_coherent = hexagon_dma_alloc_coherent, 207 - .free_coherent = hexagon_free_coherent, 205 + .alloc = hexagon_dma_alloc_coherent, 206 + .free = hexagon_free_coherent, 208 207 .map_sg = hexagon_map_sg, 209 208 .map_page = hexagon_map_page, 210 209 .sync_single_for_cpu = hexagon_sync_single_for_cpu,
+6 -5
arch/ia64/hp/common/sba_iommu.c
··· 1129 1129 * See Documentation/DMA-API-HOWTO.txt 1130 1130 */ 1131 1131 static void * 1132 - sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) 1132 + sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 1133 + gfp_t flags, struct dma_attrs *attrs) 1133 1134 { 1134 1135 struct ioc *ioc; 1135 1136 void *addr; ··· 1192 1191 * 1193 1192 * See Documentation/DMA-API-HOWTO.txt 1194 1193 */ 1195 - static void sba_free_coherent (struct device *dev, size_t size, void *vaddr, 1196 - dma_addr_t dma_handle) 1194 + static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, 1195 + dma_addr_t dma_handle, struct dma_attrs *attrs) 1197 1196 { 1198 1197 sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); 1199 1198 free_pages((unsigned long) vaddr, get_order(size)); ··· 2213 2212 __setup("sbapagesize=",sba_page_override); 2214 2213 2215 2214 struct dma_map_ops sba_dma_ops = { 2216 - .alloc_coherent = sba_alloc_coherent, 2217 - .free_coherent = sba_free_coherent, 2215 + .alloc = sba_alloc_coherent, 2216 + .free = sba_free_coherent, 2218 2217 .map_page = sba_map_page, 2219 2218 .unmap_page = sba_unmap_page, 2220 2219 .map_sg = sba_map_sg_attrs,
+12 -6
arch/ia64/include/asm/dma-mapping.h
··· 23 23 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, 24 24 enum dma_data_direction); 25 25 26 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 27 - dma_addr_t *daddr, gfp_t gfp) 26 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 27 + 28 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 29 + dma_addr_t *daddr, gfp_t gfp, 30 + struct dma_attrs *attrs) 28 31 { 29 32 struct dma_map_ops *ops = platform_dma_get_ops(dev); 30 33 void *caddr; 31 34 32 - caddr = ops->alloc_coherent(dev, size, daddr, gfp); 35 + caddr = ops->alloc(dev, size, daddr, gfp, attrs); 33 36 debug_dma_alloc_coherent(dev, size, *daddr, caddr); 34 37 return caddr; 35 38 } 36 39 37 - static inline void dma_free_coherent(struct device *dev, size_t size, 38 - void *caddr, dma_addr_t daddr) 40 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 41 + 42 + static inline void dma_free_attrs(struct device *dev, size_t size, 43 + void *caddr, dma_addr_t daddr, 44 + struct dma_attrs *attrs) 39 45 { 40 46 struct dma_map_ops *ops = platform_dma_get_ops(dev); 41 47 debug_dma_free_coherent(dev, size, caddr, daddr); 42 - ops->free_coherent(dev, size, caddr, daddr); 48 + ops->free(dev, size, caddr, daddr, attrs); 43 49 } 44 50 45 51 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+11 -3
arch/ia64/kernel/pci-swiotlb.c
··· 15 15 EXPORT_SYMBOL(swiotlb); 16 16 17 17 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, 18 - dma_addr_t *dma_handle, gfp_t gfp) 18 + dma_addr_t *dma_handle, gfp_t gfp, 19 + struct dma_attrs *attrs) 19 20 { 20 21 if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) 21 22 gfp |= GFP_DMA; 22 23 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); 23 24 } 24 25 26 + static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, 27 + void *vaddr, dma_addr_t dma_addr, 28 + struct dma_attrs *attrs) 29 + { 30 + swiotlb_free_coherent(dev, size, vaddr, dma_addr); 31 + } 32 + 25 33 struct dma_map_ops swiotlb_dma_ops = { 26 - .alloc_coherent = ia64_swiotlb_alloc_coherent, 27 - .free_coherent = swiotlb_free_coherent, 34 + .alloc = ia64_swiotlb_alloc_coherent, 35 + .free = ia64_swiotlb_free_coherent, 28 36 .map_page = swiotlb_map_page, 29 37 .unmap_page = swiotlb_unmap_page, 30 38 .map_sg = swiotlb_map_sg_attrs,
+5 -4
arch/ia64/sn/pci/pci_dma.c
··· 76 76 * more information. 77 77 */ 78 78 static void *sn_dma_alloc_coherent(struct device *dev, size_t size, 79 - dma_addr_t * dma_handle, gfp_t flags) 79 + dma_addr_t * dma_handle, gfp_t flags, 80 + struct dma_attrs *attrs) 80 81 { 81 82 void *cpuaddr; 82 83 unsigned long phys_addr; ··· 138 137 * any associated IOMMU mappings. 139 138 */ 140 139 static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 141 - dma_addr_t dma_handle) 140 + dma_addr_t dma_handle, struct dma_attrs *attrs) 142 141 { 143 142 struct pci_dev *pdev = to_pci_dev(dev); 144 143 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); ··· 467 466 } 468 467 469 468 static struct dma_map_ops sn_dma_ops = { 470 - .alloc_coherent = sn_dma_alloc_coherent, 471 - .free_coherent = sn_dma_free_coherent, 469 + .alloc = sn_dma_alloc_coherent, 470 + .free = sn_dma_free_coherent, 472 471 .map_page = sn_dma_map_page, 473 472 .unmap_page = sn_dma_unmap_page, 474 473 .map_sg = sn_dma_map_sg,
+12 -6
arch/microblaze/include/asm/dma-mapping.h
··· 123 123 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 124 124 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 125 125 126 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 127 - dma_addr_t *dma_handle, gfp_t flag) 126 + #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) 127 + 128 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 129 + dma_addr_t *dma_handle, gfp_t flag, 130 + struct dma_attrs *attrs) 128 131 { 129 132 struct dma_map_ops *ops = get_dma_ops(dev); 130 133 void *memory; 131 134 132 135 BUG_ON(!ops); 133 136 134 - memory = ops->alloc_coherent(dev, size, dma_handle, flag); 137 + memory = ops->alloc(dev, size, dma_handle, flag, attrs); 135 138 136 139 debug_dma_alloc_coherent(dev, size, *dma_handle, memory); 137 140 return memory; 138 141 } 139 142 140 - static inline void dma_free_coherent(struct device *dev, size_t size, 141 - void *cpu_addr, dma_addr_t dma_handle) 143 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL) 144 + 145 + static inline void dma_free_attrs(struct device *dev, size_t size, 146 + void *cpu_addr, dma_addr_t dma_handle, 147 + struct dma_attrs *attrs) 142 148 { 143 149 struct dma_map_ops *ops = get_dma_ops(dev); 144 150 145 151 BUG_ON(!ops); 146 152 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 147 - ops->free_coherent(dev, size, cpu_addr, dma_handle); 153 + ops->free(dev, size, cpu_addr, dma_handle, attrs); 148 154 } 149 155 150 156 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+6 -4
arch/microblaze/kernel/dma.c
··· 33 33 #define NOT_COHERENT_CACHE 34 34 35 35 static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 36 - dma_addr_t *dma_handle, gfp_t flag) 36 + dma_addr_t *dma_handle, gfp_t flag, 37 + struct dma_attrs *attrs) 37 38 { 38 39 #ifdef NOT_COHERENT_CACHE 39 40 return consistent_alloc(flag, size, dma_handle); ··· 58 57 } 59 58 60 59 static void dma_direct_free_coherent(struct device *dev, size_t size, 61 - void *vaddr, dma_addr_t dma_handle) 60 + void *vaddr, dma_addr_t dma_handle, 61 + struct dma_attrs *attrs) 62 62 { 63 63 #ifdef NOT_COHERENT_CACHE 64 64 consistent_free(size, vaddr); ··· 178 176 } 179 177 180 178 struct dma_map_ops dma_direct_ops = { 181 - .alloc_coherent = dma_direct_alloc_coherent, 182 - .free_coherent = dma_direct_free_coherent, 179 + .alloc = dma_direct_alloc_coherent, 180 + .free = dma_direct_free_coherent, 183 181 .map_sg = dma_direct_map_sg, 184 182 .unmap_sg = dma_direct_unmap_sg, 185 183 .dma_supported = dma_direct_dma_supported,
+6 -6
arch/mips/cavium-octeon/dma-octeon.c
··· 157 157 } 158 158 159 159 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, 160 - dma_addr_t *dma_handle, gfp_t gfp) 160 + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 161 161 { 162 162 void *ret; 163 163 ··· 192 192 } 193 193 194 194 static void octeon_dma_free_coherent(struct device *dev, size_t size, 195 - void *vaddr, dma_addr_t dma_handle) 195 + void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) 196 196 { 197 197 int order = get_order(size); 198 198 ··· 240 240 241 241 static struct octeon_dma_map_ops octeon_linear_dma_map_ops = { 242 242 .dma_map_ops = { 243 - .alloc_coherent = octeon_dma_alloc_coherent, 244 - .free_coherent = octeon_dma_free_coherent, 243 + .alloc = octeon_dma_alloc_coherent, 244 + .free = octeon_dma_free_coherent, 245 245 .map_page = octeon_dma_map_page, 246 246 .unmap_page = swiotlb_unmap_page, 247 247 .map_sg = octeon_dma_map_sg, ··· 325 325 #ifdef CONFIG_PCI 326 326 static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = { 327 327 .dma_map_ops = { 328 - .alloc_coherent = octeon_dma_alloc_coherent, 329 - .free_coherent = octeon_dma_free_coherent, 328 + .alloc = octeon_dma_alloc_coherent, 329 + .free = octeon_dma_free_coherent, 330 330 .map_page = octeon_dma_map_page, 331 331 .unmap_page = swiotlb_unmap_page, 332 332 .map_sg = octeon_dma_map_sg,
+12 -6
arch/mips/include/asm/dma-mapping.h
··· 57 57 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 58 58 enum dma_data_direction direction); 59 59 60 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 61 - dma_addr_t *dma_handle, gfp_t gfp) 60 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 61 + 62 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 63 + dma_addr_t *dma_handle, gfp_t gfp, 64 + struct dma_attrs *attrs) 62 65 { 63 66 void *ret; 64 67 struct dma_map_ops *ops = get_dma_ops(dev); 65 68 66 - ret = ops->alloc_coherent(dev, size, dma_handle, gfp); 69 + ret = ops->alloc(dev, size, dma_handle, gfp, attrs); 67 70 68 71 debug_dma_alloc_coherent(dev, size, *dma_handle, ret); 69 72 70 73 return ret; 71 74 } 72 75 73 - static inline void dma_free_coherent(struct device *dev, size_t size, 74 - void *vaddr, dma_addr_t dma_handle) 76 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 77 + 78 + static inline void dma_free_attrs(struct device *dev, size_t size, 79 + void *vaddr, dma_addr_t dma_handle, 80 + struct dma_attrs *attrs) 75 81 { 76 82 struct dma_map_ops *ops = get_dma_ops(dev); 77 83 78 - ops->free_coherent(dev, size, vaddr, dma_handle); 84 + ops->free(dev, size, vaddr, dma_handle, attrs); 79 85 80 86 debug_dma_free_coherent(dev, size, vaddr, dma_handle); 81 87 }
+4 -4
arch/mips/mm/dma-default.c
··· 98 98 EXPORT_SYMBOL(dma_alloc_noncoherent); 99 99 100 100 static void *mips_dma_alloc_coherent(struct device *dev, size_t size, 101 - dma_addr_t * dma_handle, gfp_t gfp) 101 + dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) 102 102 { 103 103 void *ret; 104 104 ··· 132 132 EXPORT_SYMBOL(dma_free_noncoherent); 133 133 134 134 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, 135 - dma_addr_t dma_handle) 135 + dma_addr_t dma_handle, struct dma_attrs *attrs) 136 136 { 137 137 unsigned long addr = (unsigned long) vaddr; 138 138 int order = get_order(size); ··· 323 323 EXPORT_SYMBOL(dma_cache_sync); 324 324 325 325 static struct dma_map_ops mips_default_dma_map_ops = { 326 - .alloc_coherent = mips_dma_alloc_coherent, 327 - .free_coherent = mips_dma_free_coherent, 326 + .alloc = mips_dma_alloc_coherent, 327 + .free = mips_dma_free_coherent, 328 328 .map_page = mips_dma_map_page, 329 329 .unmap_page = mips_dma_unmap_page, 330 330 .map_sg = mips_dma_map_sg,
+16 -8
arch/powerpc/include/asm/dma-mapping.h
··· 22 22 23 23 /* Some dma direct funcs must be visible for use in other dma_ops */ 24 24 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, 25 - dma_addr_t *dma_handle, gfp_t flag); 25 + dma_addr_t *dma_handle, gfp_t flag, 26 + struct dma_attrs *attrs); 26 27 extern void dma_direct_free_coherent(struct device *dev, size_t size, 27 - void *vaddr, dma_addr_t dma_handle); 28 + void *vaddr, dma_addr_t dma_handle, 29 + struct dma_attrs *attrs); 28 30 29 31 30 32 #ifdef CONFIG_NOT_COHERENT_CACHE ··· 132 130 133 131 extern int dma_set_mask(struct device *dev, u64 dma_mask); 134 132 135 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 136 - dma_addr_t *dma_handle, gfp_t flag) 133 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 134 + 135 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 136 + dma_addr_t *dma_handle, gfp_t flag, 137 + struct dma_attrs *attrs) 137 138 { 138 139 struct dma_map_ops *dma_ops = get_dma_ops(dev); 139 140 void *cpu_addr; 140 141 141 142 BUG_ON(!dma_ops); 142 143 143 - cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag); 144 + cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); 144 145 145 146 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); 146 147 147 148 return cpu_addr; 148 149 } 149 150 150 - static inline void dma_free_coherent(struct device *dev, size_t size, 151 - void *cpu_addr, dma_addr_t dma_handle) 151 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 152 + 153 + static inline void dma_free_attrs(struct device *dev, size_t size, 154 + void *cpu_addr, dma_addr_t dma_handle, 155 + struct dma_attrs *attrs) 152 156 { 153 157 struct dma_map_ops *dma_ops = get_dma_ops(dev); 154 158 ··· 162 154 163 155 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 164 156 165 - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 157 + dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); 166 158 } 167 159 
168 160 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+6 -4
arch/powerpc/kernel/dma-iommu.c
··· 17 17 * to the dma address (mapping) of the first page. 18 18 */ 19 19 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, 20 - dma_addr_t *dma_handle, gfp_t flag) 20 + dma_addr_t *dma_handle, gfp_t flag, 21 + struct dma_attrs *attrs) 21 22 { 22 23 return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, 23 24 dma_handle, dev->coherent_dma_mask, flag, ··· 26 25 } 27 26 28 27 static void dma_iommu_free_coherent(struct device *dev, size_t size, 29 - void *vaddr, dma_addr_t dma_handle) 28 + void *vaddr, dma_addr_t dma_handle, 29 + struct dma_attrs *attrs) 30 30 { 31 31 iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); 32 32 } ··· 107 105 } 108 106 109 107 struct dma_map_ops dma_iommu_ops = { 110 - .alloc_coherent = dma_iommu_alloc_coherent, 111 - .free_coherent = dma_iommu_free_coherent, 108 + .alloc = dma_iommu_alloc_coherent, 109 + .free = dma_iommu_free_coherent, 112 110 .map_sg = dma_iommu_map_sg, 113 111 .unmap_sg = dma_iommu_unmap_sg, 114 112 .dma_supported = dma_iommu_dma_supported,
+2 -2
arch/powerpc/kernel/dma-swiotlb.c
··· 47 47 * for everything else. 48 48 */ 49 49 struct dma_map_ops swiotlb_dma_ops = { 50 - .alloc_coherent = dma_direct_alloc_coherent, 51 - .free_coherent = dma_direct_free_coherent, 50 + .alloc = dma_direct_alloc_coherent, 51 + .free = dma_direct_free_coherent, 52 52 .map_sg = swiotlb_map_sg_attrs, 53 53 .unmap_sg = swiotlb_unmap_sg_attrs, 54 54 .dma_supported = swiotlb_dma_supported,
+6 -4
arch/powerpc/kernel/dma.c
··· 26 26 27 27 28 28 void *dma_direct_alloc_coherent(struct device *dev, size_t size, 29 - dma_addr_t *dma_handle, gfp_t flag) 29 + dma_addr_t *dma_handle, gfp_t flag, 30 + struct dma_attrs *attrs) 30 31 { 31 32 void *ret; 32 33 #ifdef CONFIG_NOT_COHERENT_CACHE ··· 55 54 } 56 55 57 56 void dma_direct_free_coherent(struct device *dev, size_t size, 58 - void *vaddr, dma_addr_t dma_handle) 57 + void *vaddr, dma_addr_t dma_handle, 58 + struct dma_attrs *attrs) 59 59 { 60 60 #ifdef CONFIG_NOT_COHERENT_CACHE 61 61 __dma_free_coherent(size, vaddr); ··· 152 150 #endif 153 151 154 152 struct dma_map_ops dma_direct_ops = { 155 - .alloc_coherent = dma_direct_alloc_coherent, 156 - .free_coherent = dma_direct_free_coherent, 153 + .alloc = dma_direct_alloc_coherent, 154 + .free = dma_direct_free_coherent, 157 155 .map_sg = dma_direct_map_sg, 158 156 .unmap_sg = dma_direct_unmap_sg, 159 157 .dma_supported = dma_direct_dma_supported,
+6 -4
arch/powerpc/kernel/ibmebus.c
··· 65 65 static void *ibmebus_alloc_coherent(struct device *dev, 66 66 size_t size, 67 67 dma_addr_t *dma_handle, 68 - gfp_t flag) 68 + gfp_t flag, 69 + struct dma_attrs *attrs) 69 70 { 70 71 void *mem; 71 72 ··· 78 77 79 78 static void ibmebus_free_coherent(struct device *dev, 80 79 size_t size, void *vaddr, 81 - dma_addr_t dma_handle) 80 + dma_addr_t dma_handle, 81 + struct dma_attrs *attrs) 82 82 { 83 83 kfree(vaddr); 84 84 } ··· 138 136 } 139 137 140 138 static struct dma_map_ops ibmebus_dma_ops = { 141 - .alloc_coherent = ibmebus_alloc_coherent, 142 - .free_coherent = ibmebus_free_coherent, 139 + .alloc = ibmebus_alloc_coherent, 140 + .free = ibmebus_free_coherent, 143 141 .map_sg = ibmebus_map_sg, 144 142 .unmap_sg = ibmebus_unmap_sg, 145 143 .dma_supported = ibmebus_dma_supported,
+8 -6
arch/powerpc/kernel/vio.c
··· 482 482 } 483 483 484 484 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, 485 - dma_addr_t *dma_handle, gfp_t flag) 485 + dma_addr_t *dma_handle, gfp_t flag, 486 + struct dma_attrs *attrs) 486 487 { 487 488 struct vio_dev *viodev = to_vio_dev(dev); 488 489 void *ret; ··· 493 492 return NULL; 494 493 } 495 494 496 - ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag); 495 + ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs); 497 496 if (unlikely(ret == NULL)) { 498 497 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 499 498 atomic_inc(&viodev->cmo.allocs_failed); ··· 503 502 } 504 503 505 504 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, 506 - void *vaddr, dma_addr_t dma_handle) 505 + void *vaddr, dma_addr_t dma_handle, 506 + struct dma_attrs *attrs) 507 507 { 508 508 struct vio_dev *viodev = to_vio_dev(dev); 509 509 510 - dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle); 510 + dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs); 511 511 512 512 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 513 513 } ··· 609 607 } 610 608 611 609 struct dma_map_ops vio_dma_mapping_ops = { 612 - .alloc_coherent = vio_dma_iommu_alloc_coherent, 613 - .free_coherent = vio_dma_iommu_free_coherent, 610 + .alloc = vio_dma_iommu_alloc_coherent, 611 + .free = vio_dma_iommu_free_coherent, 614 612 .map_sg = vio_dma_iommu_map_sg, 615 613 .unmap_sg = vio_dma_iommu_unmap_sg, 616 614 .map_page = vio_dma_iommu_map_page,
+9 -7
arch/powerpc/platforms/cell/iommu.c
··· 564 564 /* A coherent allocation implies strong ordering */ 565 565 566 566 static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, 567 - dma_addr_t *dma_handle, gfp_t flag) 567 + dma_addr_t *dma_handle, gfp_t flag, 568 + struct dma_attrs *attrs) 568 569 { 569 570 if (iommu_fixed_is_weak) 570 571 return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), ··· 573 572 device_to_mask(dev), flag, 574 573 dev_to_node(dev)); 575 574 else 576 - return dma_direct_ops.alloc_coherent(dev, size, dma_handle, 577 - flag); 575 + return dma_direct_ops.alloc(dev, size, dma_handle, flag, 576 + attrs); 578 577 } 579 578 580 579 static void dma_fixed_free_coherent(struct device *dev, size_t size, 581 - void *vaddr, dma_addr_t dma_handle) 580 + void *vaddr, dma_addr_t dma_handle, 581 + struct dma_attrs *attrs) 582 582 { 583 583 if (iommu_fixed_is_weak) 584 584 iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, 585 585 dma_handle); 586 586 else 587 - dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle); 587 + dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs); 588 588 } 589 589 590 590 static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, ··· 644 642 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); 645 643 646 644 struct dma_map_ops dma_iommu_fixed_ops = { 647 - .alloc_coherent = dma_fixed_alloc_coherent, 648 - .free_coherent = dma_fixed_free_coherent, 645 + .alloc = dma_fixed_alloc_coherent, 646 + .free = dma_fixed_free_coherent, 649 647 .map_sg = dma_fixed_map_sg, 650 648 .unmap_sg = dma_fixed_unmap_sg, 651 649 .dma_supported = dma_fixed_dma_supported,
+7 -6
arch/powerpc/platforms/ps3/system-bus.c
··· 515 515 * to the dma address (mapping) of the first page. 516 516 */ 517 517 static void * ps3_alloc_coherent(struct device *_dev, size_t size, 518 - dma_addr_t *dma_handle, gfp_t flag) 518 + dma_addr_t *dma_handle, gfp_t flag, 519 + struct dma_attrs *attrs) 519 520 { 520 521 int result; 521 522 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); ··· 553 552 } 554 553 555 554 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, 556 - dma_addr_t dma_handle) 555 + dma_addr_t dma_handle, struct dma_attrs *attrs) 557 556 { 558 557 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 559 558 ··· 702 701 } 703 702 704 703 static struct dma_map_ops ps3_sb_dma_ops = { 705 - .alloc_coherent = ps3_alloc_coherent, 706 - .free_coherent = ps3_free_coherent, 704 + .alloc = ps3_alloc_coherent, 705 + .free = ps3_free_coherent, 707 706 .map_sg = ps3_sb_map_sg, 708 707 .unmap_sg = ps3_sb_unmap_sg, 709 708 .dma_supported = ps3_dma_supported, ··· 713 712 }; 714 713 715 714 static struct dma_map_ops ps3_ioc0_dma_ops = { 716 - .alloc_coherent = ps3_alloc_coherent, 717 - .free_coherent = ps3_free_coherent, 715 + .alloc = ps3_alloc_coherent, 716 + .free = ps3_free_coherent, 718 717 .map_sg = ps3_ioc0_map_sg, 719 718 .unmap_sg = ps3_ioc0_unmap_sg, 720 719 .dma_supported = ps3_dma_supported,
+18 -10
arch/sh/include/asm/dma-mapping.h
··· 52 52 return dma_addr == 0; 53 53 } 54 54 55 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 56 - dma_addr_t *dma_handle, gfp_t gfp) 55 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 56 + 57 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 58 + dma_addr_t *dma_handle, gfp_t gfp, 59 + struct dma_attrs *attrs) 57 60 { 58 61 struct dma_map_ops *ops = get_dma_ops(dev); 59 62 void *memory; 60 63 61 64 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) 62 65 return memory; 63 - if (!ops->alloc_coherent) 66 + if (!ops->alloc) 64 67 return NULL; 65 68 66 - memory = ops->alloc_coherent(dev, size, dma_handle, gfp); 69 + memory = ops->alloc(dev, size, dma_handle, gfp, attrs); 67 70 debug_dma_alloc_coherent(dev, size, *dma_handle, memory); 68 71 69 72 return memory; 70 73 } 71 74 72 - static inline void dma_free_coherent(struct device *dev, size_t size, 73 - void *vaddr, dma_addr_t dma_handle) 75 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 76 + 77 + static inline void dma_free_attrs(struct device *dev, size_t size, 78 + void *vaddr, dma_addr_t dma_handle, 79 + struct dma_attrs *attrs) 74 80 { 75 81 struct dma_map_ops *ops = get_dma_ops(dev); 76 82 ··· 84 78 return; 85 79 86 80 debug_dma_free_coherent(dev, size, vaddr, dma_handle); 87 - if (ops->free_coherent) 88 - ops->free_coherent(dev, size, vaddr, dma_handle); 81 + if (ops->free) 82 + ops->free(dev, size, vaddr, dma_handle, attrs); 89 83 } 90 84 91 85 /* arch/sh/mm/consistent.c */ 92 86 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 93 - dma_addr_t *dma_addr, gfp_t flag); 87 + dma_addr_t *dma_addr, gfp_t flag, 88 + struct dma_attrs *attrs); 94 89 extern void dma_generic_free_coherent(struct device *dev, size_t size, 95 - void *vaddr, dma_addr_t dma_handle); 90 + void *vaddr, dma_addr_t dma_handle, 91 + struct dma_attrs *attrs); 96 92 97 93 #endif /* __ASM_SH_DMA_MAPPING_H */
+2 -2
arch/sh/kernel/dma-nommu.c
··· 63 63 #endif 64 64 65 65 struct dma_map_ops nommu_dma_ops = { 66 - .alloc_coherent = dma_generic_alloc_coherent, 67 - .free_coherent = dma_generic_free_coherent, 66 + .alloc = dma_generic_alloc_coherent, 67 + .free = dma_generic_free_coherent, 68 68 .map_page = nommu_map_page, 69 69 .map_sg = nommu_map_sg, 70 70 #ifdef CONFIG_DMA_NONCOHERENT
+4 -2
arch/sh/mm/consistent.c
··· 33 33 fs_initcall(dma_init); 34 34 35 35 void *dma_generic_alloc_coherent(struct device *dev, size_t size, 36 - dma_addr_t *dma_handle, gfp_t gfp) 36 + dma_addr_t *dma_handle, gfp_t gfp, 37 + struct dma_attrs *attrs) 37 38 { 38 39 void *ret, *ret_nocache; 39 40 int order = get_order(size); ··· 65 64 } 66 65 67 66 void dma_generic_free_coherent(struct device *dev, size_t size, 68 - void *vaddr, dma_addr_t dma_handle) 67 + void *vaddr, dma_addr_t dma_handle, 68 + struct dma_attrs *attrs) 69 69 { 70 70 int order = get_order(size); 71 71 unsigned long pfn = dma_handle >> PAGE_SHIFT;
+12 -6
arch/sparc/include/asm/dma-mapping.h
··· 26 26 27 27 #include <asm-generic/dma-mapping-common.h> 28 28 29 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 30 - dma_addr_t *dma_handle, gfp_t flag) 29 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 30 + 31 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 32 + dma_addr_t *dma_handle, gfp_t flag, 33 + struct dma_attrs *attrs) 31 34 { 32 35 struct dma_map_ops *ops = get_dma_ops(dev); 33 36 void *cpu_addr; 34 37 35 - cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); 38 + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); 36 39 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); 37 40 return cpu_addr; 38 41 } 39 42 40 - static inline void dma_free_coherent(struct device *dev, size_t size, 41 - void *cpu_addr, dma_addr_t dma_handle) 43 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 44 + 45 + static inline void dma_free_attrs(struct device *dev, size_t size, 46 + void *cpu_addr, dma_addr_t dma_handle, 47 + struct dma_attrs *attrs) 42 48 { 43 49 struct dma_map_ops *ops = get_dma_ops(dev); 44 50 45 51 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 46 - ops->free_coherent(dev, size, cpu_addr, dma_handle); 52 + ops->free(dev, size, cpu_addr, dma_handle, attrs); 47 53 } 48 54 49 55 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+6 -4
arch/sparc/kernel/iommu.c
··· 280 280 } 281 281 282 282 static void *dma_4u_alloc_coherent(struct device *dev, size_t size, 283 - dma_addr_t *dma_addrp, gfp_t gfp) 283 + dma_addr_t *dma_addrp, gfp_t gfp, 284 + struct dma_attrs *attrs) 284 285 { 285 286 unsigned long flags, order, first_page; 286 287 struct iommu *iommu; ··· 331 330 } 332 331 333 332 static void dma_4u_free_coherent(struct device *dev, size_t size, 334 - void *cpu, dma_addr_t dvma) 333 + void *cpu, dma_addr_t dvma, 334 + struct dma_attrs *attrs) 335 335 { 336 336 struct iommu *iommu; 337 337 unsigned long flags, order, npages; ··· 827 825 } 828 826 829 827 static struct dma_map_ops sun4u_dma_ops = { 830 - .alloc_coherent = dma_4u_alloc_coherent, 831 - .free_coherent = dma_4u_free_coherent, 828 + .alloc = dma_4u_alloc_coherent, 829 + .free = dma_4u_free_coherent, 832 830 .map_page = dma_4u_map_page, 833 831 .unmap_page = dma_4u_unmap_page, 834 832 .map_sg = dma_4u_map_sg,
+10 -8
arch/sparc/kernel/ioport.c
··· 261 261 * CPU may access them without any explicit flushing. 262 262 */ 263 263 static void *sbus_alloc_coherent(struct device *dev, size_t len, 264 - dma_addr_t *dma_addrp, gfp_t gfp) 264 + dma_addr_t *dma_addrp, gfp_t gfp, 265 + struct dma_attrs *attrs) 265 266 { 266 267 struct platform_device *op = to_platform_device(dev); 267 268 unsigned long len_total = PAGE_ALIGN(len); ··· 316 315 } 317 316 318 317 static void sbus_free_coherent(struct device *dev, size_t n, void *p, 319 - dma_addr_t ba) 318 + dma_addr_t ba, struct dma_attrs *attrs) 320 319 { 321 320 struct resource *res; 322 321 struct page *pgv; ··· 408 407 } 409 408 410 409 struct dma_map_ops sbus_dma_ops = { 411 - .alloc_coherent = sbus_alloc_coherent, 412 - .free_coherent = sbus_free_coherent, 410 + .alloc = sbus_alloc_coherent, 411 + .free = sbus_free_coherent, 413 412 .map_page = sbus_map_page, 414 413 .unmap_page = sbus_unmap_page, 415 414 .map_sg = sbus_map_sg, ··· 437 436 * hwdev should be valid struct pci_dev pointer for PCI devices. 438 437 */ 439 438 static void *pci32_alloc_coherent(struct device *dev, size_t len, 440 - dma_addr_t *pba, gfp_t gfp) 439 + dma_addr_t *pba, gfp_t gfp, 440 + struct dma_attrs *attrs) 441 441 { 442 442 unsigned long len_total = PAGE_ALIGN(len); 443 443 void *va; ··· 491 489 * past this call are illegal. 492 490 */ 493 491 static void pci32_free_coherent(struct device *dev, size_t n, void *p, 494 - dma_addr_t ba) 492 + dma_addr_t ba, struct dma_attrs *attrs) 495 493 { 496 494 struct resource *res; 497 495 ··· 647 645 } 648 646 649 647 struct dma_map_ops pci32_dma_ops = { 650 - .alloc_coherent = pci32_alloc_coherent, 651 - .free_coherent = pci32_free_coherent, 648 + .alloc = pci32_alloc_coherent, 649 + .free = pci32_free_coherent, 652 650 .map_page = pci32_map_page, 653 651 .unmap_page = pci32_unmap_page, 654 652 .map_sg = pci32_map_sg,
+5 -4
arch/sparc/kernel/pci_sun4v.c
··· 128 128 } 129 129 130 130 static void *dma_4v_alloc_coherent(struct device *dev, size_t size, 131 - dma_addr_t *dma_addrp, gfp_t gfp) 131 + dma_addr_t *dma_addrp, gfp_t gfp, 132 + struct dma_attrs *attrs) 132 133 { 133 134 unsigned long flags, order, first_page, npages, n; 134 135 struct iommu *iommu; ··· 199 198 } 200 199 201 200 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, 202 - dma_addr_t dvma) 201 + dma_addr_t dvma, struct dma_attrs *attrs) 203 202 { 204 203 struct pci_pbm_info *pbm; 205 204 struct iommu *iommu; ··· 528 527 } 529 528 530 529 static struct dma_map_ops sun4v_dma_ops = { 531 - .alloc_coherent = dma_4v_alloc_coherent, 532 - .free_coherent = dma_4v_free_coherent, 530 + .alloc = dma_4v_alloc_coherent, 531 + .free = dma_4v_free_coherent, 533 532 .map_page = dma_4v_map_page, 534 533 .unmap_page = dma_4v_unmap_page, 535 534 .map_sg = dma_4v_map_sg,
+12 -6
arch/unicore32/include/asm/dma-mapping.h
··· 82 82 return 0; 83 83 } 84 84 85 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 86 - dma_addr_t *dma_handle, gfp_t flag) 85 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 86 + 87 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 88 + dma_addr_t *dma_handle, gfp_t flag, 89 + struct dma_attrs *attrs) 87 90 { 88 91 struct dma_map_ops *dma_ops = get_dma_ops(dev); 89 92 90 - return dma_ops->alloc_coherent(dev, size, dma_handle, flag); 93 + return dma_ops->alloc(dev, size, dma_handle, flag, attrs); 91 94 } 92 95 93 - static inline void dma_free_coherent(struct device *dev, size_t size, 94 - void *cpu_addr, dma_addr_t dma_handle) 96 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 97 + 98 + static inline void dma_free_attrs(struct device *dev, size_t size, 99 + void *cpu_addr, dma_addr_t dma_handle, 100 + struct dma_attrs *attrs) 95 101 { 96 102 struct dma_map_ops *dma_ops = get_dma_ops(dev); 97 103 98 - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 104 + dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); 99 105 } 100 106 101 107 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+16 -2
arch/unicore32/mm/dma-swiotlb.c
··· 17 17 18 18 #include <asm/dma.h> 19 19 20 + static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, 21 + dma_addr_t *dma_handle, gfp_t flags, 22 + struct dma_attrs *attrs) 23 + { 24 + return swiotlb_alloc_coherent(dev, size, dma_handle, flags); 25 + } 26 + 27 + static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, 28 + void *vaddr, dma_addr_t dma_addr, 29 + struct dma_attrs *attrs) 30 + { 31 + swiotlb_free_coherent(dev, size, vaddr, dma_addr); 32 + } 33 + 20 34 struct dma_map_ops swiotlb_dma_map_ops = { 21 - .alloc_coherent = swiotlb_alloc_coherent, 22 - .free_coherent = swiotlb_free_coherent, 35 + .alloc = unicore_swiotlb_alloc_coherent, 36 + .free = unicore_swiotlb_free_coherent, 23 37 .map_sg = swiotlb_map_sg_attrs, 24 38 .unmap_sg = swiotlb_unmap_sg_attrs, 25 39 .dma_supported = swiotlb_dma_supported,
+16 -10
arch/x86/include/asm/dma-mapping.h
··· 59 59 extern int dma_set_mask(struct device *dev, u64 mask); 60 60 61 61 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 62 - dma_addr_t *dma_addr, gfp_t flag); 62 + dma_addr_t *dma_addr, gfp_t flag, 63 + struct dma_attrs *attrs); 63 64 64 65 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 65 66 { ··· 112 111 return gfp; 113 112 } 114 113 114 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 115 + 115 116 static inline void * 116 - dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 117 - gfp_t gfp) 117 + dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, 118 + gfp_t gfp, struct dma_attrs *attrs) 118 119 { 119 120 struct dma_map_ops *ops = get_dma_ops(dev); 120 121 void *memory; ··· 132 129 if (!is_device_dma_capable(dev)) 133 130 return NULL; 134 131 135 - if (!ops->alloc_coherent) 132 + if (!ops->alloc) 136 133 return NULL; 137 134 138 - memory = ops->alloc_coherent(dev, size, dma_handle, 139 - dma_alloc_coherent_gfp_flags(dev, gfp)); 135 + memory = ops->alloc(dev, size, dma_handle, 136 + dma_alloc_coherent_gfp_flags(dev, gfp), attrs); 140 137 debug_dma_alloc_coherent(dev, size, *dma_handle, memory); 141 138 142 139 return memory; 143 140 } 144 141 145 - static inline void dma_free_coherent(struct device *dev, size_t size, 146 - void *vaddr, dma_addr_t bus) 142 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 143 + 144 + static inline void dma_free_attrs(struct device *dev, size_t size, 145 + void *vaddr, dma_addr_t bus, 146 + struct dma_attrs *attrs) 147 147 { 148 148 struct dma_map_ops *ops = get_dma_ops(dev); 149 149 ··· 156 150 return; 157 151 158 152 debug_dma_free_coherent(dev, size, vaddr, bus); 159 - if (ops->free_coherent) 160 - ops->free_coherent(dev, size, vaddr, bus); 153 + if (ops->free) 154 + ops->free(dev, size, vaddr, bus, attrs); 161 155 } 162 156 163 157 #endif
+6 -5
arch/x86/kernel/amd_gart_64.c
··· 477 477 /* allocate and map a coherent mapping */ 478 478 static void * 479 479 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, 480 - gfp_t flag) 480 + gfp_t flag, struct dma_attrs *attrs) 481 481 { 482 482 dma_addr_t paddr; 483 483 unsigned long align_mask; ··· 500 500 } 501 501 __free_pages(page, get_order(size)); 502 502 } else 503 - return dma_generic_alloc_coherent(dev, size, dma_addr, flag); 503 + return dma_generic_alloc_coherent(dev, size, dma_addr, flag, 504 + attrs); 504 505 505 506 return NULL; 506 507 } ··· 509 508 /* free a coherent mapping */ 510 509 static void 511 510 gart_free_coherent(struct device *dev, size_t size, void *vaddr, 512 - dma_addr_t dma_addr) 511 + dma_addr_t dma_addr, struct dma_attrs *attrs) 513 512 { 514 513 gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); 515 514 free_pages((unsigned long)vaddr, get_order(size)); ··· 701 700 .unmap_sg = gart_unmap_sg, 702 701 .map_page = gart_map_page, 703 702 .unmap_page = gart_unmap_page, 704 - .alloc_coherent = gart_alloc_coherent, 705 - .free_coherent = gart_free_coherent, 703 + .alloc = gart_alloc_coherent, 704 + .free = gart_free_coherent, 706 705 .mapping_error = gart_mapping_error, 707 706 }; 708 707
+5 -4
arch/x86/kernel/pci-calgary_64.c
··· 430 430 } 431 431 432 432 static void* calgary_alloc_coherent(struct device *dev, size_t size, 433 - dma_addr_t *dma_handle, gfp_t flag) 433 + dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) 434 434 { 435 435 void *ret = NULL; 436 436 dma_addr_t mapping; ··· 463 463 } 464 464 465 465 static void calgary_free_coherent(struct device *dev, size_t size, 466 - void *vaddr, dma_addr_t dma_handle) 466 + void *vaddr, dma_addr_t dma_handle, 467 + struct dma_attrs *attrs) 467 468 { 468 469 unsigned int npages; 469 470 struct iommu_table *tbl = find_iommu_table(dev); ··· 477 476 } 478 477 479 478 static struct dma_map_ops calgary_dma_ops = { 480 - .alloc_coherent = calgary_alloc_coherent, 481 - .free_coherent = calgary_free_coherent, 479 + .alloc = calgary_alloc_coherent, 480 + .free = calgary_free_coherent, 482 481 .map_sg = calgary_map_sg, 483 482 .unmap_sg = calgary_unmap_sg, 484 483 .map_page = calgary_map_page,
+2 -1
arch/x86/kernel/pci-dma.c
··· 96 96 } 97 97 } 98 98 void *dma_generic_alloc_coherent(struct device *dev, size_t size, 99 - dma_addr_t *dma_addr, gfp_t flag) 99 + dma_addr_t *dma_addr, gfp_t flag, 100 + struct dma_attrs *attrs) 100 101 { 101 102 unsigned long dma_mask; 102 103 struct page *page;
+3 -3
arch/x86/kernel/pci-nommu.c
··· 75 75 } 76 76 77 77 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, 78 - dma_addr_t dma_addr) 78 + dma_addr_t dma_addr, struct dma_attrs *attrs) 79 79 { 80 80 free_pages((unsigned long)vaddr, get_order(size)); 81 81 } ··· 96 96 } 97 97 98 98 struct dma_map_ops nommu_dma_ops = { 99 - .alloc_coherent = dma_generic_alloc_coherent, 100 - .free_coherent = nommu_free_coherent, 99 + .alloc = dma_generic_alloc_coherent, 100 + .free = nommu_free_coherent, 101 101 .map_sg = nommu_map_sg, 102 102 .map_page = nommu_map_page, 103 103 .sync_single_for_device = nommu_sync_single_for_device,
+13 -4
arch/x86/kernel/pci-swiotlb.c
··· 15 15 int swiotlb __read_mostly; 16 16 17 17 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 18 - dma_addr_t *dma_handle, gfp_t flags) 18 + dma_addr_t *dma_handle, gfp_t flags, 19 + struct dma_attrs *attrs) 19 20 { 20 21 void *vaddr; 21 22 22 - vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags); 23 + vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags, 24 + attrs); 23 25 if (vaddr) 24 26 return vaddr; 25 27 26 28 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); 27 29 } 28 30 31 + static void x86_swiotlb_free_coherent(struct device *dev, size_t size, 32 + void *vaddr, dma_addr_t dma_addr, 33 + struct dma_attrs *attrs) 34 + { 35 + swiotlb_free_coherent(dev, size, vaddr, dma_addr); 36 + } 37 + 29 38 static struct dma_map_ops swiotlb_dma_ops = { 30 39 .mapping_error = swiotlb_dma_mapping_error, 31 - .alloc_coherent = x86_swiotlb_alloc_coherent, 32 - .free_coherent = swiotlb_free_coherent, 40 + .alloc = x86_swiotlb_alloc_coherent, 41 + .free = x86_swiotlb_free_coherent, 33 42 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 34 43 .sync_single_for_device = swiotlb_sync_single_for_device, 35 44 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+2 -2
arch/x86/xen/pci-swiotlb-xen.c
··· 12 12 13 13 static struct dma_map_ops xen_swiotlb_dma_ops = { 14 14 .mapping_error = xen_swiotlb_dma_mapping_error, 15 - .alloc_coherent = xen_swiotlb_alloc_coherent, 16 - .free_coherent = xen_swiotlb_free_coherent, 15 + .alloc = xen_swiotlb_alloc_coherent, 16 + .free = xen_swiotlb_free_coherent, 17 17 .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, 18 18 .sync_single_for_device = xen_swiotlb_sync_single_for_device, 19 19 .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+6 -4
drivers/iommu/amd_iommu.c
··· 2707 2707 * The exported alloc_coherent function for dma_ops. 2708 2708 */ 2709 2709 static void *alloc_coherent(struct device *dev, size_t size, 2710 - dma_addr_t *dma_addr, gfp_t flag) 2710 + dma_addr_t *dma_addr, gfp_t flag, 2711 + struct dma_attrs *attrs) 2711 2712 { 2712 2713 unsigned long flags; 2713 2714 void *virt_addr; ··· 2766 2765 * The exported free_coherent function for dma_ops. 2767 2766 */ 2768 2767 static void free_coherent(struct device *dev, size_t size, 2769 - void *virt_addr, dma_addr_t dma_addr) 2768 + void *virt_addr, dma_addr_t dma_addr, 2769 + struct dma_attrs *attrs) 2770 2770 { 2771 2771 unsigned long flags; 2772 2772 struct protection_domain *domain; ··· 2848 2846 } 2849 2847 2850 2848 static struct dma_map_ops amd_iommu_dma_ops = { 2851 - .alloc_coherent = alloc_coherent, 2852 - .free_coherent = free_coherent, 2849 + .alloc = alloc_coherent, 2850 + .free = free_coherent, 2853 2851 .map_page = map_page, 2854 2852 .unmap_page = unmap_page, 2855 2853 .map_sg = map_sg,
+5 -4
drivers/iommu/intel-iommu.c
··· 2949 2949 } 2950 2950 2951 2951 static void *intel_alloc_coherent(struct device *hwdev, size_t size, 2952 - dma_addr_t *dma_handle, gfp_t flags) 2952 + dma_addr_t *dma_handle, gfp_t flags, 2953 + struct dma_attrs *attrs) 2953 2954 { 2954 2955 void *vaddr; 2955 2956 int order; ··· 2982 2981 } 2983 2982 2984 2983 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, 2985 - dma_addr_t dma_handle) 2984 + dma_addr_t dma_handle, struct dma_attrs *attrs) 2986 2985 { 2987 2986 int order; 2988 2987 ··· 3127 3126 } 3128 3127 3129 3128 struct dma_map_ops intel_dma_ops = { 3130 - .alloc_coherent = intel_alloc_coherent, 3131 - .free_coherent = intel_free_coherent, 3129 + .alloc = intel_alloc_coherent, 3130 + .free = intel_free_coherent, 3132 3131 .map_sg = intel_map_sg, 3133 3132 .unmap_sg = intel_unmap_sg, 3134 3133 .map_page = intel_map_page,
+3 -2
drivers/xen/swiotlb-xen.c
··· 204 204 205 205 void * 206 206 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 207 - dma_addr_t *dma_handle, gfp_t flags) 207 + dma_addr_t *dma_handle, gfp_t flags, 208 + struct dma_attrs *attrs) 208 209 { 209 210 void *ret; 210 211 int order = get_order(size); ··· 254 253 255 254 void 256 255 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 257 - dma_addr_t dev_addr) 256 + dma_addr_t dev_addr, struct dma_attrs *attrs) 258 257 { 259 258 int order = get_order(size); 260 259 phys_addr_t phys;
+2
include/linux/dma-attrs.h
··· 13 13 enum dma_attr { 14 14 DMA_ATTR_WRITE_BARRIER, 15 15 DMA_ATTR_WEAK_ORDERING, 16 + DMA_ATTR_WRITE_COMBINE, 17 + DMA_ATTR_NON_CONSISTENT, 16 18 DMA_ATTR_MAX, 17 19 }; 18 20
+9 -4
include/linux/dma-mapping.h
··· 9 9 #include <linux/scatterlist.h> 10 10 11 11 struct dma_map_ops { 12 - void* (*alloc_coherent)(struct device *dev, size_t size, 13 - dma_addr_t *dma_handle, gfp_t gfp); 14 - void (*free_coherent)(struct device *dev, size_t size, 15 - void *vaddr, dma_addr_t dma_handle); 12 + void* (*alloc)(struct device *dev, size_t size, 13 + dma_addr_t *dma_handle, gfp_t gfp, 14 + struct dma_attrs *attrs); 15 + void (*free)(struct device *dev, size_t size, 16 + void *vaddr, dma_addr_t dma_handle, 17 + struct dma_attrs *attrs); 18 + int (*mmap)(struct device *, struct vm_area_struct *, 19 + void *, dma_addr_t, size_t, struct dma_attrs *attrs); 20 + 16 21 dma_addr_t (*map_page)(struct device *dev, struct page *page, 17 22 unsigned long offset, size_t size, 18 23 enum dma_data_direction dir,
+4 -2
include/xen/swiotlb-xen.h
··· 7 7 8 8 extern void 9 9 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 10 - dma_addr_t *dma_handle, gfp_t flags); 10 + dma_addr_t *dma_handle, gfp_t flags, 11 + struct dma_attrs *attrs); 11 12 12 13 extern void 13 14 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, 14 - void *vaddr, dma_addr_t dma_handle); 15 + void *vaddr, dma_addr_t dma_handle, 16 + struct dma_attrs *attrs); 15 17 16 18 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, 17 19 unsigned long offset, size_t size,