Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: dma-mapping: use alloc, mmap, free from dma_ops

This patch converts dma_alloc/free/mmap_{coherent,writecombine}
functions to use generic alloc/free/mmap methods from dma_map_ops
structure. A new DMA_ATTR_WRITE_COMBINE DMA attribute has been
introduced to implement writecombine methods.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>

+104 -66
+3
arch/arm/common/dmabounce.c
··· 449 449 } 450 450 451 451 static struct dma_map_ops dmabounce_ops = { 452 + .alloc = arm_dma_alloc, 453 + .free = arm_dma_free, 454 + .mmap = arm_dma_mmap, 452 455 .map_page = dmabounce_map_page, 453 456 .unmap_page = dmabounce_unmap_page, 454 457 .sync_single_for_cpu = dmabounce_sync_for_cpu,
+77 -30
arch/arm/include/asm/dma-mapping.h
··· 5 5 6 6 #include <linux/mm_types.h> 7 7 #include <linux/scatterlist.h> 8 + #include <linux/dma-attrs.h> 8 9 #include <linux/dma-debug.h> 9 10 10 11 #include <asm-generic/dma-coherent.h> ··· 111 110 extern int dma_supported(struct device *dev, u64 mask); 112 111 113 112 /** 114 - * dma_alloc_coherent - allocate consistent memory for DMA 113 + * arm_dma_alloc - allocate consistent memory for DMA 115 114 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 116 115 * @size: required memory size 117 116 * @handle: bus-specific DMA address 117 + * @attrs: optinal attributes that specific mapping properties 118 118 * 119 - * Allocate some uncached, unbuffered memory for a device for 120 - * performing DMA. This function allocates pages, and will 121 - * return the CPU-viewed address, and sets @handle to be the 122 - * device-viewed address. 119 + * Allocate some memory for a device for performing DMA. This function 120 + * allocates pages, and will return the CPU-viewed address, and sets @handle 121 + * to be the device-viewed address. 
123 122 */ 124 - extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); 123 + extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 124 + gfp_t gfp, struct dma_attrs *attrs); 125 + 126 + #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) 127 + 128 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 129 + dma_addr_t *dma_handle, gfp_t flag, 130 + struct dma_attrs *attrs) 131 + { 132 + struct dma_map_ops *ops = get_dma_ops(dev); 133 + void *cpu_addr; 134 + BUG_ON(!ops); 135 + 136 + cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs); 137 + debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); 138 + return cpu_addr; 139 + } 125 140 126 141 /** 127 - * dma_free_coherent - free memory allocated by dma_alloc_coherent 142 + * arm_dma_free - free memory allocated by arm_dma_alloc 128 143 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 129 144 * @size: size of memory originally requested in dma_alloc_coherent 130 145 * @cpu_addr: CPU-view address returned from dma_alloc_coherent 131 146 * @handle: device-view address returned from dma_alloc_coherent 147 + * @attrs: optinal attributes that specific mapping properties 132 148 * 133 149 * Free (and unmap) a DMA buffer previously allocated by 134 - * dma_alloc_coherent(). 150 + * arm_dma_alloc(). 135 151 * 136 152 * References to memory and mappings associated with cpu_addr/handle 137 153 * during and after this call executing are illegal. 
138 154 */ 139 - extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); 155 + extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 156 + dma_addr_t handle, struct dma_attrs *attrs); 157 + 158 + #define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL) 159 + 160 + static inline void dma_free_attrs(struct device *dev, size_t size, 161 + void *cpu_addr, dma_addr_t dma_handle, 162 + struct dma_attrs *attrs) 163 + { 164 + struct dma_map_ops *ops = get_dma_ops(dev); 165 + BUG_ON(!ops); 166 + 167 + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 168 + ops->free(dev, size, cpu_addr, dma_handle, attrs); 169 + } 140 170 141 171 /** 142 - * dma_mmap_coherent - map a coherent DMA allocation into user space 172 + * arm_dma_mmap - map a coherent DMA allocation into user space 143 173 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 144 174 * @vma: vm_area_struct describing requested user mapping 145 175 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent 146 176 * @handle: device-view address returned from dma_alloc_coherent 147 177 * @size: size of memory originally requested in dma_alloc_coherent 178 + * @attrs: optinal attributes that specific mapping properties 148 179 * 149 180 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent 150 181 * into user space. The coherent DMA buffer must not be freed by the 151 182 * driver until the user space mapping has been released. 
152 183 */ 153 - int dma_mmap_coherent(struct device *, struct vm_area_struct *, 154 - void *, dma_addr_t, size_t); 184 + extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 185 + void *cpu_addr, dma_addr_t dma_addr, size_t size, 186 + struct dma_attrs *attrs); 155 187 188 + #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) 156 189 157 - /** 158 - * dma_alloc_writecombine - allocate writecombining memory for DMA 159 - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 160 - * @size: required memory size 161 - * @handle: bus-specific DMA address 162 - * 163 - * Allocate some uncached, buffered memory for a device for 164 - * performing DMA. This function allocates pages, and will 165 - * return the CPU-viewed address, and sets @handle to be the 166 - * device-viewed address. 167 - */ 168 - extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *, 169 - gfp_t); 190 + static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 191 + void *cpu_addr, dma_addr_t dma_addr, 192 + size_t size, struct dma_attrs *attrs) 193 + { 194 + struct dma_map_ops *ops = get_dma_ops(dev); 195 + BUG_ON(!ops); 196 + return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 197 + } 170 198 171 - #define dma_free_writecombine(dev,size,cpu_addr,handle) \ 172 - dma_free_coherent(dev,size,cpu_addr,handle) 199 + static inline void *dma_alloc_writecombine(struct device *dev, size_t size, 200 + dma_addr_t *dma_handle, gfp_t flag) 201 + { 202 + DEFINE_DMA_ATTRS(attrs); 203 + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 204 + return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs); 205 + } 173 206 174 - int dma_mmap_writecombine(struct device *, struct vm_area_struct *, 175 - void *, dma_addr_t, size_t); 207 + static inline void dma_free_writecombine(struct device *dev, size_t size, 208 + void *cpu_addr, dma_addr_t dma_handle) 209 + { 210 + DEFINE_DMA_ATTRS(attrs); 211 + 
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 212 + return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); 213 + } 214 + 215 + static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, 216 + void *cpu_addr, dma_addr_t dma_addr, size_t size) 217 + { 218 + DEFINE_DMA_ATTRS(attrs); 219 + dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 220 + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); 221 + } 176 222 177 223 /* 178 224 * This can be called during boot to increase the size of the consistent ··· 227 179 * memory allocator is initialised, i.e. before any core_initcall. 228 180 */ 229 181 extern void __init init_consistent_dma_size(unsigned long size); 230 - 231 182 232 183 /* 233 184 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+24 -36
arch/arm/mm/dma-mapping.c
··· 113 113 static int arm_dma_set_mask(struct device *dev, u64 dma_mask); 114 114 115 115 struct dma_map_ops arm_dma_ops = { 116 + .alloc = arm_dma_alloc, 117 + .free = arm_dma_free, 118 + .mmap = arm_dma_mmap, 116 119 .map_page = arm_dma_map_page, 117 120 .unmap_page = arm_dma_unmap_page, 118 121 .map_sg = arm_dma_map_sg, ··· 418 415 arm_vmregion_free(&consistent_head, c); 419 416 } 420 417 418 + static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) 419 + { 420 + prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ? 421 + pgprot_writecombine(prot) : 422 + pgprot_dmacoherent(prot); 423 + return prot; 424 + } 425 + 421 426 #else /* !CONFIG_MMU */ 422 427 423 428 #define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page) 424 429 #define __dma_free_remap(addr, size) do { } while (0) 430 + #define __get_dma_pgprot(attrs, prot) __pgprot(0) 425 431 426 432 #endif /* CONFIG_MMU */ 427 433 ··· 474 462 * Allocate DMA-coherent memory space and return both the kernel remapped 475 463 * virtual and bus address for that space. 476 464 */ 477 - void * 478 - dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 465 + void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 466 + gfp_t gfp, struct dma_attrs *attrs) 479 467 { 468 + pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); 480 469 void *memory; 481 470 482 471 if (dma_alloc_from_coherent(dev, size, handle, &memory)) 483 472 return memory; 484 473 485 - return __dma_alloc(dev, size, handle, gfp, 486 - pgprot_dmacoherent(pgprot_kernel), 474 + return __dma_alloc(dev, size, handle, gfp, prot, 487 475 __builtin_return_address(0)); 488 476 } 489 - EXPORT_SYMBOL(dma_alloc_coherent); 490 477 491 478 /* 492 - * Allocate a writecombining region, in much the same way as 493 - * dma_alloc_coherent above. 479 + * Create userspace mapping for the DMA-coherent memory. 
494 480 */ 495 - void * 496 - dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 497 - { 498 - return __dma_alloc(dev, size, handle, gfp, 499 - pgprot_writecombine(pgprot_kernel), 500 - __builtin_return_address(0)); 501 - } 502 - EXPORT_SYMBOL(dma_alloc_writecombine); 503 - 504 - static int dma_mmap(struct device *dev, struct vm_area_struct *vma, 505 - void *cpu_addr, dma_addr_t dma_addr, size_t size) 481 + int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 482 + void *cpu_addr, dma_addr_t dma_addr, size_t size, 483 + struct dma_attrs *attrs) 506 484 { 507 485 int ret = -ENXIO; 508 486 #ifdef CONFIG_MMU 509 487 unsigned long user_size, kern_size; 510 488 struct arm_vmregion *c; 489 + 490 + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 511 491 512 492 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) 513 493 return ret; ··· 525 521 return ret; 526 522 } 527 523 528 - int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 529 - void *cpu_addr, dma_addr_t dma_addr, size_t size) 530 - { 531 - vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot); 532 - return dma_mmap(dev, vma, cpu_addr, dma_addr, size); 533 - } 534 - EXPORT_SYMBOL(dma_mmap_coherent); 535 - 536 - int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, 537 - void *cpu_addr, dma_addr_t dma_addr, size_t size) 538 - { 539 - vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 540 - return dma_mmap(dev, vma, cpu_addr, dma_addr, size); 541 - } 542 - EXPORT_SYMBOL(dma_mmap_writecombine); 543 - 544 524 /* 545 525 * free a page as defined by the above mapping. 546 526 * Must not be called with IRQs disabled. 
547 527 */ 548 - void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) 528 + void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 529 + dma_addr_t handle, struct dma_attrs *attrs) 549 530 { 550 531 WARN_ON(irqs_disabled()); 551 532 ··· 544 555 545 556 __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size); 546 557 } 547 - EXPORT_SYMBOL(dma_free_coherent); 548 558 549 559 static void dma_cache_maint_page(struct page *page, unsigned long offset, 550 560 size_t size, enum dma_data_direction dir,