ARM: NOMMU: Wire-up default DMA interface

The way the default DMA pool is exposed has changed, and we now need to
use a dedicated interface to work with it. This patch switches the
alloc/release operations over to that interface. Since the default DMA
pool is no longer handled by generic code, we also have to implement our
own mmap operation.
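
For reference while reading the diff, this is the shape of the default-pool
interface as it is used here. The signatures below are a sketch inferred from
the call sites in this patch; the authoritative declarations come from the
companion dma-coherent change in the generic code:

/* Sketch of the default-pool interface, inferred from its use below. */

/* Allocate from the default coherent pool; returns the CPU address and
 * fills *dma_handle, or NULL if no pool is set up or it is exhausted. */
void *dma_alloc_from_global_coherent(size_t size, dma_addr_t *dma_handle);

/* Give pages back to the default pool; returns non-zero when vaddr did
 * come from that pool. */
int dma_release_from_global_coherent(int order, void *vaddr);

/* If cpu_addr lies in the default pool, map it into vma, store the mmap
 * result in *ret and return non-zero; otherwise return 0. */
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                                  size_t size, int *ret);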

Tested-by: Andras Szemzo <sza@esh.hu>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>

1 file changed, 36 insertions(+), 9 deletions(-)
arch/arm/mm/dma-mapping-nommu.c
···
 
 {
         const struct dma_map_ops *ops = &dma_noop_ops;
+        void *ret;
 
         /*
-         * We are here because:
+         * Try generic allocator first if we are advertised that
+         * consistency is not required.
+         */
+
+        if (attrs & DMA_ATTR_NON_CONSISTENT)
+                return ops->alloc(dev, size, dma_handle, gfp, attrs);
+
+        ret = dma_alloc_from_global_coherent(size, dma_handle);
+
+        /*
+         * dma_alloc_from_global_coherent() may fail because:
+         *
          * - no consistent DMA region has been defined, so we can't
          *   continue.
          * - there is no space left in consistent DMA region, so we
···
          *   advertised that consistency is not required.
          */
 
-        if (attrs & DMA_ATTR_NON_CONSISTENT)
-                return ops->alloc(dev, size, dma_handle, gfp, attrs);
-
-        WARN_ON_ONCE(1);
-        return NULL;
+        WARN_ON_ONCE(ret == NULL);
+        return ret;
 }
 
 static void arm_nommu_dma_free(struct device *dev, size_t size,
···
 {
         const struct dma_map_ops *ops = &dma_noop_ops;
 
-        if (attrs & DMA_ATTR_NON_CONSISTENT)
+        if (attrs & DMA_ATTR_NON_CONSISTENT) {
                 ops->free(dev, size, cpu_addr, dma_addr, attrs);
-        else
-                WARN_ON_ONCE(1);
+        } else {
+                int ret = dma_release_from_global_coherent(get_order(size),
+                                                           cpu_addr);
+
+                WARN_ON_ONCE(ret == 0);
+        }
 
         return;
 }
+
+static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                              unsigned long attrs)
+{
+        int ret;
+
+        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+                return ret;
+
+        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
 
 static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
                                   enum dma_data_direction dir)
···
 const struct dma_map_ops arm_nommu_dma_ops = {
         .alloc          = arm_nommu_dma_alloc,
         .free           = arm_nommu_dma_free,
+        .mmap           = arm_nommu_dma_mmap,
         .map_page       = arm_nommu_dma_map_page,
         .unmap_page     = arm_nommu_dma_unmap_page,
         .map_sg         = arm_nommu_dma_map_sg,
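
For context, a hedged driver-side sketch (not part of the patch) of how these
handlers get exercised on a !MMU ARM kernel: dma_alloc_coherent() dispatches to
arm_nommu_dma_alloc(), dma_mmap_coherent() to the new arm_nommu_dma_mmap(), and
dma_free_coherent() to arm_nommu_dma_free(). The device, buffer size and
example_* function names are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/sizes.h>

/* Illustrative only: a buffer owned by a device bound to arm_nommu_dma_ops. */
static void *example_buf;
static dma_addr_t example_dma;

static int example_alloc(struct device *dev)
{
        /* Ends up in arm_nommu_dma_alloc(); without DMA_ATTR_NON_CONSISTENT
         * the allocation is served from the default (global) coherent pool. */
        example_buf = dma_alloc_coherent(dev, SZ_4K, &example_dma, GFP_KERNEL);
        return example_buf ? 0 : -ENOMEM;
}

static int example_mmap(struct device *dev, struct vm_area_struct *vma)
{
        /* Ends up in the new arm_nommu_dma_mmap(); allocations from the
         * default pool are handled by dma_mmap_from_global_coherent(). */
        return dma_mmap_coherent(dev, vma, example_buf, example_dma, SZ_4K);
}

static void example_free(struct device *dev)
{
        /* Ends up in arm_nommu_dma_free(), which hands the pages back via
         * dma_release_from_global_coherent(). */
        dma_free_coherent(dev, SZ_4K, example_buf, example_dma);
}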