ARM: NOMMU: Wire-up default DMA interface

The way the default DMA pool is exposed has changed, and now we need to
use a dedicated interface to work with it. This patch makes the
alloc/release operations use that interface. Since the default DMA pool
is no longer handled by generic code, we have to implement our own mmap
operation.

Tested-by: Andras Szemzo <sza@esh.hu>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>

authored by

Vladimir Murzin and committed by
Christoph Hellwig
878ec367 43fc509c

+36 -9
+36 -9
arch/arm/mm/dma-mapping-nommu.c
··· 40 41 { 42 const struct dma_map_ops *ops = &dma_noop_ops; 43 44 /* 45 - * We are here because: 46 * - no consistent DMA region has been defined, so we can't 47 * continue. 48 * - there is no space left in consistent DMA region, so we ··· 62 * advertised that consistency is not required. 63 */ 64 65 - if (attrs & DMA_ATTR_NON_CONSISTENT) 66 - return ops->alloc(dev, size, dma_handle, gfp, attrs); 67 - 68 - WARN_ON_ONCE(1); 69 - return NULL; 70 } 71 72 static void arm_nommu_dma_free(struct device *dev, size_t size, ··· 72 { 73 const struct dma_map_ops *ops = &dma_noop_ops; 74 75 - if (attrs & DMA_ATTR_NON_CONSISTENT) 76 ops->free(dev, size, cpu_addr, dma_addr, attrs); 77 - else 78 - WARN_ON_ONCE(1); 79 80 return; 81 } 82 83 static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size, 84 enum dma_data_direction dir) ··· 199 const struct dma_map_ops arm_nommu_dma_ops = { 200 .alloc = arm_nommu_dma_alloc, 201 .free = arm_nommu_dma_free, 202 .map_page = arm_nommu_dma_map_page, 203 .unmap_page = arm_nommu_dma_unmap_page, 204 .map_sg = arm_nommu_dma_map_sg,
··· 40 41 { 42 const struct dma_map_ops *ops = &dma_noop_ops; 43 + void *ret; 44 45 /* 46 + * Try generic allocator first if we are advertised that 47 + * consistency is not required. 48 + */ 49 + 50 + if (attrs & DMA_ATTR_NON_CONSISTENT) 51 + return ops->alloc(dev, size, dma_handle, gfp, attrs); 52 + 53 + ret = dma_alloc_from_global_coherent(size, dma_handle); 54 + 55 + /* 56 + * dma_alloc_from_global_coherent() may fail because: 57 + * 58 * - no consistent DMA region has been defined, so we can't 59 * continue. 60 * - there is no space left in consistent DMA region, so we ··· 50 * advertised that consistency is not required. 51 */ 52 53 + WARN_ON_ONCE(ret == NULL); 54 + return ret; 55 } 56 57 static void arm_nommu_dma_free(struct device *dev, size_t size, ··· 63 { 64 const struct dma_map_ops *ops = &dma_noop_ops; 65 66 + if (attrs & DMA_ATTR_NON_CONSISTENT) { 67 ops->free(dev, size, cpu_addr, dma_addr, attrs); 68 + } else { 69 + int ret = dma_release_from_global_coherent(get_order(size), 70 + cpu_addr); 71 + 72 + WARN_ON_ONCE(ret == 0); 73 + } 74 75 return; 76 } 77 + 78 + static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, 79 + void *cpu_addr, dma_addr_t dma_addr, size_t size, 80 + unsigned long attrs) 81 + { 82 + int ret; 83 + 84 + if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) 85 + return ret; 86 + 87 + return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); 88 + } 89 + 90 91 static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size, 92 enum dma_data_direction dir) ··· 173 const struct dma_map_ops arm_nommu_dma_ops = { 174 .alloc = arm_nommu_dma_alloc, 175 .free = arm_nommu_dma_free, 176 + .mmap = arm_nommu_dma_mmap, 177 .map_page = arm_nommu_dma_map_page, 178 .unmap_page = arm_nommu_dma_unmap_page, 179 .map_sg = arm_nommu_dma_map_sg,