Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

microblaze: use generic dma_noncoherent_ops

Switch to the generic noncoherent direct mapping implementation.

This removes the direction-based optimizations in
sync_{single,sg}_for_{cpu,device} which were marked untested and
do not match the usually very well tested {un,}map_{single,sg}
implementations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>

authored by

Christoph Hellwig and committed by
Michal Simek
5411ad27 bd05a58d

+22 -166
+4
arch/microblaze/Kconfig
··· 1 1 config MICROBLAZE 2 2 def_bool y 3 3 select ARCH_HAS_GCOV_PROFILE_ALL 4 + select ARCH_HAS_SYNC_DMA_FOR_CPU 5 + select ARCH_HAS_SYNC_DMA_FOR_DEVICE 4 6 select ARCH_MIGHT_HAVE_PC_PARPORT 5 7 select ARCH_NO_COHERENT_DMA_MMAP if !MMU 6 8 select ARCH_WANT_IPC_PARSE_VERSION ··· 10 8 select TIMER_OF 11 9 select CLONE_BACKWARDS3 12 10 select COMMON_CLK 11 + select DMA_NONCOHERENT_OPS 12 + select DMA_NONCOHERENT_MMAP 13 13 select GENERIC_ATOMIC64 14 14 select GENERIC_CLOCKEVENTS 15 15 select GENERIC_CPU_DEVICES
+1
arch/microblaze/include/asm/Kbuild
··· 5 5 generic-y += compat.h 6 6 generic-y += device.h 7 7 generic-y += div64.h 8 + generic-y += dma-mapping.h 8 9 generic-y += emergency-restart.h 9 10 generic-y += exec.h 10 11 generic-y += extable.h
-28
arch/microblaze/include/asm/dma-mapping.h
··· 1 - /* 2 - * Implements the generic device dma API for microblaze and the pci 3 - * 4 - * Copyright (C) 2009-2010 Michal Simek <monstr@monstr.eu> 5 - * Copyright (C) 2009-2010 PetaLogix 6 - * 7 - * This file is subject to the terms and conditions of the GNU General 8 - * Public License. See the file COPYING in the main directory of this 9 - * archive for more details. 10 - * 11 - * This file is base on powerpc and x86 dma-mapping.h versions 12 - * Copyright (C) 2004 IBM 13 - */ 14 - 15 - #ifndef _ASM_MICROBLAZE_DMA_MAPPING_H 16 - #define _ASM_MICROBLAZE_DMA_MAPPING_H 17 - 18 - /* 19 - * Available generic sets of operations 20 - */ 21 - extern const struct dma_map_ops dma_nommu_ops; 22 - 23 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 24 - { 25 - return &dma_nommu_ops; 26 - } 27 - 28 - #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
-2
arch/microblaze/include/asm/pgtable.h
··· 553 553 554 554 extern unsigned long ioremap_bot, ioremap_base; 555 555 556 - void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle); 557 - void consistent_free(size_t size, void *vaddr); 558 556 void consistent_sync(void *vaddr, size_t size, int direction); 559 557 void consistent_sync_page(struct page *page, unsigned long offset, 560 558 size_t size, int direction);
+12 -132
arch/microblaze/kernel/dma.c
··· 8 8 */ 9 9 10 10 #include <linux/device.h> 11 - #include <linux/dma-mapping.h> 11 + #include <linux/dma-noncoherent.h> 12 12 #include <linux/gfp.h> 13 13 #include <linux/dma-debug.h> 14 14 #include <linux/export.h> 15 15 #include <linux/bug.h> 16 16 #include <asm/cacheflush.h> 17 17 18 - static void *dma_nommu_alloc_coherent(struct device *dev, size_t size, 19 - dma_addr_t *dma_handle, gfp_t flag, 20 - unsigned long attrs) 21 - { 22 - return consistent_alloc(flag, size, dma_handle); 23 - } 24 - 25 - static void dma_nommu_free_coherent(struct device *dev, size_t size, 26 - void *vaddr, dma_addr_t dma_handle, 27 - unsigned long attrs) 28 - { 29 - consistent_free(size, vaddr); 30 - } 31 - 32 - static inline void __dma_sync(unsigned long paddr, 33 - size_t size, enum dma_data_direction direction) 18 + static void __dma_sync(struct device *dev, phys_addr_t paddr, size_t size, 19 + enum dma_data_direction direction) 34 20 { 35 21 switch (direction) { 36 22 case DMA_TO_DEVICE: ··· 31 45 } 32 46 } 33 47 34 - static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl, 35 - int nents, enum dma_data_direction direction, 36 - unsigned long attrs) 48 + void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, 49 + size_t size, enum dma_data_direction dir) 37 50 { 38 - struct scatterlist *sg; 39 - int i; 40 - 41 - /* FIXME this part of code is untested */ 42 - for_each_sg(sgl, sg, nents, i) { 43 - sg->dma_address = sg_phys(sg); 44 - 45 - if (attrs & DMA_ATTR_SKIP_CPU_SYNC) 46 - continue; 47 - 48 - __dma_sync(sg_phys(sg), sg->length, direction); 49 - } 50 - 51 - return nents; 51 + __dma_sync(dev, paddr, size, dir); 52 52 } 53 53 54 - static inline dma_addr_t dma_nommu_map_page(struct device *dev, 55 - struct page *page, 56 - unsigned long offset, 57 - size_t size, 58 - enum dma_data_direction direction, 59 - unsigned long attrs) 54 + void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr, 55 + size_t size, enum dma_data_direction dir) 60 56 { 
61 - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 62 - __dma_sync(page_to_phys(page) + offset, size, direction); 63 - return page_to_phys(page) + offset; 57 + __dma_sync(dev, paddr, size, dir); 64 58 } 65 59 66 - static inline void dma_nommu_unmap_page(struct device *dev, 67 - dma_addr_t dma_address, 68 - size_t size, 69 - enum dma_data_direction direction, 70 - unsigned long attrs) 71 - { 72 - /* There is not necessary to do cache cleanup 73 - * 74 - * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and 75 - * dma_address is physical address 76 - */ 77 - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 78 - __dma_sync(dma_address, size, direction); 79 - } 80 - 81 - static inline void 82 - dma_nommu_sync_single_for_cpu(struct device *dev, 83 - dma_addr_t dma_handle, size_t size, 84 - enum dma_data_direction direction) 85 - { 86 - /* 87 - * It's pointless to flush the cache as the memory segment 88 - * is given to the CPU 89 - */ 90 - 91 - if (direction == DMA_FROM_DEVICE) 92 - __dma_sync(dma_handle, size, direction); 93 - } 94 - 95 - static inline void 96 - dma_nommu_sync_single_for_device(struct device *dev, 97 - dma_addr_t dma_handle, size_t size, 98 - enum dma_data_direction direction) 99 - { 100 - /* 101 - * It's pointless to invalidate the cache if the device isn't 102 - * supposed to write to the relevant region 103 - */ 104 - 105 - if (direction == DMA_TO_DEVICE) 106 - __dma_sync(dma_handle, size, direction); 107 - } 108 - 109 - static inline void 110 - dma_nommu_sync_sg_for_cpu(struct device *dev, 111 - struct scatterlist *sgl, int nents, 112 - enum dma_data_direction direction) 113 - { 114 - struct scatterlist *sg; 115 - int i; 116 - 117 - /* FIXME this part of code is untested */ 118 - if (direction == DMA_FROM_DEVICE) 119 - for_each_sg(sgl, sg, nents, i) 120 - __dma_sync(sg->dma_address, sg->length, direction); 121 - } 122 - 123 - static inline void 124 - dma_nommu_sync_sg_for_device(struct device *dev, 125 - struct scatterlist *sgl, int nents, 126 - 
enum dma_data_direction direction) 127 - { 128 - struct scatterlist *sg; 129 - int i; 130 - 131 - /* FIXME this part of code is untested */ 132 - if (direction == DMA_TO_DEVICE) 133 - for_each_sg(sgl, sg, nents, i) 134 - __dma_sync(sg->dma_address, sg->length, direction); 135 - } 136 - 137 - static 138 - int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 139 - void *cpu_addr, dma_addr_t handle, size_t size, 140 - unsigned long attrs) 60 + int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma, 61 + void *cpu_addr, dma_addr_t handle, size_t size, 62 + unsigned long attrs) 141 63 { 142 64 #ifdef CONFIG_MMU 143 65 unsigned long user_count = vma_pages(vma); ··· 64 170 return -ENXIO; 65 171 #endif 66 172 } 67 - 68 - const struct dma_map_ops dma_nommu_ops = { 69 - .alloc = dma_nommu_alloc_coherent, 70 - .free = dma_nommu_free_coherent, 71 - .mmap = dma_nommu_mmap_coherent, 72 - .map_sg = dma_nommu_map_sg, 73 - .map_page = dma_nommu_map_page, 74 - .unmap_page = dma_nommu_unmap_page, 75 - .sync_single_for_cpu = dma_nommu_sync_single_for_cpu, 76 - .sync_single_for_device = dma_nommu_sync_single_for_device, 77 - .sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu, 78 - .sync_sg_for_device = dma_nommu_sync_sg_for_device, 79 - }; 80 - EXPORT_SYMBOL(dma_nommu_ops);
+5 -4
arch/microblaze/mm/consistent.c
··· 33 33 #include <linux/pci.h> 34 34 #include <linux/interrupt.h> 35 35 #include <linux/gfp.h> 36 + #include <linux/dma-noncoherent.h> 36 37 37 38 #include <asm/pgalloc.h> 38 39 #include <linux/io.h> ··· 60 59 * uncached region. This will no doubt cause big problems if memory allocated 61 60 * here is not also freed properly. -- JW 62 61 */ 63 - void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle) 62 + void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 63 + gfp_t gfp, unsigned long attrs) 64 64 { 65 65 unsigned long order, vaddr; 66 66 void *ret; ··· 156 154 157 155 return ret; 158 156 } 159 - EXPORT_SYMBOL(consistent_alloc); 160 157 161 158 #ifdef CONFIG_MMU 162 159 static pte_t *consistent_virt_to_pte(void *vaddr) ··· 179 178 /* 180 179 * free page(s) as defined by the above mapping. 181 180 */ 182 - void consistent_free(size_t size, void *vaddr) 181 + void arch_dma_free(struct device *dev, size_t size, void *vaddr, 182 + dma_addr_t dma_addr, unsigned long attrs) 183 183 { 184 184 struct page *page; 185 185 ··· 220 218 flush_tlb_all(); 221 219 #endif 222 220 } 223 - EXPORT_SYMBOL(consistent_free); 224 221 225 222 /* 226 223 * make an area consistent.