Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

- turn dma_cache_sync into a dma_map_ops method and remove the
implementations that are effectively dead because the architecture doesn't
support noncoherent allocations

- add a flag for busses that need DMA configuration (Robin Murphy)

* tag 'dma-mapping-4.15' of git://git.infradead.org/users/hch/dma-mapping:
dma-mapping: turn dma_cache_sync into a dma_map_ops method
sh: make dma_cache_sync a no-op
xtensa: make dma_cache_sync a no-op
unicore32: make dma_cache_sync a no-op
powerpc: make dma_cache_sync a no-op
mn10300: make dma_cache_sync a no-op
microblaze: make dma_cache_sync a no-op
ia64: make dma_cache_sync a no-op
frv: make dma_cache_sync a no-op
x86: make dma_cache_sync a no-op
floppy: consolidate the dummy fd_cacheflush definition
drivers: flag buses which demand DMA configuration

+71 -250
-2
arch/alpha/include/asm/dma-mapping.h
··· 9 9 return dma_ops; 10 10 } 11 11 12 - #define dma_cache_sync(dev, va, size, dir) ((void)0) 13 - 14 12 #endif /* _ALPHA_DMA_MAPPING_H */
-2
arch/alpha/include/asm/floppy.h
··· 24 24 #define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count) 25 25 #define fd_enable_irq() enable_irq(FLOPPY_IRQ) 26 26 #define fd_disable_irq() disable_irq(FLOPPY_IRQ) 27 - #define fd_cacheflush(addr,size) /* nothing */ 28 27 #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ 29 28 0, "floppy", NULL) 30 29 #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) ··· 61 62 prev_dir = dir; 62 63 63 64 fd_clear_dma_ff(); 64 - fd_cacheflush(addr, size); 65 65 fd_set_dma_mode(mode); 66 66 set_dma_addr(FLOPPY_DMA, bus_addr); 67 67 fd_set_dma_count(size);
-6
arch/cris/include/asm/dma-mapping.h
··· 17 17 } 18 18 #endif 19 19 20 - static inline void 21 - dma_cache_sync(struct device *dev, void *vaddr, size_t size, 22 - enum dma_data_direction direction) 23 - { 24 - } 25 - 26 20 #endif
-7
arch/frv/include/asm/dma-mapping.h
··· 15 15 return &frv_dma_ops; 16 16 } 17 17 18 - static inline 19 - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 20 - enum dma_data_direction direction) 21 - { 22 - flush_write_buffers(); 23 - } 24 - 25 18 #endif /* _ASM_DMA_MAPPING_H */
-3
arch/hexagon/include/asm/dma-mapping.h
··· 37 37 return dma_ops; 38 38 } 39 39 40 - extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 41 - enum dma_data_direction direction); 42 - 43 40 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 44 41 { 45 42 if (!dev->dma_mask)
-11
arch/ia64/include/asm/dma-mapping.h
··· 45 45 return daddr; 46 46 } 47 47 48 - static inline void 49 - dma_cache_sync (struct device *dev, void *vaddr, size_t size, 50 - enum dma_data_direction dir) 51 - { 52 - /* 53 - * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to 54 - * ensure that dma_cache_sync() enforces order, hence the mb(). 55 - */ 56 - mb(); 57 - } 58 - 59 48 #endif /* _ASM_IA64_DMA_MAPPING_H */
-5
arch/m32r/include/asm/dma-mapping.h
··· 14 14 return &dma_noop_ops; 15 15 } 16 16 17 - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 18 - enum dma_data_direction direction) 19 - { 20 - } 21 - 22 17 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 23 18 { 24 19 if (!dev->dma_mask)
-6
arch/m68k/include/asm/dma-mapping.h
··· 9 9 return &m68k_dma_ops; 10 10 } 11 11 12 - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 13 - enum dma_data_direction dir) 14 - { 15 - /* we use coherent allocation, so not much to do here. */ 16 - } 17 - 18 12 #endif /* _M68K_DMA_MAPPING_H */
-10
arch/metag/include/asm/dma-mapping.h
··· 9 9 return &metag_dma_ops; 10 10 } 11 11 12 - /* 13 - * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to 14 - * do any flushing here. 15 - */ 16 - static inline void 17 - dma_cache_sync(struct device *dev, void *vaddr, size_t size, 18 - enum dma_data_direction direction) 19 - { 20 - } 21 - 22 12 #endif
-39
arch/microblaze/include/asm/dma-mapping.h
··· 16 16 #define _ASM_MICROBLAZE_DMA_MAPPING_H 17 17 18 18 /* 19 - * See Documentation/DMA-API-HOWTO.txt and 20 - * Documentation/DMA-API.txt for documentation. 21 - */ 22 - 23 - #include <linux/types.h> 24 - #include <linux/cache.h> 25 - #include <linux/mm.h> 26 - #include <linux/scatterlist.h> 27 - #include <linux/dma-debug.h> 28 - #include <asm/io.h> 29 - #include <asm/cacheflush.h> 30 - 31 - #define __dma_alloc_coherent(dev, gfp, size, handle) NULL 32 - #define __dma_free_coherent(size, addr) ((void)0) 33 - 34 - /* 35 19 * Available generic sets of operations 36 20 */ 37 21 extern const struct dma_map_ops dma_direct_ops; ··· 23 39 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 24 40 { 25 41 return &dma_direct_ops; 26 - } 27 - 28 - static inline void __dma_sync(unsigned long paddr, 29 - size_t size, enum dma_data_direction direction) 30 - { 31 - switch (direction) { 32 - case DMA_TO_DEVICE: 33 - case DMA_BIDIRECTIONAL: 34 - flush_dcache_range(paddr, paddr + size); 35 - break; 36 - case DMA_FROM_DEVICE: 37 - invalidate_dcache_range(paddr, paddr + size); 38 - break; 39 - default: 40 - BUG(); 41 - } 42 - } 43 - 44 - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 45 - enum dma_data_direction direction) 46 - { 47 - BUG_ON(direction == DMA_NONE); 48 - __dma_sync(virt_to_phys(vaddr), size, (int)direction); 49 42 } 50 43 51 44 #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
+17
arch/microblaze/kernel/dma.c
··· 13 13 #include <linux/dma-debug.h> 14 14 #include <linux/export.h> 15 15 #include <linux/bug.h> 16 + #include <asm/cacheflush.h> 16 17 17 18 #define NOT_COHERENT_CACHE 18 19 ··· 51 50 #else 52 51 free_pages((unsigned long)vaddr, get_order(size)); 53 52 #endif 53 + } 54 + 55 + static inline void __dma_sync(unsigned long paddr, 56 + size_t size, enum dma_data_direction direction) 57 + { 58 + switch (direction) { 59 + case DMA_TO_DEVICE: 60 + case DMA_BIDIRECTIONAL: 61 + flush_dcache_range(paddr, paddr + size); 62 + break; 63 + case DMA_FROM_DEVICE: 64 + invalidate_dcache_range(paddr, paddr + size); 65 + break; 66 + default: 67 + BUG(); 68 + } 54 69 } 55 70 56 71 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
-3
arch/mips/include/asm/dma-mapping.h
··· 27 27 28 28 static inline void dma_mark_clean(void *addr, size_t size) {} 29 29 30 - extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 31 - enum dma_data_direction direction); 32 - 33 30 #define arch_setup_dma_ops arch_setup_dma_ops 34 31 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, 35 32 u64 size, const struct iommu_ops *iommu,
+3 -4
arch/mips/mm/dma-default.c
··· 383 383 return plat_dma_supported(dev, mask); 384 384 } 385 385 386 - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 386 + static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size, 387 387 enum dma_data_direction direction) 388 388 { 389 389 BUG_ON(direction == DMA_NONE); ··· 391 391 if (!plat_device_is_coherent(dev)) 392 392 __dma_sync_virtual(vaddr, size, direction); 393 393 } 394 - 395 - EXPORT_SYMBOL(dma_cache_sync); 396 394 397 395 static const struct dma_map_ops mips_default_dma_map_ops = { 398 396 .alloc = mips_dma_alloc_coherent, ··· 405 407 .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu, 406 408 .sync_sg_for_device = mips_dma_sync_sg_for_device, 407 409 .mapping_error = mips_dma_mapping_error, 408 - .dma_supported = mips_dma_supported 410 + .dma_supported = mips_dma_supported, 411 + .cache_sync = mips_dma_cache_sync, 409 412 }; 410 413 411 414 const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
-10
arch/mn10300/include/asm/dma-mapping.h
··· 11 11 #ifndef _ASM_DMA_MAPPING_H 12 12 #define _ASM_DMA_MAPPING_H 13 13 14 - #include <asm/cache.h> 15 - #include <asm/io.h> 16 - 17 14 extern const struct dma_map_ops mn10300_dma_ops; 18 15 19 16 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 20 17 { 21 18 return &mn10300_dma_ops; 22 - } 23 - 24 - static inline 25 - void dma_cache_sync(void *vaddr, size_t size, 26 - enum dma_data_direction direction) 27 - { 28 - mn10300_dcache_flush_inv(); 29 19 } 30 20 31 21 #endif
-9
arch/nios2/include/asm/dma-mapping.h
··· 17 17 return &nios2_dma_ops; 18 18 } 19 19 20 - /* 21 - * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to 22 - * do any flushing here. 23 - */ 24 - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 25 - enum dma_data_direction direction) 26 - { 27 - } 28 - 29 20 #endif /* _ASM_NIOS2_DMA_MAPPING_H */
-8
arch/parisc/include/asm/dma-mapping.h
··· 33 33 return hppa_dma_ops; 34 34 } 35 35 36 - static inline void 37 - dma_cache_sync(struct device *dev, void *vaddr, size_t size, 38 - enum dma_data_direction direction) 39 - { 40 - if (hppa_dma_ops->sync_single_for_cpu) 41 - flush_kernel_dcache_range((unsigned long)vaddr, size); 42 - } 43 - 44 36 static inline void * 45 37 parisc_walk_tree(struct device *dev) 46 38 {
+8
arch/parisc/kernel/pci-dma.c
··· 572 572 flush_kernel_vmap_range(sg_virt(sg), sg->length); 573 573 } 574 574 575 + static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size, 576 + enum dma_data_direction direction) 577 + { 578 + flush_kernel_dcache_range((unsigned long)vaddr, size); 579 + } 580 + 575 581 const struct dma_map_ops pcxl_dma_ops = { 576 582 .dma_supported = pa11_dma_supported, 577 583 .alloc = pa11_dma_alloc, ··· 590 584 .sync_single_for_device = pa11_dma_sync_single_for_device, 591 585 .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu, 592 586 .sync_sg_for_device = pa11_dma_sync_sg_for_device, 587 + .cache_sync = pa11_dma_cache_sync, 593 588 }; 594 589 595 590 static void *pcx_dma_alloc(struct device *dev, size_t size, ··· 627 620 .sync_single_for_device = pa11_dma_sync_single_for_device, 628 621 .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu, 629 622 .sync_sg_for_device = pa11_dma_sync_sg_for_device, 623 + .cache_sync = pa11_dma_cache_sync, 630 624 };
-7
arch/powerpc/include/asm/dma-mapping.h
··· 142 142 143 143 #define ARCH_HAS_DMA_MMAP_COHERENT 144 144 145 - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 146 - enum dma_data_direction direction) 147 - { 148 - BUG_ON(direction == DMA_NONE); 149 - __dma_sync(vaddr, size, (int)direction); 150 - } 151 - 152 145 #endif /* __KERNEL__ */ 153 146 #endif /* _ASM_DMA_MAPPING_H */
-2
arch/powerpc/include/asm/floppy.h
··· 25 25 #define fd_get_dma_residue() fd_ops->_get_dma_residue(FLOPPY_DMA) 26 26 #define fd_enable_irq() enable_irq(FLOPPY_IRQ) 27 27 #define fd_disable_irq() disable_irq(FLOPPY_IRQ) 28 - #define fd_cacheflush(addr,size) /* nothing */ 29 28 #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL); 30 29 31 30 #include <linux/pci.h> ··· 151 152 prev_dir = dir; 152 153 153 154 fd_clear_dma_ff(); 154 - fd_cacheflush(addr, size); 155 155 fd_set_dma_mode(mode); 156 156 set_dma_addr(FLOPPY_DMA, bus_addr); 157 157 fd_set_dma_count(size);
-5
arch/s390/include/asm/dma-mapping.h
··· 16 16 return &dma_noop_ops; 17 17 } 18 18 19 - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 20 - enum dma_data_direction direction) 21 - { 22 - } 23 - 24 19 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 25 20 { 26 21 if (!dev->dma_mask)
+3 -4
arch/sh/include/asm/dma-mapping.h
··· 10 10 return dma_ops; 11 11 } 12 12 13 - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 14 - enum dma_data_direction dir); 15 - 16 - /* arch/sh/mm/consistent.c */ 17 13 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 18 14 dma_addr_t *dma_addr, gfp_t flag, 19 15 unsigned long attrs); 20 16 extern void dma_generic_free_coherent(struct device *dev, size_t size, 21 17 void *vaddr, dma_addr_t dma_handle, 22 18 unsigned long attrs); 19 + 20 + void sh_sync_dma_for_device(void *vaddr, size_t size, 21 + enum dma_data_direction dir); 23 22 24 23 #endif /* __ASM_SH_DMA_MAPPING_H */
+9 -8
arch/sh/kernel/dma-nommu.c
··· 9 9 */ 10 10 #include <linux/dma-mapping.h> 11 11 #include <linux/io.h> 12 + #include <asm/cacheflush.h> 12 13 13 14 static dma_addr_t nommu_map_page(struct device *dev, struct page *page, 14 15 unsigned long offset, size_t size, ··· 21 20 WARN_ON(size == 0); 22 21 23 22 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 24 - dma_cache_sync(dev, page_address(page) + offset, size, dir); 23 + sh_sync_dma_for_device(page_address(page) + offset, size, dir); 25 24 26 25 return addr; 27 26 } ··· 39 38 BUG_ON(!sg_page(s)); 40 39 41 40 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 42 - dma_cache_sync(dev, sg_virt(s), s->length, dir); 41 + sh_sync_dma_for_device(sg_virt(s), s->length, dir); 43 42 44 43 s->dma_address = sg_phys(s); 45 44 s->dma_length = s->length; ··· 49 48 } 50 49 51 50 #ifdef CONFIG_DMA_NONCOHERENT 52 - static void nommu_sync_single(struct device *dev, dma_addr_t addr, 51 + static void nommu_sync_single_for_device(struct device *dev, dma_addr_t addr, 53 52 size_t size, enum dma_data_direction dir) 54 53 { 55 - dma_cache_sync(dev, phys_to_virt(addr), size, dir); 54 + sh_sync_dma_for_device(phys_to_virt(addr), size, dir); 56 55 } 57 56 58 - static void nommu_sync_sg(struct device *dev, struct scatterlist *sg, 57 + static void nommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 59 58 int nelems, enum dma_data_direction dir) 60 59 { 61 60 struct scatterlist *s; 62 61 int i; 63 62 64 63 for_each_sg(sg, s, nelems, i) 65 - dma_cache_sync(dev, sg_virt(s), s->length, dir); 64 + sh_sync_dma_for_device(sg_virt(s), s->length, dir); 66 65 } 67 66 #endif 68 67 ··· 72 71 .map_page = nommu_map_page, 73 72 .map_sg = nommu_map_sg, 74 73 #ifdef CONFIG_DMA_NONCOHERENT 75 - .sync_single_for_device = nommu_sync_single, 76 - .sync_sg_for_device = nommu_sync_sg, 74 + .sync_single_for_device = nommu_sync_single_for_device, 75 + .sync_sg_for_device = nommu_sync_sg_for_device, 77 76 #endif 78 77 .is_phys = 1, 79 78 };
+3 -3
arch/sh/mm/consistent.c
··· 49 49 * Pages from the page allocator may have data present in 50 50 * cache. So flush the cache before using uncached memory. 51 51 */ 52 - dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL); 52 + sh_sync_dma_for_device(ret, size, DMA_BIDIRECTIONAL); 53 53 54 54 ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size); 55 55 if (!ret_nocache) { ··· 78 78 iounmap(vaddr); 79 79 } 80 80 81 - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 81 + void sh_sync_dma_for_device(void *vaddr, size_t size, 82 82 enum dma_data_direction direction) 83 83 { 84 84 void *addr; ··· 100 100 BUG(); 101 101 } 102 102 } 103 - EXPORT_SYMBOL(dma_cache_sync); 103 + EXPORT_SYMBOL(sh_sync_dma_for_device); 104 104 105 105 static int __init memchunk_setup(char *str) 106 106 {
-8
arch/sparc/include/asm/dma-mapping.h
··· 6 6 #include <linux/mm.h> 7 7 #include <linux/dma-debug.h> 8 8 9 - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 10 - enum dma_data_direction dir) 11 - { 12 - /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this 13 - * routine can be a nop. 14 - */ 15 - } 16 - 17 9 extern const struct dma_map_ops *dma_ops; 18 10 extern const struct dma_map_ops pci32_dma_ops; 19 11
-1
arch/sparc/include/asm/floppy_32.h
··· 71 71 #define fd_set_dma_count(count) sun_fd_set_dma_count(count) 72 72 #define fd_enable_irq() /* nothing... */ 73 73 #define fd_disable_irq() /* nothing... */ 74 - #define fd_cacheflush(addr, size) /* nothing... */ 75 74 #define fd_request_irq() sun_fd_request_irq() 76 75 #define fd_free_irq() /* nothing... */ 77 76 #if 0 /* P3: added by Alain, these cause a MMU corruption. 19960524 XXX */
-1
arch/sparc/include/asm/floppy_64.h
··· 73 73 #define fd_set_dma_addr(addr) sun_fdops.fd_set_dma_addr(addr) 74 74 #define fd_set_dma_count(count) sun_fdops.fd_set_dma_count(count) 75 75 #define get_dma_residue(x) sun_fdops.get_dma_residue() 76 - #define fd_cacheflush(addr, size) /* nothing... */ 77 76 #define fd_request_irq() sun_fdops.fd_request_irq() 78 77 #define fd_free_irq() sun_fdops.fd_free_irq() 79 78 #define fd_eject(drive) sun_fdops.fd_eject(drive)
-9
arch/tile/include/asm/dma-mapping.h
··· 67 67 #define HAVE_ARCH_DMA_SET_MASK 1 68 68 int dma_set_mask(struct device *dev, u64 mask); 69 69 70 - /* 71 - * dma_alloc_attrs() always returns non-cacheable memory, so there's no need to 72 - * do any flushing here. 73 - */ 74 - static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 75 - enum dma_data_direction direction) 76 - { 77 - } 78 - 79 70 #endif /* _ASM_TILE_DMA_MAPPING_H */
-9
arch/unicore32/include/asm/cacheflush.h
··· 102 102 extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size); 103 103 104 104 /* 105 - * These are private to the dma-mapping API. Do not use directly. 106 - * Their sole purpose is to ensure that data held in the cache 107 - * is visible to DMA, or data written by DMA to system memory is 108 - * visible to the CPU. 109 - */ 110 - extern void __cpuc_dma_clean_range(unsigned long, unsigned long); 111 - extern void __cpuc_dma_flush_range(unsigned long, unsigned long); 112 - 113 - /* 114 105 * Copy user data from/to a page which is mapped into a different 115 106 * processes address space. Really, we want to allow our "user 116 107 * space" model to handle this.
-22
arch/unicore32/include/asm/dma-mapping.h
··· 18 18 #include <linux/scatterlist.h> 19 19 #include <linux/swiotlb.h> 20 20 21 - #include <asm/memory.h> 22 - #include <asm/cacheflush.h> 23 - 24 21 extern const struct dma_map_ops swiotlb_dma_map_ops; 25 22 26 23 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) ··· 44 47 } 45 48 46 49 static inline void dma_mark_clean(void *addr, size_t size) {} 47 - 48 - static inline void dma_cache_sync(struct device *dev, void *vaddr, 49 - size_t size, enum dma_data_direction direction) 50 - { 51 - unsigned long start = (unsigned long)vaddr; 52 - unsigned long end = start + size; 53 - 54 - switch (direction) { 55 - case DMA_NONE: 56 - BUG(); 57 - case DMA_FROM_DEVICE: 58 - case DMA_BIDIRECTIONAL: /* writeback and invalidate */ 59 - __cpuc_dma_flush_range(start, end); 60 - break; 61 - case DMA_TO_DEVICE: /* writeback only */ 62 - __cpuc_dma_clean_range(start, end); 63 - break; 64 - } 65 - } 66 50 67 51 #endif /* __KERNEL__ */ 68 52 #endif
-3
arch/unicore32/mm/proc-syms.c
··· 20 20 EXPORT_SYMBOL(cpu_set_pte); 21 21 22 22 EXPORT_SYMBOL(__cpuc_coherent_kern_range); 23 - 24 - EXPORT_SYMBOL(__cpuc_dma_flush_range); 25 - EXPORT_SYMBOL(__cpuc_dma_clean_range);
-7
arch/x86/include/asm/dma-mapping.h
··· 68 68 } 69 69 #endif /* CONFIG_X86_DMA_REMAP */ 70 70 71 - static inline void 72 - dma_cache_sync(struct device *dev, void *vaddr, size_t size, 73 - enum dma_data_direction dir) 74 - { 75 - flush_write_buffers(); 76 - } 77 - 78 71 static inline unsigned long dma_alloc_coherent_mask(struct device *dev, 79 72 gfp_t gfp) 80 73 {
-3
arch/xtensa/include/asm/dma-mapping.h
··· 23 23 return &xtensa_dma_map_ops; 24 24 } 25 25 26 - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 27 - enum dma_data_direction direction); 28 - 29 26 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 30 27 { 31 28 return (dma_addr_t)paddr;
-23
arch/xtensa/kernel/pci-dma.c
··· 26 26 #include <asm/cacheflush.h> 27 27 #include <asm/io.h> 28 28 29 - void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 30 - enum dma_data_direction dir) 31 - { 32 - switch (dir) { 33 - case DMA_BIDIRECTIONAL: 34 - __flush_invalidate_dcache_range((unsigned long)vaddr, size); 35 - break; 36 - 37 - case DMA_FROM_DEVICE: 38 - __invalidate_dcache_range((unsigned long)vaddr, size); 39 - break; 40 - 41 - case DMA_TO_DEVICE: 42 - __flush_dcache_range((unsigned long)vaddr, size); 43 - break; 44 - 45 - case DMA_NONE: 46 - BUG(); 47 - break; 48 - } 49 - } 50 - EXPORT_SYMBOL(dma_cache_sync); 51 - 52 29 static void do_cache_op(dma_addr_t dma_handle, size_t size, 53 30 void (*fn)(unsigned long, unsigned long)) 54 31 {
+1
drivers/amba/bus.c
··· 195 195 .match = amba_match, 196 196 .uevent = amba_uevent, 197 197 .pm = &amba_pm, 198 + .force_dma = true, 198 199 }; 199 200 200 201 static int __init amba_init(void)
+1
drivers/base/platform.c
··· 1143 1143 .match = platform_match, 1144 1144 .uevent = platform_uevent, 1145 1145 .pm = &platform_dev_pm_ops, 1146 + .force_dma = true, 1146 1147 }; 1147 1148 EXPORT_SYMBOL_GPL(platform_bus_type); 1148 1149
+4
drivers/block/floppy.c
··· 275 275 #define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL, get_order(size)) 276 276 #endif 277 277 278 + #ifndef fd_cacheflush 279 + #define fd_cacheflush(addr, size) /* nothing... */ 280 + #endif 281 + 278 282 static inline void fallback_on_nodma_alloc(char **addr, size_t l) 279 283 { 280 284 #ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
+1
drivers/gpu/host1x/bus.c
··· 320 320 .name = "host1x", 321 321 .match = host1x_device_match, 322 322 .pm = &host1x_device_pm_ops, 323 + .force_dma = true, 323 324 }; 324 325 325 326 static void __host1x_device_del(struct host1x_device *device)
+1 -7
drivers/of/device.c
··· 9 9 #include <linux/module.h> 10 10 #include <linux/mod_devicetable.h> 11 11 #include <linux/slab.h> 12 - #include <linux/pci.h> 13 12 #include <linux/platform_device.h> 14 - #include <linux/amba/bus.h> 15 13 16 14 #include <asm/errno.h> 17 15 #include "of_private.h" ··· 99 101 * DMA configuration regardless of whether "dma-ranges" is 100 102 * correctly specified or not. 101 103 */ 102 - if (!dev_is_pci(dev) && 103 - #ifdef CONFIG_ARM_AMBA 104 - dev->bus != &amba_bustype && 105 - #endif 106 - dev->bus != &platform_bus_type) 104 + if (!dev->bus->force_dma) 107 105 return ret == -ENODEV ? 0 : ret; 108 106 109 107 dma_addr = offset = 0;
+1
drivers/pci/pci-driver.c
··· 1516 1516 .drv_groups = pci_drv_groups, 1517 1517 .pm = PCI_PM_OPS_PTR, 1518 1518 .num_vf = pci_bus_num_vf, 1519 + .force_dma = true, 1519 1520 }; 1520 1521 EXPORT_SYMBOL(pci_bus_type); 1521 1522
+2 -3
drivers/sh/maple/maple.c
··· 300 300 mutex_unlock(&maple_wlist_lock); 301 301 if (maple_packets > 0) { 302 302 for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++) 303 - dma_cache_sync(0, maple_sendbuf + i * PAGE_SIZE, 303 + sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE, 304 304 PAGE_SIZE, DMA_BIDIRECTIONAL); 305 305 } 306 306 ··· 642 642 list_for_each_entry_safe(mq, nmq, &maple_sentq, list) { 643 643 mdev = mq->dev; 644 644 recvbuf = mq->recvbuf->buf; 645 - dma_cache_sync(&mdev->dev, recvbuf, 0x400, 646 - DMA_FROM_DEVICE); 645 + sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE); 647 646 code = recvbuf[0]; 648 647 kfree(mq->sendbuf); 649 648 list_del_init(&mq->list);
+4
include/linux/device.h
··· 97 97 * @p: The private data of the driver core, only the driver core can 98 98 * touch this. 99 99 * @lock_key: Lock class key for use by the lock validator 100 + * @force_dma: Assume devices on this bus should be set up by dma_configure() 101 + * even if DMA capability is not explicitly described by firmware. 100 102 * 101 103 * A bus is a channel between the processor and one or more devices. For the 102 104 * purposes of the device model, all devices are connected via a bus, even if ··· 137 135 138 136 struct subsys_private *p; 139 137 struct lock_class_key lock_key; 138 + 139 + bool force_dma; 140 140 }; 141 141 142 142 extern int __must_check bus_register(struct bus_type *bus);
+13
include/linux/dma-mapping.h
··· 127 127 void (*sync_sg_for_device)(struct device *dev, 128 128 struct scatterlist *sg, int nents, 129 129 enum dma_data_direction dir); 130 + void (*cache_sync)(struct device *dev, void *vaddr, size_t size, 131 + enum dma_data_direction direction); 130 132 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); 131 133 int (*dma_supported)(struct device *dev, u64 mask); 132 134 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK ··· 438 436 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) 439 437 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) 440 438 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) 439 + 440 + static inline void 441 + dma_cache_sync(struct device *dev, void *vaddr, size_t size, 442 + enum dma_data_direction dir) 443 + { 444 + const struct dma_map_ops *ops = get_dma_ops(dev); 445 + 446 + BUG_ON(!valid_dma_direction(dir)); 447 + if (ops->cache_sync) 448 + ops->cache_sync(dev, vaddr, size, dir); 449 + } 441 450 442 451 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 443 452 void *cpu_addr, dma_addr_t dma_addr, size_t size);