Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: consolidate dma_supported

Most architectures just call into ->dma_supported, but some also return 1
if the method is not present, or 0 if no dma ops are present (although
that should never happen). Consolidate this broader version into
common code.

Also fix h8300 which incorrectly always returned 0, which would have been
a problem if its dma_set_mask implementation wasn't a similarly buggy
noop.

As a few architectures have much more elaborate implementations, we
still allow for arch overrides.

[jcmvbkbc@gmail.com: fix xtensa]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Christoph Hellwig and committed by
Linus Torvalds
ee196371 efa21e43

+25 -98
-5
arch/alpha/include/asm/dma-mapping.h
··· 12 12 13 13 #include <asm-generic/dma-mapping-common.h> 14 14 15 - static inline int dma_supported(struct device *dev, u64 mask) 16 - { 17 - return get_dma_ops(dev)->dma_supported(dev, mask); 18 - } 19 - 20 15 static inline int dma_set_mask(struct device *dev, u64 mask) 21 16 { 22 17 return get_dma_ops(dev)->set_dma_mask(dev, mask);
+3 -3
arch/arm/include/asm/dma-mapping.h
··· 38 38 dev->archdata.dma_ops = ops; 39 39 } 40 40 41 + #define HAVE_ARCH_DMA_SUPPORTED 1 42 + extern int dma_supported(struct device *dev, u64 mask); 43 + 41 44 /* 42 45 * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent 43 46 * implementations, we don't provide a dma_cache_sync function so drivers using 44 47 * this API are highlighted with build warnings. 45 48 */ 46 - 47 49 #include <asm-generic/dma-mapping-common.h> 48 50 49 51 static inline int dma_set_mask(struct device *dev, u64 mask) ··· 173 171 } 174 172 175 173 static inline void dma_mark_clean(void *addr, size_t size) { } 176 - 177 - extern int dma_supported(struct device *dev, u64 mask); 178 174 179 175 extern int arm_dma_set_mask(struct device *dev, u64 dma_mask); 180 176
-6
arch/arm64/include/asm/dma-mapping.h
··· 84 84 return (phys_addr_t)dev_addr; 85 85 } 86 86 87 - static inline int dma_supported(struct device *dev, u64 mask) 88 - { 89 - struct dma_map_ops *ops = get_dma_ops(dev); 90 - return ops->dma_supported(dev, mask); 91 - } 92 - 93 87 static inline int dma_set_mask(struct device *dev, u64 mask) 94 88 { 95 89 if (!dev->dma_mask || !dma_supported(dev, mask))
-5
arch/h8300/include/asm/dma-mapping.h
··· 10 10 11 11 #include <asm-generic/dma-mapping-common.h> 12 12 13 - static inline int dma_supported(struct device *dev, u64 mask) 14 - { 15 - return 0; 16 - } 17 - 18 13 static inline int dma_set_mask(struct device *dev, u64 mask) 19 14 { 20 15 return 0;
+1
arch/hexagon/include/asm/dma-mapping.h
··· 43 43 return dma_ops; 44 44 } 45 45 46 + #define HAVE_ARCH_DMA_SUPPORTED 1 46 47 extern int dma_supported(struct device *dev, u64 mask); 47 48 extern int dma_set_mask(struct device *dev, u64 mask); 48 49 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
-6
arch/ia64/include/asm/dma-mapping.h
··· 27 27 28 28 #include <asm-generic/dma-mapping-common.h> 29 29 30 - static inline int dma_supported(struct device *dev, u64 mask) 31 - { 32 - struct dma_map_ops *ops = platform_dma_get_ops(dev); 33 - return ops->dma_supported(dev, mask); 34 - } 35 - 36 30 static inline int 37 31 dma_set_mask (struct device *dev, u64 mask) 38 32 {
+1 -12
arch/microblaze/include/asm/dma-mapping.h
··· 44 44 return &dma_direct_ops; 45 45 } 46 46 47 - static inline int dma_supported(struct device *dev, u64 mask) 48 - { 49 - struct dma_map_ops *ops = get_dma_ops(dev); 50 - 51 - if (unlikely(!ops)) 52 - return 0; 53 - if (!ops->dma_supported) 54 - return 1; 55 - return ops->dma_supported(dev, mask); 56 - } 47 + #include <asm-generic/dma-mapping-common.h> 57 48 58 49 static inline int dma_set_mask(struct device *dev, u64 dma_mask) 59 50 { ··· 59 68 *dev->dma_mask = dma_mask; 60 69 return 0; 61 70 } 62 - 63 - #include <asm-generic/dma-mapping-common.h> 64 71 65 72 static inline void __dma_sync(unsigned long paddr, 66 73 size_t size, enum dma_data_direction direction)
-6
arch/mips/include/asm/dma-mapping.h
··· 31 31 32 32 #include <asm-generic/dma-mapping-common.h> 33 33 34 - static inline int dma_supported(struct device *dev, u64 mask) 35 - { 36 - struct dma_map_ops *ops = get_dma_ops(dev); 37 - return ops->dma_supported(dev, mask); 38 - } 39 - 40 34 static inline int 41 35 dma_set_mask(struct device *dev, u64 mask) 42 36 {
+3 -2
arch/openrisc/include/asm/dma-mapping.h
··· 35 35 return &or1k_dma_map_ops; 36 36 } 37 37 38 - #include <asm-generic/dma-mapping-common.h> 39 - 38 + #define HAVE_ARCH_DMA_SUPPORTED 1 40 39 static inline int dma_supported(struct device *dev, u64 dma_mask) 41 40 { 42 41 /* Support 32 bit DMA mask exclusively */ 43 42 return dma_mask == DMA_BIT_MASK(32); 44 43 } 44 + 45 + #include <asm-generic/dma-mapping-common.h> 45 46 46 47 static inline int dma_set_mask(struct device *dev, u64 dma_mask) 47 48 {
-11
arch/powerpc/include/asm/dma-mapping.h
··· 124 124 125 125 #include <asm-generic/dma-mapping-common.h> 126 126 127 - static inline int dma_supported(struct device *dev, u64 mask) 128 - { 129 - struct dma_map_ops *dma_ops = get_dma_ops(dev); 130 - 131 - if (unlikely(dma_ops == NULL)) 132 - return 0; 133 - if (dma_ops->dma_supported == NULL) 134 - return 1; 135 - return dma_ops->dma_supported(dev, mask); 136 - } 137 - 138 127 extern int dma_set_mask(struct device *dev, u64 dma_mask); 139 128 extern int __dma_set_mask(struct device *dev, u64 dma_mask); 140 129 extern u64 __dma_get_required_mask(struct device *dev);
-9
arch/s390/include/asm/dma-mapping.h
··· 27 27 28 28 #include <asm-generic/dma-mapping-common.h> 29 29 30 - static inline int dma_supported(struct device *dev, u64 mask) 31 - { 32 - struct dma_map_ops *dma_ops = get_dma_ops(dev); 33 - 34 - if (dma_ops->dma_supported == NULL) 35 - return 1; 36 - return dma_ops->dma_supported(dev, mask); 37 - } 38 - 39 30 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 40 31 { 41 32 if (!dev->dma_mask)
-10
arch/sh/include/asm/dma-mapping.h
··· 13 13 14 14 #include <asm-generic/dma-mapping-common.h> 15 15 16 - static inline int dma_supported(struct device *dev, u64 mask) 17 - { 18 - struct dma_map_ops *ops = get_dma_ops(dev); 19 - 20 - if (ops->dma_supported) 21 - return ops->dma_supported(dev, mask); 22 - 23 - return 1; 24 - } 25 - 26 16 static inline int dma_set_mask(struct device *dev, u64 mask) 27 17 { 28 18 struct dma_map_ops *ops = get_dma_ops(dev);
+1
arch/sparc/include/asm/dma-mapping.h
··· 7 7 8 8 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 9 9 10 + #define HAVE_ARCH_DMA_SUPPORTED 1 10 11 int dma_supported(struct device *dev, u64 mask); 11 12 12 13 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
-6
arch/tile/include/asm/dma-mapping.h
··· 75 75 } 76 76 77 77 static inline int 78 - dma_supported(struct device *dev, u64 mask) 79 - { 80 - return get_dma_ops(dev)->dma_supported(dev, mask); 81 - } 82 - 83 - static inline int 84 78 dma_set_mask(struct device *dev, u64 mask) 85 79 { 86 80 struct dma_map_ops *dma_ops = get_dma_ops(dev);
-10
arch/unicore32/include/asm/dma-mapping.h
··· 28 28 return &swiotlb_dma_map_ops; 29 29 } 30 30 31 - static inline int dma_supported(struct device *dev, u64 mask) 32 - { 33 - struct dma_map_ops *dma_ops = get_dma_ops(dev); 34 - 35 - if (unlikely(dma_ops == NULL)) 36 - return 0; 37 - 38 - return dma_ops->dma_supported(dev, mask); 39 - } 40 - 41 31 #include <asm-generic/dma-mapping-common.h> 42 32 43 33 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+3 -1
arch/x86/include/asm/dma-mapping.h
··· 43 43 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); 44 44 #define arch_dma_alloc_attrs arch_dma_alloc_attrs 45 45 46 + #define HAVE_ARCH_DMA_SUPPORTED 1 47 + extern int dma_supported(struct device *hwdev, u64 mask); 48 + 46 49 #include <asm-generic/dma-mapping-common.h> 47 50 48 - extern int dma_supported(struct device *hwdev, u64 mask); 49 51 extern int dma_set_mask(struct device *dev, u64 mask); 50 52 51 53 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-6
arch/xtensa/include/asm/dma-mapping.h
··· 33 33 #include <asm-generic/dma-mapping-common.h> 34 34 35 35 static inline int 36 - dma_supported(struct device *dev, u64 mask) 37 - { 38 - return 1; 39 - } 40 - 41 - static inline int 42 36 dma_set_mask(struct device *dev, u64 mask) 43 37 { 44 38 if(!dev->dma_mask || !dma_supported(dev, mask))
+13
include/asm-generic/dma-mapping-common.h
··· 327 327 #endif 328 328 } 329 329 330 + #ifndef HAVE_ARCH_DMA_SUPPORTED 331 + static inline int dma_supported(struct device *dev, u64 mask) 332 + { 333 + struct dma_map_ops *ops = get_dma_ops(dev); 334 + 335 + if (!ops) 336 + return 0; 337 + if (!ops->dma_supported) 338 + return 1; 339 + return ops->dma_supported(dev, mask); 340 + } 341 + #endif 342 + 330 343 #endif