Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: consolidate dma_set_mask

Almost everyone implements dma_set_mask the same way, although sometimes
that's hidden in ->set_dma_mask methods.

This patch consolidates those into a common implementation that either
calls ->set_dma_mask if present or otherwise uses the default
implementation. Some architectures used to only call ->set_dma_mask
after the initial checks, and those instances have been fixed to do the
full work. h8300 implemented dma_set_mask bogusly as a no-op and has
been fixed.

Unfortunately some architectures overload dma_set_mask with unrelated
semantics, such as changing the dma_ops, so we still need to allow for
an architecture override for now.

[jcmvbkbc@gmail.com: fix xtensa]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Christoph Hellwig and committed by
Linus Torvalds
452e06af ee196371

+28 -169
-5
arch/alpha/include/asm/dma-mapping.h
··· 12 12 13 13 #include <asm-generic/dma-mapping-common.h> 14 14 15 - static inline int dma_set_mask(struct device *dev, u64 mask) 16 - { 17 - return get_dma_ops(dev)->set_dma_mask(dev, mask); 18 - } 19 - 20 15 #define dma_cache_sync(dev, va, size, dir) ((void)0) 21 16 22 17 #endif /* _ALPHA_DMA_MAPPING_H */
-10
arch/alpha/kernel/pci-noop.c
··· 166 166 return mask < 0x00ffffffUL ? 0 : 1; 167 167 } 168 168 169 - static int alpha_noop_set_mask(struct device *dev, u64 mask) 170 - { 171 - if (!dev->dma_mask || !dma_supported(dev, mask)) 172 - return -EIO; 173 - 174 - *dev->dma_mask = mask; 175 - return 0; 176 - } 177 - 178 169 struct dma_map_ops alpha_noop_ops = { 179 170 .alloc = alpha_noop_alloc_coherent, 180 171 .free = alpha_noop_free_coherent, ··· 173 182 .map_sg = alpha_noop_map_sg, 174 183 .mapping_error = alpha_noop_mapping_error, 175 184 .dma_supported = alpha_noop_supported, 176 - .set_dma_mask = alpha_noop_set_mask, 177 185 }; 178 186 179 187 struct dma_map_ops *dma_ops = &alpha_noop_ops;
-11
arch/alpha/kernel/pci_iommu.c
··· 939 939 return dma_addr == 0; 940 940 } 941 941 942 - static int alpha_pci_set_mask(struct device *dev, u64 mask) 943 - { 944 - if (!dev->dma_mask || 945 - !pci_dma_supported(alpha_gendev_to_pci(dev), mask)) 946 - return -EIO; 947 - 948 - *dev->dma_mask = mask; 949 - return 0; 950 - } 951 - 952 942 struct dma_map_ops alpha_pci_ops = { 953 943 .alloc = alpha_pci_alloc_coherent, 954 944 .free = alpha_pci_free_coherent, ··· 948 958 .unmap_sg = alpha_pci_unmap_sg, 949 959 .mapping_error = alpha_pci_mapping_error, 950 960 .dma_supported = alpha_pci_supported, 951 - .set_dma_mask = alpha_pci_set_mask, 952 961 }; 953 962 954 963 struct dma_map_ops *dma_ops = &alpha_pci_ops;
-5
arch/arm/include/asm/dma-mapping.h
··· 48 48 */ 49 49 #include <asm-generic/dma-mapping-common.h> 50 50 51 - static inline int dma_set_mask(struct device *dev, u64 mask) 52 - { 53 - return get_dma_ops(dev)->set_dma_mask(dev, mask); 54 - } 55 - 56 51 #ifdef __arch_page_to_dma 57 52 #error Please update to __arch_pfn_to_dma 58 53 #endif
-9
arch/arm64/include/asm/dma-mapping.h
··· 84 84 return (phys_addr_t)dev_addr; 85 85 } 86 86 87 - static inline int dma_set_mask(struct device *dev, u64 mask) 88 - { 89 - if (!dev->dma_mask || !dma_supported(dev, mask)) 90 - return -EIO; 91 - *dev->dma_mask = mask; 92 - 93 - return 0; 94 - } 95 - 96 87 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 97 88 { 98 89 if (!dev->dma_mask)
-5
arch/h8300/include/asm/dma-mapping.h
··· 10 10 11 11 #include <asm-generic/dma-mapping-common.h> 12 12 13 - static inline int dma_set_mask(struct device *dev, u64 mask) 14 - { 15 - return 0; 16 - } 17 - 18 13 #endif
-1
arch/hexagon/include/asm/dma-mapping.h
··· 45 45 46 46 #define HAVE_ARCH_DMA_SUPPORTED 1 47 47 extern int dma_supported(struct device *dev, u64 mask); 48 - extern int dma_set_mask(struct device *dev, u64 mask); 49 48 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle); 50 49 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 51 50 enum dma_data_direction direction);
-11
arch/hexagon/kernel/dma.c
··· 44 44 } 45 45 EXPORT_SYMBOL(dma_supported); 46 46 47 - int dma_set_mask(struct device *dev, u64 mask) 48 - { 49 - if (!dev->dma_mask || !dma_supported(dev, mask)) 50 - return -EIO; 51 - 52 - *dev->dma_mask = mask; 53 - 54 - return 0; 55 - } 56 - EXPORT_SYMBOL(dma_set_mask); 57 - 58 47 static struct gen_pool *coherent_pool; 59 48 60 49
-9
arch/ia64/include/asm/dma-mapping.h
··· 27 27 28 28 #include <asm-generic/dma-mapping-common.h> 29 29 30 - static inline int 31 - dma_set_mask (struct device *dev, u64 mask) 32 - { 33 - if (!dev->dma_mask || !dma_supported(dev, mask)) 34 - return -EIO; 35 - *dev->dma_mask = mask; 36 - return 0; 37 - } 38 - 39 30 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 40 31 { 41 32 if (!dev->dma_mask)
-14
arch/microblaze/include/asm/dma-mapping.h
··· 46 46 47 47 #include <asm-generic/dma-mapping-common.h> 48 48 49 - static inline int dma_set_mask(struct device *dev, u64 dma_mask) 50 - { 51 - struct dma_map_ops *ops = get_dma_ops(dev); 52 - 53 - if (unlikely(ops == NULL)) 54 - return -EIO; 55 - if (ops->set_dma_mask) 56 - return ops->set_dma_mask(dev, dma_mask); 57 - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 58 - return -EIO; 59 - *dev->dma_mask = dma_mask; 60 - return 0; 61 - } 62 - 63 49 static inline void __dma_sync(unsigned long paddr, 64 50 size_t size, enum dma_data_direction direction) 65 51 {
-16
arch/mips/include/asm/dma-mapping.h
··· 31 31 32 32 #include <asm-generic/dma-mapping-common.h> 33 33 34 - static inline int 35 - dma_set_mask(struct device *dev, u64 mask) 36 - { 37 - struct dma_map_ops *ops = get_dma_ops(dev); 38 - 39 - if(!dev->dma_mask || !dma_supported(dev, mask)) 40 - return -EIO; 41 - 42 - if (ops->set_dma_mask) 43 - return ops->set_dma_mask(dev, mask); 44 - 45 - *dev->dma_mask = mask; 46 - 47 - return 0; 48 - } 49 - 50 34 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 51 35 enum dma_data_direction direction); 52 36
+3
arch/mips/loongson64/common/dma-swiotlb.c
··· 85 85 86 86 static int loongson_dma_set_mask(struct device *dev, u64 mask) 87 87 { 88 + if (!dev->dma_mask || !dma_supported(dev, mask)) 89 + return -EIO; 90 + 88 91 if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) { 89 92 *dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits); 90 93 return -EIO;
-9
arch/openrisc/include/asm/dma-mapping.h
··· 44 44 45 45 #include <asm-generic/dma-mapping-common.h> 46 46 47 - static inline int dma_set_mask(struct device *dev, u64 dma_mask) 48 - { 49 - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 50 - return -EIO; 51 - 52 - *dev->dma_mask = dma_mask; 53 - 54 - return 0; 55 - } 56 47 #endif /* __ASM_OPENRISC_DMA_MAPPING_H */
+3 -1
arch/powerpc/include/asm/dma-mapping.h
··· 122 122 /* this will be removed soon */ 123 123 #define flush_write_buffers() 124 124 125 + #define HAVE_ARCH_DMA_SET_MASK 1 126 + extern int dma_set_mask(struct device *dev, u64 dma_mask); 127 + 125 128 #include <asm-generic/dma-mapping-common.h> 126 129 127 - extern int dma_set_mask(struct device *dev, u64 dma_mask); 128 130 extern int __dma_set_mask(struct device *dev, u64 dma_mask); 129 131 extern u64 __dma_get_required_mask(struct device *dev); 130 132
-2
arch/s390/include/asm/dma-mapping.h
··· 18 18 return &s390_dma_ops; 19 19 } 20 20 21 - extern int dma_set_mask(struct device *dev, u64 mask); 22 - 23 21 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 24 22 enum dma_data_direction direction) 25 23 {
-10
arch/s390/pci/pci_dma.c
··· 262 262 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); 263 263 } 264 264 265 - int dma_set_mask(struct device *dev, u64 mask) 266 - { 267 - if (!dev->dma_mask || !dma_supported(dev, mask)) 268 - return -EIO; 269 - 270 - *dev->dma_mask = mask; 271 - return 0; 272 - } 273 - EXPORT_SYMBOL_GPL(dma_set_mask); 274 - 275 265 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, 276 266 unsigned long offset, size_t size, 277 267 enum dma_data_direction direction,
-14
arch/sh/include/asm/dma-mapping.h
··· 13 13 14 14 #include <asm-generic/dma-mapping-common.h> 15 15 16 - static inline int dma_set_mask(struct device *dev, u64 mask) 17 - { 18 - struct dma_map_ops *ops = get_dma_ops(dev); 19 - 20 - if (!dev->dma_mask || !dma_supported(dev, mask)) 21 - return -EIO; 22 - if (ops->set_dma_mask) 23 - return ops->set_dma_mask(dev, mask); 24 - 25 - *dev->dma_mask = mask; 26 - 27 - return 0; 28 - } 29 - 30 16 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 31 17 enum dma_data_direction dir); 32 18
+3 -1
arch/sparc/include/asm/dma-mapping.h
··· 37 37 return dma_ops; 38 38 } 39 39 40 - #include <asm-generic/dma-mapping-common.h> 40 + #define HAVE_ARCH_DMA_SET_MASK 1 41 41 42 42 static inline int dma_set_mask(struct device *dev, u64 mask) 43 43 { ··· 51 51 #endif 52 52 return -EINVAL; 53 53 } 54 + 55 + #include <asm-generic/dma-mapping-common.h> 54 56 55 57 #endif
+4 -2
arch/tile/include/asm/dma-mapping.h
··· 59 59 60 60 static inline void dma_mark_clean(void *addr, size_t size) {} 61 61 62 - #include <asm-generic/dma-mapping-common.h> 63 - 64 62 static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) 65 63 { 66 64 dev->archdata.dma_ops = ops; ··· 71 73 72 74 return addr + size - 1 <= *dev->dma_mask; 73 75 } 76 + 77 + #define HAVE_ARCH_DMA_SET_MASK 1 78 + 79 + #include <asm-generic/dma-mapping-common.h> 74 80 75 81 static inline int 76 82 dma_set_mask(struct device *dev, u64 mask)
-10
arch/unicore32/include/asm/dma-mapping.h
··· 50 50 51 51 static inline void dma_mark_clean(void *addr, size_t size) {} 52 52 53 - static inline int dma_set_mask(struct device *dev, u64 dma_mask) 54 - { 55 - if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 56 - return -EIO; 57 - 58 - *dev->dma_mask = dma_mask; 59 - 60 - return 0; 61 - } 62 - 63 53 static inline void dma_cache_sync(struct device *dev, void *vaddr, 64 54 size_t size, enum dma_data_direction direction) 65 55 {
-2
arch/x86/include/asm/dma-mapping.h
··· 48 48 49 49 #include <asm-generic/dma-mapping-common.h> 50 50 51 - extern int dma_set_mask(struct device *dev, u64 mask); 52 - 53 51 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 54 52 dma_addr_t *dma_addr, gfp_t flag, 55 53 struct dma_attrs *attrs);
-11
arch/x86/kernel/pci-dma.c
··· 58 58 /* Number of entries preallocated for DMA-API debugging */ 59 59 #define PREALLOC_DMA_DEBUG_ENTRIES 65536 60 60 61 - int dma_set_mask(struct device *dev, u64 mask) 62 - { 63 - if (!dev->dma_mask || !dma_supported(dev, mask)) 64 - return -EIO; 65 - 66 - *dev->dma_mask = mask; 67 - 68 - return 0; 69 - } 70 - EXPORT_SYMBOL(dma_set_mask); 71 - 72 61 void __init pci_iommu_alloc(void) 73 62 { 74 63 struct iommu_table_entry *p;
-11
arch/xtensa/include/asm/dma-mapping.h
··· 32 32 33 33 #include <asm-generic/dma-mapping-common.h> 34 34 35 - static inline int 36 - dma_set_mask(struct device *dev, u64 mask) 37 - { 38 - if(!dev->dma_mask || !dma_supported(dev, mask)) 39 - return -EIO; 40 - 41 - *dev->dma_mask = mask; 42 - 43 - return 0; 44 - } 45 - 46 35 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 47 36 enum dma_data_direction direction); 48 37
+15
include/asm-generic/dma-mapping-common.h
··· 340 340 } 341 341 #endif 342 342 343 + #ifndef HAVE_ARCH_DMA_SET_MASK 344 + static inline int dma_set_mask(struct device *dev, u64 mask) 345 + { 346 + struct dma_map_ops *ops = get_dma_ops(dev); 347 + 348 + if (ops->set_dma_mask) 349 + return ops->set_dma_mask(dev, mask); 350 + 351 + if (!dev->dma_mask || !dma_supported(dev, mask)) 352 + return -EIO; 353 + *dev->dma_mask = mask; 354 + return 0; 355 + } 356 + #endif 357 + 343 358 #endif