Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping

Pull dma mapping updates from Christoph Hellwig:
"Except for a runtime warning fix from Christian this is all about
consolidation of the generic no-IOMMU code, as well as the glue code
for swiotlb.

All the code is based on the x86 implementation with hooks to allow
all architectures that aren't cache coherent to use it.

The x86 conversion itself has been deferred because the x86
maintainers were a little busy in the last months"

* tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping: (57 commits)
MAINTAINERS: add the iommu list for swiotlb and xen-swiotlb
arm64: use swiotlb_alloc and swiotlb_free
arm64: replace ZONE_DMA with ZONE_DMA32
mips: use swiotlb_{alloc,free}
mips/netlogic: remove swiotlb support
tile: use generic swiotlb_ops
tile: replace ZONE_DMA with ZONE_DMA32
unicore32: use generic swiotlb_ops
ia64: remove an ifdef around the content of pci-dma.c
ia64: clean up swiotlb support
ia64: use generic swiotlb_ops
ia64: replace ZONE_DMA with ZONE_DMA32
swiotlb: remove various exports
swiotlb: refactor coherent buffer allocation
swiotlb: refactor coherent buffer freeing
swiotlb: wire up ->dma_supported in swiotlb_dma_ops
swiotlb: add common swiotlb_map_ops
swiotlb: rename swiotlb_free to swiotlb_exit
x86: rename swiotlb_dma_ops
powerpc: rename swiotlb_dma_ops
...

+731 -1269
+5 -2
MAINTAINERS
··· 4343 4343 W: http://git.infradead.org/users/hch/dma-mapping.git 4344 4344 S: Supported 4345 4345 F: lib/dma-debug.c 4346 - F: lib/dma-noop.c 4346 + F: lib/dma-direct.c 4347 4347 F: lib/dma-virt.c 4348 4348 F: drivers/base/dma-mapping.c 4349 4349 F: drivers/base/dma-coherent.c 4350 + F: include/asm-generic/dma-mapping.h 4351 + F: include/linux/dma-direct.h 4350 4352 F: include/linux/dma-mapping.h 4351 4353 4352 4354 DME1737 HARDWARE MONITOR DRIVER ··· 13073 13071 13074 13072 SWIOTLB SUBSYSTEM 13075 13073 M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 13076 - L: linux-kernel@vger.kernel.org 13074 + L: iommu@lists.linux-foundation.org 13077 13075 T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git 13078 13076 S: Supported 13079 13077 F: lib/swiotlb.c ··· 15028 15026 XEN SWIOTLB SUBSYSTEM 15029 15027 M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 15030 15028 L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 15029 + L: iommu@lists.linux-foundation.org 15031 15030 S: Supported 15032 15031 F: arch/x86/xen/*swiotlb* 15033 15032 F: drivers/xen/*swiotlb*
+4
arch/Kconfig
··· 938 938 and non-text memory will be made non-executable. This provides 939 939 protection against certain security exploits (e.g. writing to text) 940 940 941 + # select if the architecture provides an asm/dma-direct.h header 942 + config ARCH_HAS_PHYS_TO_DMA 943 + bool 944 + 941 945 config ARCH_HAS_REFCOUNT 942 946 bool 943 947 help
+1
arch/alpha/Kconfig
··· 209 209 210 210 config ALPHA_JENSEN 211 211 bool "Jensen" 212 + depends on BROKEN 212 213 help 213 214 DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one 214 215 of the first-generation Alpha systems. A number of these systems
-3
arch/arc/Kconfig
··· 463 463 config ARCH_DMA_ADDR_T_64BIT 464 464 bool 465 465 466 - config ARC_PLAT_NEEDS_PHYS_TO_DMA 467 - bool 468 - 469 466 config ARC_KVADDR_SIZE 470 467 int "Kernel Virtual Address Space size (MB)" 471 468 range 0 512
-7
arch/arc/include/asm/dma-mapping.h
··· 11 11 #ifndef ASM_ARC_DMA_MAPPING_H 12 12 #define ASM_ARC_DMA_MAPPING_H 13 13 14 - #ifndef CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA 15 - #define plat_dma_to_phys(dev, dma_handle) ((phys_addr_t)(dma_handle)) 16 - #define plat_phys_to_dma(dev, paddr) ((dma_addr_t)(paddr)) 17 - #else 18 - #include <plat/dma.h> 19 - #endif 20 - 21 14 extern const struct dma_map_ops arc_dma_ops; 22 15 23 16 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+7 -7
arch/arc/mm/dma.c
··· 60 60 /* This is linear addr (0x8000_0000 based) */ 61 61 paddr = page_to_phys(page); 62 62 63 - *dma_handle = plat_phys_to_dma(dev, paddr); 63 + *dma_handle = paddr; 64 64 65 65 /* This is kernel Virtual address (0x7000_0000 based) */ 66 66 if (need_kvaddr) { ··· 92 92 static void arc_dma_free(struct device *dev, size_t size, void *vaddr, 93 93 dma_addr_t dma_handle, unsigned long attrs) 94 94 { 95 - phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle); 95 + phys_addr_t paddr = dma_handle; 96 96 struct page *page = virt_to_page(paddr); 97 97 int is_non_coh = 1; 98 98 ··· 111 111 { 112 112 unsigned long user_count = vma_pages(vma); 113 113 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; 114 - unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr)); 114 + unsigned long pfn = __phys_to_pfn(dma_addr); 115 115 unsigned long off = vma->vm_pgoff; 116 116 int ret = -ENXIO; 117 117 ··· 175 175 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 176 176 _dma_cache_sync(paddr, size, dir); 177 177 178 - return plat_phys_to_dma(dev, paddr); 178 + return paddr; 179 179 } 180 180 181 181 /* ··· 190 190 size_t size, enum dma_data_direction dir, 191 191 unsigned long attrs) 192 192 { 193 - phys_addr_t paddr = plat_dma_to_phys(dev, handle); 193 + phys_addr_t paddr = handle; 194 194 195 195 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 196 196 _dma_cache_sync(paddr, size, dir); ··· 224 224 static void arc_dma_sync_single_for_cpu(struct device *dev, 225 225 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) 226 226 { 227 - _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE); 227 + _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE); 228 228 } 229 229 230 230 static void arc_dma_sync_single_for_device(struct device *dev, 231 231 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) 232 232 { 233 - _dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE); 233 + _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE); 234 234 } 235 
235 236 236 static void arc_dma_sync_sg_for_cpu(struct device *dev,
+2 -1
arch/arm/Kconfig
··· 8 8 select ARCH_HAS_DEVMEM_IS_ALLOWED 9 9 select ARCH_HAS_ELF_RANDOMIZE 10 10 select ARCH_HAS_SET_MEMORY 11 + select ARCH_HAS_PHYS_TO_DMA 11 12 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL 12 13 select ARCH_HAS_STRICT_MODULE_RWX if MMU 13 14 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST ··· 25 24 select CLONE_BACKWARDS 26 25 select CPU_PM if (SUSPEND || CPU_IDLE) 27 26 select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS 28 - select DMA_NOOP_OPS if !MMU 27 + select DMA_DIRECT_OPS if !MMU 29 28 select EDAC_SUPPORT 30 29 select EDAC_ATOMIC_SCRUB 31 30 select GENERIC_ALLOCATOR
+36
arch/arm/include/asm/dma-direct.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef ASM_ARM_DMA_DIRECT_H 3 + #define ASM_ARM_DMA_DIRECT_H 1 4 + 5 + static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 6 + { 7 + unsigned int offset = paddr & ~PAGE_MASK; 8 + return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset; 9 + } 10 + 11 + static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) 12 + { 13 + unsigned int offset = dev_addr & ~PAGE_MASK; 14 + return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset; 15 + } 16 + 17 + static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 18 + { 19 + u64 limit, mask; 20 + 21 + if (!dev->dma_mask) 22 + return 0; 23 + 24 + mask = *dev->dma_mask; 25 + 26 + limit = (mask + 1) & ~mask; 27 + if (limit && size > limit) 28 + return 0; 29 + 30 + if ((addr | (addr + size - 1)) & ~mask) 31 + return 0; 32 + 33 + return 1; 34 + } 35 + 36 + #endif /* ASM_ARM_DMA_DIRECT_H */
+1 -34
arch/arm/include/asm/dma-mapping.h
··· 18 18 19 19 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 20 20 { 21 - return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops; 21 + return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops; 22 22 } 23 23 24 24 #ifdef __arch_page_to_dma ··· 108 108 { 109 109 return dev->archdata.dma_coherent; 110 110 } 111 - 112 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 113 - { 114 - unsigned int offset = paddr & ~PAGE_MASK; 115 - return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset; 116 - } 117 - 118 - static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) 119 - { 120 - unsigned int offset = dev_addr & ~PAGE_MASK; 121 - return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset; 122 - } 123 - 124 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 125 - { 126 - u64 limit, mask; 127 - 128 - if (!dev->dma_mask) 129 - return 0; 130 - 131 - mask = *dev->dma_mask; 132 - 133 - limit = (mask + 1) & ~mask; 134 - if (limit && size > limit) 135 - return 0; 136 - 137 - if ((addr | (addr + size - 1)) & ~mask) 138 - return 0; 139 - 140 - return 1; 141 - } 142 - 143 - static inline void dma_mark_clean(void *addr, size_t size) { } 144 111 145 112 /** 146 113 * arm_dma_alloc - allocate consistent memory for DMA
+5 -8
arch/arm/mm/dma-mapping-nommu.c
··· 11 11 12 12 #include <linux/export.h> 13 13 #include <linux/mm.h> 14 - #include <linux/dma-mapping.h> 14 + #include <linux/dma-direct.h> 15 15 #include <linux/scatterlist.h> 16 16 17 17 #include <asm/cachetype.h> ··· 22 22 #include "dma.h" 23 23 24 24 /* 25 - * dma_noop_ops is used if 25 + * dma_direct_ops is used if 26 26 * - MMU/MPU is off 27 27 * - cpu is v7m w/o cache support 28 28 * - device is coherent ··· 39 39 unsigned long attrs) 40 40 41 41 { 42 - const struct dma_map_ops *ops = &dma_noop_ops; 43 42 void *ret; 44 43 45 44 /* ··· 47 48 */ 48 49 49 50 if (attrs & DMA_ATTR_NON_CONSISTENT) 50 - return ops->alloc(dev, size, dma_handle, gfp, attrs); 51 + return dma_direct_alloc(dev, size, dma_handle, gfp, attrs); 51 52 52 53 ret = dma_alloc_from_global_coherent(size, dma_handle); 53 54 ··· 69 70 void *cpu_addr, dma_addr_t dma_addr, 70 71 unsigned long attrs) 71 72 { 72 - const struct dma_map_ops *ops = &dma_noop_ops; 73 - 74 73 if (attrs & DMA_ATTR_NON_CONSISTENT) { 75 - ops->free(dev, size, cpu_addr, dma_addr, attrs); 74 + dma_direct_free(dev, size, cpu_addr, dma_addr, attrs); 76 75 } else { 77 76 int ret = dma_release_from_global_coherent(get_order(size), 78 77 cpu_addr); ··· 210 213 211 214 static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent) 212 215 { 213 - return coherent ? &dma_noop_ops : &arm_nommu_dma_ops; 216 + return coherent ? &dma_direct_ops : &arm_nommu_dma_ops; 214 217 } 215 218 216 219 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+2 -1
arch/arm64/Kconfig
··· 59 59 select COMMON_CLK 60 60 select CPU_PM if (SUSPEND || CPU_IDLE) 61 61 select DCACHE_WORD_ACCESS 62 + select DMA_DIRECT_OPS 62 63 select EDAC_SUPPORT 63 64 select FRAME_POINTER 64 65 select GENERIC_ALLOCATOR ··· 228 227 config GENERIC_CALIBRATE_DELAY 229 228 def_bool y 230 229 231 - config ZONE_DMA 230 + config ZONE_DMA32 232 231 def_bool y 233 232 234 233 config HAVE_GENERIC_GUP
-35
arch/arm64/include/asm/dma-mapping.h
··· 50 50 return dev->archdata.dma_coherent; 51 51 } 52 52 53 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 54 - { 55 - dma_addr_t dev_addr = (dma_addr_t)paddr; 56 - 57 - return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); 58 - } 59 - 60 - static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) 61 - { 62 - phys_addr_t paddr = (phys_addr_t)dev_addr; 63 - 64 - return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); 65 - } 66 - 67 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 68 - { 69 - if (!dev->dma_mask) 70 - return false; 71 - 72 - return addr + size - 1 <= *dev->dma_mask; 73 - } 74 - 75 - static inline void dma_mark_clean(void *addr, size_t size) 76 - { 77 - } 78 - 79 - /* Override for dma_max_pfn() */ 80 - static inline unsigned long dma_max_pfn(struct device *dev) 81 - { 82 - dma_addr_t dma_max = (dma_addr_t)*dev->dma_mask; 83 - 84 - return (ulong)dma_to_phys(dev, dma_max) >> PAGE_SHIFT; 85 - } 86 - #define dma_max_pfn(dev) dma_max_pfn(dev) 87 - 88 53 #endif /* __KERNEL__ */ 89 54 #endif /* __ASM_DMA_MAPPING_H */
+7 -47
arch/arm64/mm/dma-mapping.c
··· 24 24 #include <linux/export.h> 25 25 #include <linux/slab.h> 26 26 #include <linux/genalloc.h> 27 - #include <linux/dma-mapping.h> 27 + #include <linux/dma-direct.h> 28 28 #include <linux/dma-contiguous.h> 29 29 #include <linux/vmalloc.h> 30 30 #include <linux/swiotlb.h> ··· 91 91 return 1; 92 92 } 93 93 94 - static void *__dma_alloc_coherent(struct device *dev, size_t size, 95 - dma_addr_t *dma_handle, gfp_t flags, 96 - unsigned long attrs) 97 - { 98 - if (IS_ENABLED(CONFIG_ZONE_DMA) && 99 - dev->coherent_dma_mask <= DMA_BIT_MASK(32)) 100 - flags |= GFP_DMA; 101 - if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) { 102 - struct page *page; 103 - void *addr; 104 - 105 - page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, 106 - get_order(size), flags); 107 - if (!page) 108 - return NULL; 109 - 110 - *dma_handle = phys_to_dma(dev, page_to_phys(page)); 111 - addr = page_address(page); 112 - memset(addr, 0, size); 113 - return addr; 114 - } else { 115 - return swiotlb_alloc_coherent(dev, size, dma_handle, flags); 116 - } 117 - } 118 - 119 - static void __dma_free_coherent(struct device *dev, size_t size, 120 - void *vaddr, dma_addr_t dma_handle, 121 - unsigned long attrs) 122 - { 123 - bool freed; 124 - phys_addr_t paddr = dma_to_phys(dev, dma_handle); 125 - 126 - 127 - freed = dma_release_from_contiguous(dev, 128 - phys_to_page(paddr), 129 - size >> PAGE_SHIFT); 130 - if (!freed) 131 - swiotlb_free_coherent(dev, size, vaddr, dma_handle); 132 - } 133 - 134 94 static void *__dma_alloc(struct device *dev, size_t size, 135 95 dma_addr_t *dma_handle, gfp_t flags, 136 96 unsigned long attrs) ··· 112 152 return addr; 113 153 } 114 154 115 - ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs); 155 + ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs); 116 156 if (!ptr) 117 157 goto no_mem; 118 158 ··· 133 173 return coherent_ptr; 134 174 135 175 no_map: 136 - __dma_free_coherent(dev, size, ptr, *dma_handle, attrs); 176 + 
swiotlb_free(dev, size, ptr, *dma_handle, attrs); 137 177 no_mem: 138 178 return NULL; 139 179 } ··· 151 191 return; 152 192 vunmap(vaddr); 153 193 } 154 - __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs); 194 + swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs); 155 195 } 156 196 157 197 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, ··· 328 368 return 0; 329 369 } 330 370 331 - static const struct dma_map_ops swiotlb_dma_ops = { 371 + static const struct dma_map_ops arm64_swiotlb_dma_ops = { 332 372 .alloc = __dma_alloc, 333 373 .free = __dma_free, 334 374 .mmap = __swiotlb_mmap, ··· 357 397 page = dma_alloc_from_contiguous(NULL, nr_pages, 358 398 pool_size_order, GFP_KERNEL); 359 399 else 360 - page = alloc_pages(GFP_DMA, pool_size_order); 400 + page = alloc_pages(GFP_DMA32, pool_size_order); 361 401 362 402 if (page) { 363 403 int ret; ··· 883 923 const struct iommu_ops *iommu, bool coherent) 884 924 { 885 925 if (!dev->dma_ops) 886 - dev->dma_ops = &swiotlb_dma_ops; 926 + dev->dma_ops = &arm64_swiotlb_dma_ops; 887 927 888 928 dev->archdata.dma_coherent = coherent; 889 929 __iommu_setup_dma_ops(dev, dma_base, size, iommu);
+8 -8
arch/arm64/mm/init.c
··· 217 217 } 218 218 #endif /* CONFIG_CRASH_DUMP */ 219 219 /* 220 - * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It 220 + * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It 221 221 * currently assumes that for memory starting above 4G, 32-bit devices will 222 222 * use a DMA offset. 223 223 */ ··· 233 233 { 234 234 unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; 235 235 236 - if (IS_ENABLED(CONFIG_ZONE_DMA)) 237 - max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys()); 236 + if (IS_ENABLED(CONFIG_ZONE_DMA32)) 237 + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys()); 238 238 max_zone_pfns[ZONE_NORMAL] = max; 239 239 240 240 free_area_init_nodes(max_zone_pfns); ··· 251 251 memset(zone_size, 0, sizeof(zone_size)); 252 252 253 253 /* 4GB maximum for 32-bit only capable devices */ 254 - #ifdef CONFIG_ZONE_DMA 254 + #ifdef CONFIG_ZONE_DMA32 255 255 max_dma = PFN_DOWN(arm64_dma_phys_limit); 256 - zone_size[ZONE_DMA] = max_dma - min; 256 + zone_size[ZONE_DMA32] = max_dma - min; 257 257 #endif 258 258 zone_size[ZONE_NORMAL] = max - max_dma; 259 259 ··· 266 266 if (start >= max) 267 267 continue; 268 268 269 - #ifdef CONFIG_ZONE_DMA 269 + #ifdef CONFIG_ZONE_DMA32 270 270 if (start < max_dma) { 271 271 unsigned long dma_end = min(end, max_dma); 272 - zhole_size[ZONE_DMA] -= dma_end - start; 272 + zhole_size[ZONE_DMA32] -= dma_end - start; 273 273 } 274 274 #endif 275 275 if (end > max_dma) { ··· 470 470 early_init_fdt_scan_reserved_mem(); 471 471 472 472 /* 4GB maximum for 32-bit only capable devices */ 473 - if (IS_ENABLED(CONFIG_ZONE_DMA)) 473 + if (IS_ENABLED(CONFIG_ZONE_DMA32)) 474 474 arm64_dma_phys_limit = max_zone_dma_phys(); 475 475 else 476 476 arm64_dma_phys_limit = PHYS_MASK + 1;
+4
arch/cris/Kconfig
··· 33 33 config NO_IOPORT_MAP 34 34 def_bool y if !PCI 35 35 36 + config NO_DMA 37 + def_bool y if !PCI 38 + 36 39 config FORCE_MAX_ZONEORDER 37 40 int 38 41 default 6 ··· 75 72 select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32 76 73 select HAVE_DEBUG_BUGVERBOSE if ETRAX_ARCH_V32 77 74 select HAVE_NMI 75 + select DMA_DIRECT_OPS if PCI 78 76 79 77 config HZ 80 78 int
+1 -1
arch/cris/arch-v32/drivers/pci/Makefile
··· 2 2 # Makefile for Etrax cardbus driver 3 3 # 4 4 5 - obj-$(CONFIG_ETRAX_CARDBUS) += bios.o dma.o 5 + obj-$(CONFIG_ETRAX_CARDBUS) += bios.o
-80
arch/cris/arch-v32/drivers/pci/dma.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * Dynamic DMA mapping support. 4 - * 5 - * On cris there is no hardware dynamic DMA address translation, 6 - * so consistent alloc/free are merely page allocation/freeing. 7 - * The rest of the dynamic DMA mapping interface is implemented 8 - * in asm/pci.h. 9 - * 10 - * Borrowed from i386. 11 - */ 12 - 13 - #include <linux/types.h> 14 - #include <linux/mm.h> 15 - #include <linux/string.h> 16 - #include <linux/pci.h> 17 - #include <linux/gfp.h> 18 - #include <asm/io.h> 19 - 20 - static void *v32_dma_alloc(struct device *dev, size_t size, 21 - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 22 - { 23 - void *ret; 24 - 25 - /* ignore region specifiers */ 26 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 27 - 28 - if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) 29 - gfp |= GFP_DMA; 30 - 31 - ret = (void *)__get_free_pages(gfp, get_order(size)); 32 - 33 - if (ret != NULL) { 34 - memset(ret, 0, size); 35 - *dma_handle = virt_to_phys(ret); 36 - } 37 - return ret; 38 - } 39 - 40 - static void v32_dma_free(struct device *dev, size_t size, void *vaddr, 41 - dma_addr_t dma_handle, unsigned long attrs) 42 - { 43 - free_pages((unsigned long)vaddr, get_order(size)); 44 - } 45 - 46 - static inline dma_addr_t v32_dma_map_page(struct device *dev, 47 - struct page *page, unsigned long offset, size_t size, 48 - enum dma_data_direction direction, unsigned long attrs) 49 - { 50 - return page_to_phys(page) + offset; 51 - } 52 - 53 - static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, 54 - int nents, enum dma_data_direction direction, 55 - unsigned long attrs) 56 - { 57 - printk("Map sg\n"); 58 - return nents; 59 - } 60 - 61 - static inline int v32_dma_supported(struct device *dev, u64 mask) 62 - { 63 - /* 64 - * we fall back to GFP_DMA when the mask isn't all 1s, 65 - * so we can't guarantee allocations that must be 66 - * within a tighter range than GFP_DMA.. 
67 - */ 68 - if (mask < 0x00ffffff) 69 - return 0; 70 - return 1; 71 - } 72 - 73 - const struct dma_map_ops v32_dma_ops = { 74 - .alloc = v32_dma_alloc, 75 - .free = v32_dma_free, 76 - .map_page = v32_dma_map_page, 77 - .map_sg = v32_dma_map_sg, 78 - .dma_supported = v32_dma_supported, 79 - }; 80 - EXPORT_SYMBOL(v32_dma_ops);
+1
arch/cris/include/asm/Kbuild
··· 5 5 generic-y += current.h 6 6 generic-y += device.h 7 7 generic-y += div64.h 8 + generic-y += dma-mapping.h 8 9 generic-y += emergency-restart.h 9 10 generic-y += exec.h 10 11 generic-y += extable.h
-20
arch/cris/include/asm/dma-mapping.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_CRIS_DMA_MAPPING_H 3 - #define _ASM_CRIS_DMA_MAPPING_H 4 - 5 - #ifdef CONFIG_PCI 6 - extern const struct dma_map_ops v32_dma_ops; 7 - 8 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 9 - { 10 - return &v32_dma_ops; 11 - } 12 - #else 13 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 14 - { 15 - BUG(); 16 - return NULL; 17 - } 18 - #endif 19 - 20 - #endif
+1
arch/h8300/Kconfig
··· 23 23 select HAVE_ARCH_KGDB 24 24 select HAVE_ARCH_HASH 25 25 select CPU_NO_EFFICIENT_FFS 26 + select DMA_DIRECT_OPS 26 27 27 28 config CPU_BIG_ENDIAN 28 29 def_bool y
+1
arch/h8300/include/asm/Kbuild
··· 9 9 generic-y += device.h 10 10 generic-y += div64.h 11 11 generic-y += dma.h 12 + generic-y += dma-mapping.h 12 13 generic-y += emergency-restart.h 13 14 generic-y += exec.h 14 15 generic-y += extable.h
-12
arch/h8300/include/asm/dma-mapping.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _H8300_DMA_MAPPING_H 3 - #define _H8300_DMA_MAPPING_H 4 - 5 - extern const struct dma_map_ops h8300_dma_map_ops; 6 - 7 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 8 - { 9 - return &h8300_dma_map_ops; 10 - } 11 - 12 - #endif
+1 -1
arch/h8300/kernel/Makefile
··· 7 7 8 8 obj-y := process.o traps.o ptrace.o \ 9 9 signal.o setup.o syscalls.o \ 10 - irq.o entry.o dma.o 10 + irq.o entry.o 11 11 12 12 obj-$(CONFIG_ROMKERNEL) += head_rom.o 13 13 obj-$(CONFIG_RAMKERNEL) += head_ram.o
-69
arch/h8300/kernel/dma.c
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file COPYING in the main directory of this archive 4 - * for more details. 5 - */ 6 - 7 - #include <linux/dma-mapping.h> 8 - #include <linux/kernel.h> 9 - #include <linux/scatterlist.h> 10 - #include <linux/module.h> 11 - #include <asm/pgalloc.h> 12 - 13 - static void *dma_alloc(struct device *dev, size_t size, 14 - dma_addr_t *dma_handle, gfp_t gfp, 15 - unsigned long attrs) 16 - { 17 - void *ret; 18 - 19 - /* ignore region specifiers */ 20 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 21 - 22 - if (dev == NULL || (*dev->dma_mask < 0xffffffff)) 23 - gfp |= GFP_DMA; 24 - ret = (void *)__get_free_pages(gfp, get_order(size)); 25 - 26 - if (ret != NULL) { 27 - memset(ret, 0, size); 28 - *dma_handle = virt_to_phys(ret); 29 - } 30 - return ret; 31 - } 32 - 33 - static void dma_free(struct device *dev, size_t size, 34 - void *vaddr, dma_addr_t dma_handle, 35 - unsigned long attrs) 36 - 37 - { 38 - free_pages((unsigned long)vaddr, get_order(size)); 39 - } 40 - 41 - static dma_addr_t map_page(struct device *dev, struct page *page, 42 - unsigned long offset, size_t size, 43 - enum dma_data_direction direction, 44 - unsigned long attrs) 45 - { 46 - return page_to_phys(page) + offset; 47 - } 48 - 49 - static int map_sg(struct device *dev, struct scatterlist *sgl, 50 - int nents, enum dma_data_direction direction, 51 - unsigned long attrs) 52 - { 53 - struct scatterlist *sg; 54 - int i; 55 - 56 - for_each_sg(sgl, sg, nents, i) { 57 - sg->dma_address = sg_phys(sg); 58 - } 59 - 60 - return nents; 61 - } 62 - 63 - const struct dma_map_ops h8300_dma_map_ops = { 64 - .alloc = dma_alloc, 65 - .free = dma_free, 66 - .map_page = map_page, 67 - .map_sg = map_sg, 68 - }; 69 - EXPORT_SYMBOL(h8300_dma_map_ops);
-7
arch/hexagon/include/asm/dma-mapping.h
··· 37 37 return dma_ops; 38 38 } 39 39 40 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 41 - { 42 - if (!dev->dma_mask) 43 - return 0; 44 - return addr + size - 1 <= *dev->dma_mask; 45 - } 46 - 47 40 #endif
-2
arch/hexagon/include/asm/io.h
··· 330 330 } 331 331 } 332 332 333 - #define flush_write_buffers() do { } while (0) 334 - 335 333 #endif /* __KERNEL__ */ 336 334 337 335 #endif
+1
arch/hexagon/kernel/dma.c
··· 19 19 */ 20 20 21 21 #include <linux/dma-mapping.h> 22 + #include <linux/dma-direct.h> 22 23 #include <linux/bootmem.h> 23 24 #include <linux/genalloc.h> 24 25 #include <asm/dma-mapping.h>
+7 -1
arch/ia64/Kconfig
··· 33 33 select HAVE_MEMBLOCK 34 34 select HAVE_MEMBLOCK_NODE_MAP 35 35 select HAVE_VIRT_CPU_ACCOUNTING 36 + select ARCH_HAS_DMA_MARK_CLEAN 36 37 select ARCH_HAS_SG_CHAIN 37 38 select VIRT_TO_BUS 38 39 select ARCH_DISCARD_MEMBLOCK ··· 66 65 select ATA_NONSTANDARD if ATA 67 66 default y 68 67 69 - config ZONE_DMA 68 + config ZONE_DMA32 70 69 def_bool y 71 70 depends on !IA64_SGI_SN2 72 71 ··· 146 145 bool "generic" 147 146 select NUMA 148 147 select ACPI_NUMA 148 + select DMA_DIRECT_OPS 149 149 select SWIOTLB 150 150 select PCI_MSI 151 151 help ··· 167 165 168 166 config IA64_DIG 169 167 bool "DIG-compliant" 168 + select DMA_DIRECT_OPS 170 169 select SWIOTLB 171 170 172 171 config IA64_DIG_VTD ··· 183 180 184 181 config IA64_HP_ZX1_SWIOTLB 185 182 bool "HP-zx1/sx1000 with software I/O TLB" 183 + select DMA_DIRECT_OPS 186 184 select SWIOTLB 187 185 help 188 186 Build a kernel that runs on HP zx1 and sx1000 systems even when they ··· 207 203 bool "SGI-UV" 208 204 select NUMA 209 205 select ACPI_NUMA 206 + select DMA_DIRECT_OPS 210 207 select SWIOTLB 211 208 help 212 209 Selecting this option will optimize the kernel for use on UV based ··· 218 213 219 214 config IA64_HP_SIM 220 215 bool "Ski-simulator" 216 + select DMA_DIRECT_OPS 221 217 select SWIOTLB 222 218 depends on !PM 223 219
+1 -1
arch/ia64/hp/common/hwsw_iommu.c
··· 19 19 #include <linux/export.h> 20 20 #include <asm/machvec.h> 21 21 22 - extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; 22 + extern const struct dma_map_ops sba_dma_ops; 23 23 24 24 /* swiotlb declarations & definitions: */ 25 25 extern int swiotlb_late_init_with_default_size (size_t size);
-19
arch/ia64/include/asm/dma-mapping.h
··· 8 8 */ 9 9 #include <asm/machvec.h> 10 10 #include <linux/scatterlist.h> 11 - #include <asm/swiotlb.h> 12 11 #include <linux/dma-debug.h> 13 12 14 13 #define ARCH_HAS_DMA_GET_REQUIRED_MASK ··· 24 25 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 25 26 { 26 27 return platform_dma_get_ops(NULL); 27 - } 28 - 29 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 30 - { 31 - if (!dev->dma_mask) 32 - return 0; 33 - 34 - return addr + size - 1 <= *dev->dma_mask; 35 - } 36 - 37 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 38 - { 39 - return paddr; 40 - } 41 - 42 - static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 43 - { 44 - return daddr; 45 28 } 46 29 47 30 #endif /* _ASM_IA64_DMA_MAPPING_H */
-2
arch/ia64/include/asm/dma.h
··· 20 20 21 21 #define free_dma(x) 22 22 23 - void dma_mark_clean(void *addr, size_t size); 24 - 25 23 #endif /* _ASM_IA64_DMA_H */
-18
arch/ia64/include/asm/swiotlb.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef ASM_IA64__SWIOTLB_H 3 - #define ASM_IA64__SWIOTLB_H 4 - 5 - #include <linux/dma-mapping.h> 6 - #include <linux/swiotlb.h> 7 - 8 - #ifdef CONFIG_SWIOTLB 9 - extern int swiotlb; 10 - extern void pci_swiotlb_init(void); 11 - #else 12 - #define swiotlb 0 13 - static inline void pci_swiotlb_init(void) 14 - { 15 - } 16 - #endif 17 - 18 - #endif /* ASM_IA64__SWIOTLB_H */
+9
arch/ia64/kernel/dma-mapping.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/dma-mapping.h> 3 + #include <linux/swiotlb.h> 3 4 #include <linux/export.h> 4 5 5 6 /* Set this to 1 if there is a HW IOMMU in the system */ ··· 24 23 return dma_ops; 25 24 } 26 25 EXPORT_SYMBOL(dma_get_ops); 26 + 27 + #ifdef CONFIG_SWIOTLB 28 + void __init swiotlb_dma_init(void) 29 + { 30 + dma_ops = &swiotlb_dma_ops; 31 + swiotlb_init(1); 32 + } 33 + #endif
+10 -9
arch/ia64/kernel/pci-dma.c
··· 12 12 #include <asm/iommu.h> 13 13 #include <asm/machvec.h> 14 14 #include <linux/dma-mapping.h> 15 - 16 - 17 - #ifdef CONFIG_INTEL_IOMMU 18 - 19 15 #include <linux/kernel.h> 20 - 21 16 #include <asm/page.h> 22 17 23 18 dma_addr_t bad_dma_address __read_mostly; ··· 99 104 detect_intel_iommu(); 100 105 101 106 #ifdef CONFIG_SWIOTLB 102 - pci_swiotlb_init(); 103 - #endif 107 + if (!iommu_detected) { 108 + #ifdef CONFIG_IA64_GENERIC 109 + printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); 110 + machvec_init("dig"); 111 + swiotlb_dma_init(); 112 + #else 113 + panic("Unable to find Intel IOMMU"); 114 + #endif /* CONFIG_IA64_GENERIC */ 115 + } 116 + #endif /* CONFIG_SWIOTLB */ 104 117 } 105 - 106 - #endif
-68
arch/ia64/kernel/pci-swiotlb.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* Glue code to lib/swiotlb.c */ 3 - 4 - #include <linux/pci.h> 5 - #include <linux/gfp.h> 6 - #include <linux/cache.h> 7 - #include <linux/module.h> 8 - #include <linux/dma-mapping.h> 9 - 10 - #include <asm/swiotlb.h> 11 - #include <asm/dma.h> 12 - #include <asm/iommu.h> 13 - #include <asm/machvec.h> 14 - 15 - int swiotlb __read_mostly; 16 - EXPORT_SYMBOL(swiotlb); 17 - 18 - static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, 19 - dma_addr_t *dma_handle, gfp_t gfp, 20 - unsigned long attrs) 21 - { 22 - if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) 23 - gfp |= GFP_DMA; 24 - return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); 25 - } 26 - 27 - static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, 28 - void *vaddr, dma_addr_t dma_addr, 29 - unsigned long attrs) 30 - { 31 - swiotlb_free_coherent(dev, size, vaddr, dma_addr); 32 - } 33 - 34 - const struct dma_map_ops swiotlb_dma_ops = { 35 - .alloc = ia64_swiotlb_alloc_coherent, 36 - .free = ia64_swiotlb_free_coherent, 37 - .map_page = swiotlb_map_page, 38 - .unmap_page = swiotlb_unmap_page, 39 - .map_sg = swiotlb_map_sg_attrs, 40 - .unmap_sg = swiotlb_unmap_sg_attrs, 41 - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 42 - .sync_single_for_device = swiotlb_sync_single_for_device, 43 - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 44 - .sync_sg_for_device = swiotlb_sync_sg_for_device, 45 - .dma_supported = swiotlb_dma_supported, 46 - .mapping_error = swiotlb_dma_mapping_error, 47 - }; 48 - 49 - void __init swiotlb_dma_init(void) 50 - { 51 - dma_ops = &swiotlb_dma_ops; 52 - swiotlb_init(1); 53 - } 54 - 55 - void __init pci_swiotlb_init(void) 56 - { 57 - if (!iommu_detected) { 58 - #ifdef CONFIG_IA64_GENERIC 59 - swiotlb = 1; 60 - printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); 61 - machvec_init("dig"); 62 - swiotlb_init(1); 63 - dma_ops = &swiotlb_dma_ops; 64 - #else 65 - panic("Unable to find Intel IOMMU"); 
66 - #endif 67 - } 68 - }
+2 -2
arch/ia64/mm/contig.c
··· 237 237 unsigned long max_zone_pfns[MAX_NR_ZONES]; 238 238 239 239 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 240 - #ifdef CONFIG_ZONE_DMA 240 + #ifdef CONFIG_ZONE_DMA32 241 241 max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT; 242 - max_zone_pfns[ZONE_DMA] = max_dma; 242 + max_zone_pfns[ZONE_DMA32] = max_dma; 243 243 #endif 244 244 max_zone_pfns[ZONE_NORMAL] = max_low_pfn; 245 245
+4 -4
arch/ia64/mm/discontig.c
··· 38 38 struct ia64_node_data *node_data; 39 39 unsigned long pernode_addr; 40 40 unsigned long pernode_size; 41 - #ifdef CONFIG_ZONE_DMA 41 + #ifdef CONFIG_ZONE_DMA32 42 42 unsigned long num_dma_physpages; 43 43 #endif 44 44 unsigned long min_pfn; ··· 669 669 { 670 670 unsigned long end = start + len; 671 671 672 - #ifdef CONFIG_ZONE_DMA 672 + #ifdef CONFIG_ZONE_DMA32 673 673 if (start <= __pa(MAX_DMA_ADDRESS)) 674 674 mem_data[node].num_dma_physpages += 675 675 (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT; ··· 724 724 } 725 725 726 726 memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); 727 - #ifdef CONFIG_ZONE_DMA 728 - max_zone_pfns[ZONE_DMA] = max_dma; 727 + #ifdef CONFIG_ZONE_DMA32 728 + max_zone_pfns[ZONE_DMA32] = max_dma; 729 729 #endif 730 730 max_zone_pfns[ZONE_NORMAL] = max_pfn; 731 731 free_area_init_nodes(max_zone_pfns);
+1 -1
arch/m32r/Kconfig
··· 19 19 select MODULES_USE_ELF_RELA 20 20 select HAVE_DEBUG_STACKOVERFLOW 21 21 select CPU_NO_EFFICIENT_FFS 22 - select DMA_NOOP_OPS 22 + select DMA_DIRECT_OPS 23 23 select ARCH_NO_COHERENT_DMA_MMAP if !MMU 24 24 25 25 config SBUS
+1
arch/m32r/include/asm/Kbuild
··· 1 1 generic-y += clkdev.h 2 2 generic-y += current.h 3 + generic-y += dma-mapping.h 3 4 generic-y += exec.h 4 5 generic-y += extable.h 5 6 generic-y += irq_work.h
-24
arch/m32r/include/asm/dma-mapping.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_M32R_DMA_MAPPING_H 3 - #define _ASM_M32R_DMA_MAPPING_H 4 - 5 - #include <linux/kernel.h> 6 - #include <linux/types.h> 7 - #include <linux/mm.h> 8 - #include <linux/scatterlist.h> 9 - #include <linux/dma-debug.h> 10 - #include <linux/io.h> 11 - 12 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 13 - { 14 - return &dma_noop_ops; 15 - } 16 - 17 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 18 - { 19 - if (!dev->dma_mask) 20 - return false; 21 - return addr + size - 1 <= *dev->dma_mask; 22 - } 23 - 24 - #endif /* _ASM_M32R_DMA_MAPPING_H */
-2
arch/m32r/include/asm/io.h
··· 191 191 192 192 #define mmiowb() 193 193 194 - #define flush_write_buffers() do { } while (0) /* M32R_FIXME */ 195 - 196 194 static inline void 197 195 memset_io(volatile void __iomem *addr, unsigned char val, int count) 198 196 {
-2
arch/m68k/kernel/dma.c
··· 76 76 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 77 77 { 78 78 void *ret; 79 - /* ignore region specifiers */ 80 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 81 79 82 80 if (dev == NULL || (*dev->dma_mask < 0xffffffff)) 83 81 gfp |= GFP_DMA;
+2 -2
arch/microblaze/include/asm/dma-mapping.h
··· 18 18 /* 19 19 * Available generic sets of operations 20 20 */ 21 - extern const struct dma_map_ops dma_direct_ops; 21 + extern const struct dma_map_ops dma_nommu_ops; 22 22 23 23 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 24 24 { 25 - return &dma_direct_ops; 25 + return &dma_nommu_ops; 26 26 } 27 27 28 28 #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
+22 -56
arch/microblaze/kernel/dma.c
··· 15 15 #include <linux/bug.h> 16 16 #include <asm/cacheflush.h> 17 17 18 - #define NOT_COHERENT_CACHE 19 - 20 - static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 18 + static void *dma_nommu_alloc_coherent(struct device *dev, size_t size, 21 19 dma_addr_t *dma_handle, gfp_t flag, 22 20 unsigned long attrs) 23 21 { 24 - #ifdef NOT_COHERENT_CACHE 25 22 return consistent_alloc(flag, size, dma_handle); 26 - #else 27 - void *ret; 28 - struct page *page; 29 - int node = dev_to_node(dev); 30 - 31 - /* ignore region specifiers */ 32 - flag &= ~(__GFP_HIGHMEM); 33 - 34 - page = alloc_pages_node(node, flag, get_order(size)); 35 - if (page == NULL) 36 - return NULL; 37 - ret = page_address(page); 38 - memset(ret, 0, size); 39 - *dma_handle = virt_to_phys(ret); 40 - 41 - return ret; 42 - #endif 43 23 } 44 24 45 - static void dma_direct_free_coherent(struct device *dev, size_t size, 25 + static void dma_nommu_free_coherent(struct device *dev, size_t size, 46 26 void *vaddr, dma_addr_t dma_handle, 47 27 unsigned long attrs) 48 28 { 49 - #ifdef NOT_COHERENT_CACHE 50 29 consistent_free(size, vaddr); 51 - #else 52 - free_pages((unsigned long)vaddr, get_order(size)); 53 - #endif 54 30 } 55 31 56 32 static inline void __dma_sync(unsigned long paddr, ··· 45 69 } 46 70 } 47 71 48 - static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 72 + static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl, 49 73 int nents, enum dma_data_direction direction, 50 74 unsigned long attrs) 51 75 { ··· 65 89 return nents; 66 90 } 67 91 68 - static int dma_direct_dma_supported(struct device *dev, u64 mask) 69 - { 70 - return 1; 71 - } 72 - 73 - static inline dma_addr_t dma_direct_map_page(struct device *dev, 92 + static inline dma_addr_t dma_nommu_map_page(struct device *dev, 74 93 struct page *page, 75 94 unsigned long offset, 76 95 size_t size, ··· 77 106 return page_to_phys(page) + offset; 78 107 } 79 108 80 - static inline void 
dma_direct_unmap_page(struct device *dev, 109 + static inline void dma_nommu_unmap_page(struct device *dev, 81 110 dma_addr_t dma_address, 82 111 size_t size, 83 112 enum dma_data_direction direction, ··· 93 122 } 94 123 95 124 static inline void 96 - dma_direct_sync_single_for_cpu(struct device *dev, 125 + dma_nommu_sync_single_for_cpu(struct device *dev, 97 126 dma_addr_t dma_handle, size_t size, 98 127 enum dma_data_direction direction) 99 128 { ··· 107 136 } 108 137 109 138 static inline void 110 - dma_direct_sync_single_for_device(struct device *dev, 139 + dma_nommu_sync_single_for_device(struct device *dev, 111 140 dma_addr_t dma_handle, size_t size, 112 141 enum dma_data_direction direction) 113 142 { ··· 121 150 } 122 151 123 152 static inline void 124 - dma_direct_sync_sg_for_cpu(struct device *dev, 153 + dma_nommu_sync_sg_for_cpu(struct device *dev, 125 154 struct scatterlist *sgl, int nents, 126 155 enum dma_data_direction direction) 127 156 { ··· 135 164 } 136 165 137 166 static inline void 138 - dma_direct_sync_sg_for_device(struct device *dev, 167 + dma_nommu_sync_sg_for_device(struct device *dev, 139 168 struct scatterlist *sgl, int nents, 140 169 enum dma_data_direction direction) 141 170 { ··· 149 178 } 150 179 151 180 static 152 - int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 181 + int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 153 182 void *cpu_addr, dma_addr_t handle, size_t size, 154 183 unsigned long attrs) 155 184 { ··· 162 191 if (off >= count || user_count > (count - off)) 163 192 return -ENXIO; 164 193 165 - #ifdef NOT_COHERENT_CACHE 166 194 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 167 195 pfn = consistent_virt_to_pfn(cpu_addr); 168 - #else 169 - pfn = virt_to_pfn(cpu_addr); 170 - #endif 171 196 return remap_pfn_range(vma, vma->vm_start, pfn + off, 172 197 vma->vm_end - vma->vm_start, vma->vm_page_prot); 173 198 #else ··· 171 204 #endif 172 205 } 173 206 174 - 
const struct dma_map_ops dma_direct_ops = { 175 - .alloc = dma_direct_alloc_coherent, 176 - .free = dma_direct_free_coherent, 177 - .mmap = dma_direct_mmap_coherent, 178 - .map_sg = dma_direct_map_sg, 179 - .dma_supported = dma_direct_dma_supported, 180 - .map_page = dma_direct_map_page, 181 - .unmap_page = dma_direct_unmap_page, 182 - .sync_single_for_cpu = dma_direct_sync_single_for_cpu, 183 - .sync_single_for_device = dma_direct_sync_single_for_device, 184 - .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, 185 - .sync_sg_for_device = dma_direct_sync_sg_for_device, 207 + const struct dma_map_ops dma_nommu_ops = { 208 + .alloc = dma_nommu_alloc_coherent, 209 + .free = dma_nommu_free_coherent, 210 + .mmap = dma_nommu_mmap_coherent, 211 + .map_sg = dma_nommu_map_sg, 212 + .map_page = dma_nommu_map_page, 213 + .unmap_page = dma_nommu_unmap_page, 214 + .sync_single_for_cpu = dma_nommu_sync_single_for_cpu, 215 + .sync_single_for_device = dma_nommu_sync_single_for_device, 216 + .sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu, 217 + .sync_sg_for_device = dma_nommu_sync_sg_for_device, 186 218 }; 187 - EXPORT_SYMBOL(dma_direct_ops); 219 + EXPORT_SYMBOL(dma_nommu_ops); 188 220 189 221 /* Number of entries preallocated for DMA-API debugging */ 190 222 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+2
arch/mips/Kconfig
··· 431 431 432 432 config MACH_LOONGSON64 433 433 bool "Loongson-2/3 family of machines" 434 + select ARCH_HAS_PHYS_TO_DMA 434 435 select SYS_SUPPORTS_ZBOOT 435 436 help 436 437 This enables the support of Loongson-2/3 family of machines. ··· 881 880 config CAVIUM_OCTEON_SOC 882 881 bool "Cavium Networks Octeon SoC based boards" 883 882 select CEVT_R4K 883 + select ARCH_HAS_PHYS_TO_DMA 884 884 select ARCH_PHYS_ADDR_T_64BIT 885 885 select DMA_COHERENT 886 886 select SYS_SUPPORTS_64BIT_KERNEL
+1
arch/mips/cavium-octeon/Kconfig
··· 75 75 76 76 config SWIOTLB 77 77 def_bool y 78 + select DMA_DIRECT_OPS 78 79 select IOMMU_HELPER 79 80 select NEED_SG_DMA_LENGTH 80 81
+3 -26
arch/mips/cavium-octeon/dma-octeon.c
··· 159 159 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, 160 160 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 161 161 { 162 - void *ret; 163 - 164 - /* ignore region specifiers */ 165 - gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); 166 - 167 - if (IS_ENABLED(CONFIG_ZONE_DMA) && dev == NULL) 168 - gfp |= __GFP_DMA; 169 - else if (IS_ENABLED(CONFIG_ZONE_DMA) && 170 - dev->coherent_dma_mask <= DMA_BIT_MASK(24)) 171 - gfp |= __GFP_DMA; 172 - else if (IS_ENABLED(CONFIG_ZONE_DMA32) && 173 - dev->coherent_dma_mask <= DMA_BIT_MASK(32)) 174 - gfp |= __GFP_DMA32; 175 - 176 - /* Don't invoke OOM killer */ 177 - gfp |= __GFP_NORETRY; 178 - 179 - ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp); 162 + void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs); 180 163 181 164 mb(); 182 165 183 166 return ret; 184 - } 185 - 186 - static void octeon_dma_free_coherent(struct device *dev, size_t size, 187 - void *vaddr, dma_addr_t dma_handle, unsigned long attrs) 188 - { 189 - swiotlb_free_coherent(dev, size, vaddr, dma_handle); 190 167 } 191 168 192 169 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr) ··· 205 228 static struct octeon_dma_map_ops octeon_linear_dma_map_ops = { 206 229 .dma_map_ops = { 207 230 .alloc = octeon_dma_alloc_coherent, 208 - .free = octeon_dma_free_coherent, 231 + .free = swiotlb_free, 209 232 .map_page = octeon_dma_map_page, 210 233 .unmap_page = swiotlb_unmap_page, 211 234 .map_sg = octeon_dma_map_sg, ··· 291 314 static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = { 292 315 .dma_map_ops = { 293 316 .alloc = octeon_dma_alloc_coherent, 294 - .free = octeon_dma_free_coherent, 317 + .free = swiotlb_free, 295 318 .map_page = octeon_dma_map_page, 296 319 .unmap_page = swiotlb_unmap_page, 297 320 .map_sg = octeon_dma_map_sg,
+1
arch/mips/include/asm/dma-direct.h
··· 1 + #include <asm/dma-coherence.h>
-10
arch/mips/include/asm/dma-mapping.h
··· 17 17 return mips_dma_map_ops; 18 18 } 19 19 20 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 21 - { 22 - if (!dev->dma_mask) 23 - return false; 24 - 25 - return addr + size <= *dev->dma_mask; 26 - } 27 - 28 - static inline void dma_mark_clean(void *addr, size_t size) {} 29 - 30 20 #define arch_setup_dma_ops arch_setup_dma_ops 31 21 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, 32 22 u64 size, const struct iommu_ops *iommu,
+8
arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
··· 61 61 { 62 62 } 63 63 64 + static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 65 + { 66 + if (!dev->dma_mask) 67 + return false; 68 + 69 + return addr + size - 1 <= *dev->dma_mask; 70 + } 71 + 64 72 dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 65 73 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 66 74
-12
arch/mips/include/asm/mach-generic/dma-coherence.h
··· 70 70 } 71 71 #endif 72 72 73 - #ifdef CONFIG_SWIOTLB 74 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 75 - { 76 - return paddr; 77 - } 78 - 79 - static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 80 - { 81 - return daddr; 82 - } 83 - #endif 84 - 85 73 #endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
+8
arch/mips/include/asm/mach-loongson64/dma-coherence.h
··· 17 17 18 18 struct device; 19 19 20 + static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 21 + { 22 + if (!dev->dma_mask) 23 + return false; 24 + 25 + return addr + size - 1 <= *dev->dma_mask; 26 + } 27 + 20 28 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 21 29 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 22 30 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
-3
arch/mips/include/asm/netlogic/common.h
··· 87 87 extern const struct plat_smp_ops nlm_smp_ops; 88 88 extern char nlm_reset_entry[], nlm_reset_entry_end[]; 89 89 90 - /* SWIOTLB */ 91 - extern const struct dma_map_ops nlm_swiotlb_dma_ops; 92 - 93 90 extern unsigned int nlm_threads_per_core; 94 91 extern cpumask_t nlm_cpumask; 95 92
+1
arch/mips/loongson64/Kconfig
··· 136 136 bool "Soft IOMMU Support for All-Memory DMA" 137 137 default y 138 138 depends on CPU_LOONGSON3 139 + select DMA_DIRECT_OPS 139 140 select IOMMU_HELPER 140 141 select NEED_SG_DMA_LENGTH 141 142 select NEED_DMA_MAP_STATE
+2 -22
arch/mips/loongson64/common/dma-swiotlb.c
··· 13 13 static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, 14 14 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 15 15 { 16 - void *ret; 16 + void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs); 17 17 18 - /* ignore region specifiers */ 19 - gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); 20 - 21 - if ((IS_ENABLED(CONFIG_ISA) && dev == NULL) || 22 - (IS_ENABLED(CONFIG_ZONE_DMA) && 23 - dev->coherent_dma_mask < DMA_BIT_MASK(32))) 24 - gfp |= __GFP_DMA; 25 - else if (IS_ENABLED(CONFIG_ZONE_DMA32) && 26 - dev->coherent_dma_mask < DMA_BIT_MASK(40)) 27 - gfp |= __GFP_DMA32; 28 - 29 - gfp |= __GFP_NORETRY; 30 - 31 - ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp); 32 18 mb(); 33 19 return ret; 34 - } 35 - 36 - static void loongson_dma_free_coherent(struct device *dev, size_t size, 37 - void *vaddr, dma_addr_t dma_handle, unsigned long attrs) 38 - { 39 - swiotlb_free_coherent(dev, size, vaddr, dma_handle); 40 20 } 41 21 42 22 static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, ··· 89 109 90 110 static const struct dma_map_ops loongson_dma_map_ops = { 91 111 .alloc = loongson_dma_alloc_coherent, 92 - .free = loongson_dma_free_coherent, 112 + .free = swiotlb_free, 93 113 .map_page = loongson_dma_map_page, 94 114 .unmap_page = swiotlb_unmap_page, 95 115 .map_sg = loongson_dma_map_sg,
-3
arch/mips/mm/dma-default.c
··· 93 93 { 94 94 gfp_t dma_flag; 95 95 96 - /* ignore region specifiers */ 97 - gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); 98 - 99 96 #ifdef CONFIG_ISA 100 97 if (dev == NULL) 101 98 dma_flag = __GFP_DMA;
-5
arch/mips/netlogic/Kconfig
··· 89 89 config NEED_SG_DMA_LENGTH 90 90 bool 91 91 92 - config SWIOTLB 93 - def_bool y 94 - select NEED_SG_DMA_LENGTH 95 - select IOMMU_HELPER 96 - 97 92 endif
-1
arch/mips/netlogic/common/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-y += irq.o time.o 3 - obj-y += nlm-dma.o 4 3 obj-y += reset.o 5 4 obj-$(CONFIG_SMP) += smp.o smpboot.o 6 5 obj-$(CONFIG_EARLY_PRINTK) += earlycons.o
-97
arch/mips/netlogic/common/nlm-dma.c
··· 1 - /* 2 - * Copyright (C) 2003-2013 Broadcom Corporation 3 - * All Rights Reserved 4 - * 5 - * This software is available to you under a choice of one of two 6 - * licenses. You may choose to be licensed under the terms of the GNU 7 - * General Public License (GPL) Version 2, available from the file 8 - * COPYING in the main directory of this source tree, or the Broadcom 9 - * license below: 10 - * 11 - * Redistribution and use in source and binary forms, with or without 12 - * modification, are permitted provided that the following conditions 13 - * are met: 14 - * 15 - * 1. Redistributions of source code must retain the above copyright 16 - * notice, this list of conditions and the following disclaimer. 17 - * 2. Redistributions in binary form must reproduce the above copyright 18 - * notice, this list of conditions and the following disclaimer in 19 - * the documentation and/or other materials provided with the 20 - * distribution. 21 - * 22 - * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR 23 - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 - * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE 26 - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR 29 - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE 31 - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN 32 - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 - */ 34 - #include <linux/dma-mapping.h> 35 - #include <linux/scatterlist.h> 36 - #include <linux/bootmem.h> 37 - #include <linux/export.h> 38 - #include <linux/swiotlb.h> 39 - #include <linux/types.h> 40 - #include <linux/init.h> 41 - #include <linux/mm.h> 42 - 43 - #include <asm/bootinfo.h> 44 - 45 - static char *nlm_swiotlb; 46 - 47 - static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, 48 - dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 49 - { 50 - /* ignore region specifiers */ 51 - gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); 52 - 53 - #ifdef CONFIG_ZONE_DMA32 54 - if (dev->coherent_dma_mask <= DMA_BIT_MASK(32)) 55 - gfp |= __GFP_DMA32; 56 - #endif 57 - 58 - /* Don't invoke OOM killer */ 59 - gfp |= __GFP_NORETRY; 60 - 61 - return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); 62 - } 63 - 64 - static void nlm_dma_free_coherent(struct device *dev, size_t size, 65 - void *vaddr, dma_addr_t dma_handle, unsigned long attrs) 66 - { 67 - swiotlb_free_coherent(dev, size, vaddr, dma_handle); 68 - } 69 - 70 - const struct dma_map_ops nlm_swiotlb_dma_ops = { 71 - .alloc = nlm_dma_alloc_coherent, 72 - .free = nlm_dma_free_coherent, 73 - .map_page = swiotlb_map_page, 74 - .unmap_page = swiotlb_unmap_page, 75 - .map_sg = swiotlb_map_sg_attrs, 76 - .unmap_sg = swiotlb_unmap_sg_attrs, 77 - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 78 - .sync_single_for_device = swiotlb_sync_single_for_device, 79 - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 80 - .sync_sg_for_device = swiotlb_sync_sg_for_device, 81 - .mapping_error = swiotlb_dma_mapping_error, 82 - .dma_supported = swiotlb_dma_supported 83 - }; 84 - 85 - void __init plat_swiotlb_setup(void) 86 - { 87 - size_t swiotlbsize; 88 - unsigned long swiotlb_nslabs; 89 - 90 - swiotlbsize = 1 << 20; /* 1 MB for now */ 91 - swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT; 92 - swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE); 93 - swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT; 94 - 95 - 
nlm_swiotlb = alloc_bootmem_low_pages(swiotlbsize); 96 - swiotlb_init_with_tbl(nlm_swiotlb, swiotlb_nslabs, 1); 97 - }
-3
arch/mn10300/mm/dma-alloc.c
··· 37 37 goto done; 38 38 } 39 39 40 - /* ignore region specifiers */ 41 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 42 - 43 40 if (dev == NULL || dev->coherent_dma_mask < 0xffffffff) 44 41 gfp |= GFP_DMA; 45 42
-3
arch/nios2/mm/dma-mapping.c
··· 63 63 { 64 64 void *ret; 65 65 66 - /* ignore region specifiers */ 67 - gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 68 - 69 66 /* optimized page clearing */ 70 67 gfp |= __GFP_ZERO; 71 68
-7
arch/parisc/kernel/pci-dma.c
··· 75 75 static inline void dump_resmap(void) {;} 76 76 #endif 77 77 78 - static int pa11_dma_supported( struct device *dev, u64 mask) 79 - { 80 - return 1; 81 - } 82 - 83 78 static inline int map_pte_uncached(pte_t * pte, 84 79 unsigned long vaddr, 85 80 unsigned long size, unsigned long *paddr_ptr) ··· 574 579 } 575 580 576 581 const struct dma_map_ops pcxl_dma_ops = { 577 - .dma_supported = pa11_dma_supported, 578 582 .alloc = pa11_dma_alloc, 579 583 .free = pa11_dma_free, 580 584 .map_page = pa11_dma_map_page, ··· 610 616 } 611 617 612 618 const struct dma_map_ops pcx_dma_ops = { 613 - .dma_supported = pa11_dma_supported, 614 619 .alloc = pcx_dma_alloc, 615 620 .free = pcx_dma_free, 616 621 .map_page = pa11_dma_map_page,
+1
arch/powerpc/Kconfig
··· 139 139 select ARCH_HAS_ELF_RANDOMIZE 140 140 select ARCH_HAS_FORTIFY_SOURCE 141 141 select ARCH_HAS_GCOV_PROFILE_ALL 142 + select ARCH_HAS_PHYS_TO_DMA 142 143 select ARCH_HAS_PMEM_API if PPC64 143 144 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE 144 145 select ARCH_HAS_SG_CHAIN
+29
arch/powerpc/include/asm/dma-direct.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef ASM_POWERPC_DMA_DIRECT_H 3 + #define ASM_POWERPC_DMA_DIRECT_H 1 4 + 5 + static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 6 + { 7 + #ifdef CONFIG_SWIOTLB 8 + struct dev_archdata *sd = &dev->archdata; 9 + 10 + if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr) 11 + return false; 12 + #endif 13 + 14 + if (!dev->dma_mask) 15 + return false; 16 + 17 + return addr + size - 1 <= *dev->dma_mask; 18 + } 19 + 20 + static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 21 + { 22 + return paddr + get_dma_offset(dev); 23 + } 24 + 25 + static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 26 + { 27 + return daddr - get_dma_offset(dev); 28 + } 29 + #endif /* ASM_POWERPC_DMA_DIRECT_H */
+4 -32
arch/powerpc/include/asm/dma-mapping.h
··· 19 19 #include <asm/swiotlb.h> 20 20 21 21 /* Some dma direct funcs must be visible for use in other dma_ops */ 22 - extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, 22 + extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, 23 23 dma_addr_t *dma_handle, gfp_t flag, 24 24 unsigned long attrs); 25 - extern void __dma_direct_free_coherent(struct device *dev, size_t size, 25 + extern void __dma_nommu_free_coherent(struct device *dev, size_t size, 26 26 void *vaddr, dma_addr_t dma_handle, 27 27 unsigned long attrs); 28 - extern int dma_direct_mmap_coherent(struct device *dev, 28 + extern int dma_nommu_mmap_coherent(struct device *dev, 29 29 struct vm_area_struct *vma, 30 30 void *cpu_addr, dma_addr_t handle, 31 31 size_t size, unsigned long attrs); ··· 73 73 #ifdef CONFIG_PPC64 74 74 extern struct dma_map_ops dma_iommu_ops; 75 75 #endif 76 - extern const struct dma_map_ops dma_direct_ops; 76 + extern const struct dma_map_ops dma_nommu_ops; 77 77 78 78 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 79 79 { ··· 107 107 dev->archdata.dma_offset = off; 108 108 } 109 109 110 - /* this will be removed soon */ 111 - #define flush_write_buffers() 112 - 113 110 #define HAVE_ARCH_DMA_SET_MASK 1 114 111 extern int dma_set_mask(struct device *dev, u64 dma_mask); 115 112 116 113 extern u64 __dma_get_required_mask(struct device *dev); 117 - 118 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 119 - { 120 - #ifdef CONFIG_SWIOTLB 121 - struct dev_archdata *sd = &dev->archdata; 122 - 123 - if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr) 124 - return false; 125 - #endif 126 - 127 - if (!dev->dma_mask) 128 - return false; 129 - 130 - return addr + size - 1 <= *dev->dma_mask; 131 - } 132 - 133 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 134 - { 135 - return paddr + get_dma_offset(dev); 136 - } 137 - 138 - static inline 
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 139 - { 140 - return daddr - get_dma_offset(dev); 141 - } 142 114 143 115 #define ARCH_HAS_DMA_MMAP_COHERENT 144 116
+1 -3
arch/powerpc/include/asm/swiotlb.h
··· 13 13 14 14 #include <linux/swiotlb.h> 15 15 16 - extern const struct dma_map_ops swiotlb_dma_ops; 17 - 18 - static inline void dma_mark_clean(void *addr, size_t size) {} 16 + extern const struct dma_map_ops powerpc_swiotlb_dma_ops; 19 17 20 18 extern unsigned int ppc_swiotlb_enable; 21 19 int __init swiotlb_setup_bus_notifier(void);
+1 -1
arch/powerpc/kernel/dma-iommu.c
··· 114 114 struct dma_map_ops dma_iommu_ops = { 115 115 .alloc = dma_iommu_alloc_coherent, 116 116 .free = dma_iommu_free_coherent, 117 - .mmap = dma_direct_mmap_coherent, 117 + .mmap = dma_nommu_mmap_coherent, 118 118 .map_sg = dma_iommu_map_sg, 119 119 .unmap_sg = dma_iommu_unmap_sg, 120 120 .dma_supported = dma_iommu_dma_supported,
+6 -6
arch/powerpc/kernel/dma-swiotlb.c
··· 46 46 * map_page, and unmap_page on highmem, use normal dma_ops 47 47 * for everything else. 48 48 */ 49 - const struct dma_map_ops swiotlb_dma_ops = { 50 - .alloc = __dma_direct_alloc_coherent, 51 - .free = __dma_direct_free_coherent, 52 - .mmap = dma_direct_mmap_coherent, 49 + const struct dma_map_ops powerpc_swiotlb_dma_ops = { 50 + .alloc = __dma_nommu_alloc_coherent, 51 + .free = __dma_nommu_free_coherent, 52 + .mmap = dma_nommu_mmap_coherent, 53 53 .map_sg = swiotlb_map_sg_attrs, 54 54 .unmap_sg = swiotlb_unmap_sg_attrs, 55 55 .dma_supported = swiotlb_dma_supported, ··· 89 89 90 90 /* May need to bounce if the device can't address all of DRAM */ 91 91 if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM()) 92 - set_dma_ops(dev, &swiotlb_dma_ops); 92 + set_dma_ops(dev, &powerpc_swiotlb_dma_ops); 93 93 94 94 return NOTIFY_DONE; 95 95 } ··· 121 121 if (ppc_swiotlb_enable) 122 122 swiotlb_print_info(); 123 123 else 124 - swiotlb_free(); 124 + swiotlb_exit(); 125 125 126 126 return 0; 127 127 }
+35 -38
arch/powerpc/kernel/dma.c
··· 33 33 struct dev_archdata __maybe_unused *sd = &dev->archdata; 34 34 35 35 #ifdef CONFIG_SWIOTLB 36 - if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops) 36 + if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops) 37 37 pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT); 38 38 #endif 39 39 40 40 return pfn; 41 41 } 42 42 43 - static int dma_direct_dma_supported(struct device *dev, u64 mask) 43 + static int dma_nommu_dma_supported(struct device *dev, u64 mask) 44 44 { 45 45 #ifdef CONFIG_PPC64 46 46 u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1); ··· 62 62 #endif 63 63 } 64 64 65 - void *__dma_direct_alloc_coherent(struct device *dev, size_t size, 65 + void *__dma_nommu_alloc_coherent(struct device *dev, size_t size, 66 66 dma_addr_t *dma_handle, gfp_t flag, 67 67 unsigned long attrs) 68 68 { ··· 105 105 }; 106 106 #endif /* CONFIG_FSL_SOC */ 107 107 108 - /* ignore region specifiers */ 109 - flag &= ~(__GFP_HIGHMEM); 110 - 111 108 page = alloc_pages_node(node, flag, get_order(size)); 112 109 if (page == NULL) 113 110 return NULL; ··· 116 119 #endif 117 120 } 118 121 119 - void __dma_direct_free_coherent(struct device *dev, size_t size, 122 + void __dma_nommu_free_coherent(struct device *dev, size_t size, 120 123 void *vaddr, dma_addr_t dma_handle, 121 124 unsigned long attrs) 122 125 { ··· 127 130 #endif 128 131 } 129 132 130 - static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 133 + static void *dma_nommu_alloc_coherent(struct device *dev, size_t size, 131 134 dma_addr_t *dma_handle, gfp_t flag, 132 135 unsigned long attrs) 133 136 { ··· 136 139 /* The coherent mask may be smaller than the real mask, check if 137 140 * we can really use the direct ops 138 141 */ 139 - if (dma_direct_dma_supported(dev, dev->coherent_dma_mask)) 140 - return __dma_direct_alloc_coherent(dev, size, dma_handle, 142 + if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask)) 143 + return 
__dma_nommu_alloc_coherent(dev, size, dma_handle, 141 144 flag, attrs); 142 145 143 146 /* Ok we can't ... do we have an iommu ? If not, fail */ ··· 151 154 dev_to_node(dev)); 152 155 } 153 156 154 - static void dma_direct_free_coherent(struct device *dev, size_t size, 157 + static void dma_nommu_free_coherent(struct device *dev, size_t size, 155 158 void *vaddr, dma_addr_t dma_handle, 156 159 unsigned long attrs) 157 160 { 158 161 struct iommu_table *iommu; 159 162 160 - /* See comments in dma_direct_alloc_coherent() */ 161 - if (dma_direct_dma_supported(dev, dev->coherent_dma_mask)) 162 - return __dma_direct_free_coherent(dev, size, vaddr, dma_handle, 163 + /* See comments in dma_nommu_alloc_coherent() */ 164 + if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask)) 165 + return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle, 163 166 attrs); 164 167 /* Maybe we used an iommu ... */ 165 168 iommu = get_iommu_table_base(dev); ··· 172 175 iommu_free_coherent(iommu, size, vaddr, dma_handle); 173 176 } 174 177 175 - int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 178 + int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 176 179 void *cpu_addr, dma_addr_t handle, size_t size, 177 180 unsigned long attrs) 178 181 { ··· 190 193 vma->vm_page_prot); 191 194 } 192 195 193 - static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 196 + static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl, 194 197 int nents, enum dma_data_direction direction, 195 198 unsigned long attrs) 196 199 { ··· 210 213 return nents; 211 214 } 212 215 213 - static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, 216 + static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg, 214 217 int nents, enum dma_data_direction direction, 215 218 unsigned long attrs) 216 219 { 217 220 } 218 221 219 - static u64 dma_direct_get_required_mask(struct device *dev) 222 + static u64 
dma_nommu_get_required_mask(struct device *dev) 220 223 { 221 224 u64 end, mask; 222 225 ··· 228 231 return mask; 229 232 } 230 233 231 - static inline dma_addr_t dma_direct_map_page(struct device *dev, 234 + static inline dma_addr_t dma_nommu_map_page(struct device *dev, 232 235 struct page *page, 233 236 unsigned long offset, 234 237 size_t size, ··· 243 246 return page_to_phys(page) + offset + get_dma_offset(dev); 244 247 } 245 248 246 - static inline void dma_direct_unmap_page(struct device *dev, 249 + static inline void dma_nommu_unmap_page(struct device *dev, 247 250 dma_addr_t dma_address, 248 251 size_t size, 249 252 enum dma_data_direction direction, ··· 252 255 } 253 256 254 257 #ifdef CONFIG_NOT_COHERENT_CACHE 255 - static inline void dma_direct_sync_sg(struct device *dev, 258 + static inline void dma_nommu_sync_sg(struct device *dev, 256 259 struct scatterlist *sgl, int nents, 257 260 enum dma_data_direction direction) 258 261 { ··· 263 266 __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction); 264 267 } 265 268 266 - static inline void dma_direct_sync_single(struct device *dev, 269 + static inline void dma_nommu_sync_single(struct device *dev, 267 270 dma_addr_t dma_handle, size_t size, 268 271 enum dma_data_direction direction) 269 272 { ··· 271 274 } 272 275 #endif 273 276 274 - const struct dma_map_ops dma_direct_ops = { 275 - .alloc = dma_direct_alloc_coherent, 276 - .free = dma_direct_free_coherent, 277 - .mmap = dma_direct_mmap_coherent, 278 - .map_sg = dma_direct_map_sg, 279 - .unmap_sg = dma_direct_unmap_sg, 280 - .dma_supported = dma_direct_dma_supported, 281 - .map_page = dma_direct_map_page, 282 - .unmap_page = dma_direct_unmap_page, 283 - .get_required_mask = dma_direct_get_required_mask, 277 + const struct dma_map_ops dma_nommu_ops = { 278 + .alloc = dma_nommu_alloc_coherent, 279 + .free = dma_nommu_free_coherent, 280 + .mmap = dma_nommu_mmap_coherent, 281 + .map_sg = dma_nommu_map_sg, 282 + .unmap_sg = dma_nommu_unmap_sg, 283 + 
.dma_supported = dma_nommu_dma_supported, 284 + .map_page = dma_nommu_map_page, 285 + .unmap_page = dma_nommu_unmap_page, 286 + .get_required_mask = dma_nommu_get_required_mask, 284 287 #ifdef CONFIG_NOT_COHERENT_CACHE 285 - .sync_single_for_cpu = dma_direct_sync_single, 286 - .sync_single_for_device = dma_direct_sync_single, 287 - .sync_sg_for_cpu = dma_direct_sync_sg, 288 - .sync_sg_for_device = dma_direct_sync_sg, 288 + .sync_single_for_cpu = dma_nommu_sync_single, 289 + .sync_single_for_device = dma_nommu_sync_single, 290 + .sync_sg_for_cpu = dma_nommu_sync_sg, 291 + .sync_sg_for_device = dma_nommu_sync_sg, 289 292 #endif 290 293 }; 291 - EXPORT_SYMBOL(dma_direct_ops); 294 + EXPORT_SYMBOL(dma_nommu_ops); 292 295 293 296 int dma_set_coherent_mask(struct device *dev, u64 mask) 294 297 { ··· 299 302 * is no dma_op->set_coherent_mask() so we have to do 300 303 * things the hard way: 301 304 */ 302 - if (get_dma_ops(dev) != &dma_direct_ops || 305 + if (get_dma_ops(dev) != &dma_nommu_ops || 303 306 get_iommu_table_base(dev) == NULL || 304 307 !dma_iommu_dma_supported(dev, mask)) 305 308 return -EIO;
+1 -1
arch/powerpc/kernel/pci-common.c
··· 60 60 EXPORT_SYMBOL(isa_mem_base); 61 61 62 62 63 - static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; 63 + static const struct dma_map_ops *pci_dma_ops = &dma_nommu_ops; 64 64 65 65 void set_pci_dma_ops(const struct dma_map_ops *dma_ops) 66 66 {
+1 -1
arch/powerpc/kernel/setup-common.c
··· 780 780 { 781 781 pdev->archdata.dma_mask = DMA_BIT_MASK(32); 782 782 pdev->dev.dma_mask = &pdev->archdata.dma_mask; 783 - set_dma_ops(&pdev->dev, &dma_direct_ops); 783 + set_dma_ops(&pdev->dev, &dma_nommu_ops); 784 784 } 785 785 786 786 static __init void print_system_info(void)
+14 -14
arch/powerpc/platforms/cell/iommu.c
··· 541 541 return NULL; 542 542 } 543 543 544 - static unsigned long cell_dma_direct_offset; 544 + static unsigned long cell_dma_nommu_offset; 545 545 546 546 static unsigned long dma_iommu_fixed_base; 547 547 ··· 580 580 device_to_mask(dev), flag, 581 581 dev_to_node(dev)); 582 582 else 583 - return dma_direct_ops.alloc(dev, size, dma_handle, flag, 583 + return dma_nommu_ops.alloc(dev, size, dma_handle, flag, 584 584 attrs); 585 585 } 586 586 ··· 592 592 iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, 593 593 dma_handle); 594 594 else 595 - dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs); 595 + dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs); 596 596 } 597 597 598 598 static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, ··· 601 601 unsigned long attrs) 602 602 { 603 603 if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) 604 - return dma_direct_ops.map_page(dev, page, offset, size, 604 + return dma_nommu_ops.map_page(dev, page, offset, size, 605 605 direction, attrs); 606 606 else 607 607 return iommu_map_page(dev, cell_get_iommu_table(dev), page, ··· 614 614 unsigned long attrs) 615 615 { 616 616 if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) 617 - dma_direct_ops.unmap_page(dev, dma_addr, size, direction, 617 + dma_nommu_ops.unmap_page(dev, dma_addr, size, direction, 618 618 attrs); 619 619 else 620 620 iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size, ··· 626 626 unsigned long attrs) 627 627 { 628 628 if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) 629 - return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs); 629 + return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs); 630 630 else 631 631 return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, 632 632 nents, device_to_mask(dev), ··· 638 638 unsigned long attrs) 639 639 { 640 640 if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) 641 - dma_direct_ops.unmap_sg(dev, sg, nents, direction, 
attrs); 641 + dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs); 642 642 else 643 643 ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, 644 644 direction, attrs); ··· 661 661 { 662 662 if (get_pci_dma_ops() == &dma_iommu_ops) 663 663 set_iommu_table_base(dev, cell_get_iommu_table(dev)); 664 - else if (get_pci_dma_ops() == &dma_direct_ops) 665 - set_dma_offset(dev, cell_dma_direct_offset); 664 + else if (get_pci_dma_ops() == &dma_nommu_ops) 665 + set_dma_offset(dev, cell_dma_nommu_offset); 666 666 else 667 667 BUG(); 668 668 } ··· 810 810 unsigned long base = 0, size; 811 811 812 812 /* When no iommu is present, we use direct DMA ops */ 813 - set_pci_dma_ops(&dma_direct_ops); 813 + set_pci_dma_ops(&dma_nommu_ops); 814 814 815 815 /* First make sure all IOC translation is turned off */ 816 816 cell_disable_iommus(); 817 817 818 818 /* If we have no Axon, we set up the spider DMA magic offset */ 819 819 if (of_find_node_by_name(NULL, "axon") == NULL) 820 - cell_dma_direct_offset = SPIDER_DMA_OFFSET; 820 + cell_dma_nommu_offset = SPIDER_DMA_OFFSET; 821 821 822 822 /* Now we need to check to see where the memory is mapped 823 823 * in PCI space. We assume that all busses use the same dma ··· 851 851 return -ENODEV; 852 852 } 853 853 854 - cell_dma_direct_offset += base; 854 + cell_dma_nommu_offset += base; 855 855 856 - if (cell_dma_direct_offset != 0) 856 + if (cell_dma_nommu_offset != 0) 857 857 cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup; 858 858 859 859 printk("iommu: disabled, direct DMA offset is 0x%lx\n", 860 - cell_dma_direct_offset); 860 + cell_dma_nommu_offset); 861 861 862 862 return 0; 863 863 }
+1 -1
arch/powerpc/platforms/pasemi/iommu.c
··· 186 186 */ 187 187 if (dev->vendor == 0x1959 && dev->device == 0xa007 && 188 188 !firmware_has_feature(FW_FEATURE_LPAR)) { 189 - dev->dev.dma_ops = &dma_direct_ops; 189 + dev->dev.dma_ops = &dma_nommu_ops; 190 190 /* 191 191 * Set the coherent DMA mask to prevent the iommu 192 192 * being used unnecessarily
+1 -1
arch/powerpc/platforms/pasemi/setup.c
··· 363 363 return 0; 364 364 365 365 /* We use the direct ops for localbus */ 366 - dev->dma_ops = &dma_direct_ops; 366 + dev->dma_ops = &dma_nommu_ops; 367 367 368 368 return 0; 369 369 }
+2 -2
arch/powerpc/platforms/powernv/pci-ioda.c
··· 1850 1850 1851 1851 if (bypass) { 1852 1852 dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n"); 1853 - set_dma_ops(&pdev->dev, &dma_direct_ops); 1853 + set_dma_ops(&pdev->dev, &dma_nommu_ops); 1854 1854 } else { 1855 1855 /* 1856 1856 * If the device can't set the TCE bypass bit but still wants ··· 1868 1868 return rc; 1869 1869 /* 4GB offset bypasses 32-bit space */ 1870 1870 set_dma_offset(&pdev->dev, (1ULL << 32)); 1871 - set_dma_ops(&pdev->dev, &dma_direct_ops); 1871 + set_dma_ops(&pdev->dev, &dma_nommu_ops); 1872 1872 } else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) { 1873 1873 /* 1874 1874 * Fail the request if a DMA mask between 32 and 64 bits
+1 -1
arch/powerpc/platforms/pseries/iommu.c
··· 1231 1231 if (dma_offset != 0) { 1232 1232 dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset); 1233 1233 set_dma_offset(dev, dma_offset); 1234 - set_dma_ops(dev, &dma_direct_ops); 1234 + set_dma_ops(dev, &dma_nommu_ops); 1235 1235 ddw_enabled = true; 1236 1236 } 1237 1237 }
+1 -1
arch/powerpc/platforms/pseries/vio.c
··· 618 618 static const struct dma_map_ops vio_dma_mapping_ops = { 619 619 .alloc = vio_dma_iommu_alloc_coherent, 620 620 .free = vio_dma_iommu_free_coherent, 621 - .mmap = dma_direct_mmap_coherent, 621 + .mmap = dma_nommu_mmap_coherent, 622 622 .map_sg = vio_dma_iommu_map_sg, 623 623 .unmap_sg = vio_dma_iommu_unmap_sg, 624 624 .map_page = vio_dma_iommu_map_page,
+2 -2
arch/powerpc/sysdev/dart_iommu.c
··· 402 402 */ 403 403 if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) { 404 404 dev_info(dev, "Using 64-bit DMA iommu bypass\n"); 405 - set_dma_ops(dev, &dma_direct_ops); 405 + set_dma_ops(dev, &dma_nommu_ops); 406 406 } else { 407 407 dev_info(dev, "Using 32-bit DMA via iommu\n"); 408 408 set_dma_ops(dev, &dma_iommu_ops); ··· 446 446 controller_ops->dma_bus_setup = NULL; 447 447 448 448 /* Setup pci_dma ops */ 449 - set_pci_dma_ops(&dma_direct_ops); 449 + set_pci_dma_ops(&dma_nommu_ops); 450 450 } 451 451 452 452 #ifdef CONFIG_PM
+2 -2
arch/powerpc/sysdev/fsl_pci.c
··· 118 118 { 119 119 if (ppc_swiotlb_enable) { 120 120 hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb; 121 - set_pci_dma_ops(&swiotlb_dma_ops); 121 + set_pci_dma_ops(&powerpc_swiotlb_dma_ops); 122 122 } 123 123 } 124 124 #else ··· 135 135 * mapping that allows addressing any RAM address from across PCI. 136 136 */ 137 137 if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) { 138 - set_dma_ops(dev, &dma_direct_ops); 138 + set_dma_ops(dev, &dma_nommu_ops); 139 139 set_dma_offset(dev, pci64_dma_offset); 140 140 } 141 141
+1 -1
arch/riscv/Kconfig
··· 83 83 config HAVE_KPROBES 84 84 def_bool n 85 85 86 - config DMA_NOOP_OPS 86 + config DMA_DIRECT_OPS 87 87 def_bool y 88 88 89 89 menu "Platform type"
+1
arch/riscv/include/asm/Kbuild
··· 7 7 generic-y += div64.h 8 8 generic-y += dma.h 9 9 generic-y += dma-contiguous.h 10 + generic-y += dma-mapping.h 10 11 generic-y += emergency-restart.h 11 12 generic-y += errno.h 12 13 generic-y += exec.h
-38
arch/riscv/include/asm/dma-mapping.h
··· 1 - /* 2 - * Copyright (C) 2003-2004 Hewlett-Packard Co 3 - * David Mosberger-Tang <davidm@hpl.hp.com> 4 - * Copyright (C) 2012 ARM Ltd. 5 - * Copyright (C) 2016 SiFive, Inc. 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License version 2 as 9 - * published by the Free Software Foundation. 10 - * 11 - * This program is distributed in the hope that it will be useful, 12 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 - * GNU General Public License for more details. 15 - * 16 - * You should have received a copy of the GNU General Public License 17 - * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 - */ 19 - #ifndef __ASM_RISCV_DMA_MAPPING_H 20 - #define __ASM_RISCV_DMA_MAPPING_H 21 - 22 - /* Use ops->dma_mapping_error (if it exists) or assume success */ 23 - // #undef DMA_ERROR_CODE 24 - 25 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 26 - { 27 - return &dma_noop_ops; 28 - } 29 - 30 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 31 - { 32 - if (!dev->dma_mask) 33 - return false; 34 - 35 - return addr + size - 1 <= *dev->dma_mask; 36 - } 37 - 38 - #endif /* __ASM_RISCV_DMA_MAPPING_H */
+1 -1
arch/s390/Kconfig
··· 140 140 select HAVE_DEBUG_KMEMLEAK 141 141 select HAVE_DMA_API_DEBUG 142 142 select HAVE_DMA_CONTIGUOUS 143 - select DMA_NOOP_OPS 143 + select DMA_DIRECT_OPS 144 144 select HAVE_DYNAMIC_FTRACE 145 145 select HAVE_DYNAMIC_FTRACE_WITH_REGS 146 146 select HAVE_EFFICIENT_UNALIGNED_ACCESS
+1
arch/s390/include/asm/Kbuild
··· 4 4 generic-y += clkdev.h 5 5 generic-y += device.h 6 6 generic-y += dma-contiguous.h 7 + generic-y += dma-mapping.h 7 8 generic-y += div64.h 8 9 generic-y += emergency-restart.h 9 10 generic-y += export.h
-26
arch/s390/include/asm/dma-mapping.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef _ASM_S390_DMA_MAPPING_H 3 - #define _ASM_S390_DMA_MAPPING_H 4 - 5 - #include <linux/kernel.h> 6 - #include <linux/types.h> 7 - #include <linux/mm.h> 8 - #include <linux/scatterlist.h> 9 - #include <linux/dma-debug.h> 10 - #include <linux/io.h> 11 - 12 - extern const struct dma_map_ops s390_pci_dma_ops; 13 - 14 - static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 15 - { 16 - return &dma_noop_ops; 17 - } 18 - 19 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 20 - { 21 - if (!dev->dma_mask) 22 - return false; 23 - return addr + size - 1 <= *dev->dma_mask; 24 - } 25 - 26 - #endif /* _ASM_S390_DMA_MAPPING_H */
+3
arch/s390/include/asm/pci_dma.h
··· 201 201 unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr); 202 202 void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags); 203 203 204 + extern const struct dma_map_ops s390_pci_dma_ops; 205 + 206 + 204 207 #endif
+2 -1
arch/tile/Kconfig
··· 249 249 250 250 If unsure, say "true". 251 251 252 - config ZONE_DMA 252 + config ZONE_DMA32 253 253 def_bool y 254 254 255 255 config IOMMU_HELPER ··· 261 261 config SWIOTLB 262 262 bool 263 263 default TILEGX 264 + select DMA_DIRECT_OPS 264 265 select IOMMU_HELPER 265 266 select NEED_SG_DMA_LENGTH 266 267 select ARCH_HAS_DMA_SET_COHERENT_MASK
-20
arch/tile/include/asm/dma-mapping.h
··· 44 44 dev->archdata.dma_offset = off; 45 45 } 46 46 47 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 48 - { 49 - return paddr; 50 - } 51 - 52 - static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 53 - { 54 - return daddr; 55 - } 56 - 57 - static inline void dma_mark_clean(void *addr, size_t size) {} 58 - 59 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 60 - { 61 - if (!dev->dma_mask) 62 - return 0; 63 - 64 - return addr + size - 1 <= *dev->dma_mask; 65 - } 66 - 67 47 #define HAVE_ARCH_DMA_SET_MASK 1 68 48 int dma_set_mask(struct device *dev, u64 mask); 69 49
+4 -34
arch/tile/kernel/pci-dma.c
··· 54 54 * which case we will return NULL. But such devices are uncommon. 55 55 */ 56 56 if (dma_mask <= DMA_BIT_MASK(32)) { 57 - gfp |= GFP_DMA; 57 + gfp |= GFP_DMA32; 58 58 node = 0; 59 59 } 60 60 ··· 509 509 /* PCI DMA mapping functions for legacy PCI devices */ 510 510 511 511 #ifdef CONFIG_SWIOTLB 512 - static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, 513 - dma_addr_t *dma_handle, gfp_t gfp, 514 - unsigned long attrs) 515 - { 516 - gfp |= GFP_DMA; 517 - return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); 518 - } 519 - 520 - static void tile_swiotlb_free_coherent(struct device *dev, size_t size, 521 - void *vaddr, dma_addr_t dma_addr, 522 - unsigned long attrs) 523 - { 524 - swiotlb_free_coherent(dev, size, vaddr, dma_addr); 525 - } 526 - 527 - static const struct dma_map_ops pci_swiotlb_dma_ops = { 528 - .alloc = tile_swiotlb_alloc_coherent, 529 - .free = tile_swiotlb_free_coherent, 530 - .map_page = swiotlb_map_page, 531 - .unmap_page = swiotlb_unmap_page, 532 - .map_sg = swiotlb_map_sg_attrs, 533 - .unmap_sg = swiotlb_unmap_sg_attrs, 534 - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 535 - .sync_single_for_device = swiotlb_sync_single_for_device, 536 - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 537 - .sync_sg_for_device = swiotlb_sync_sg_for_device, 538 - .dma_supported = swiotlb_dma_supported, 539 - .mapping_error = swiotlb_dma_mapping_error, 540 - }; 541 - 542 512 static const struct dma_map_ops pci_hybrid_dma_ops = { 543 - .alloc = tile_swiotlb_alloc_coherent, 544 - .free = tile_swiotlb_free_coherent, 513 + .alloc = swiotlb_alloc, 514 + .free = swiotlb_free, 545 515 .map_page = tile_pci_dma_map_page, 546 516 .unmap_page = tile_pci_dma_unmap_page, 547 517 .map_sg = tile_pci_dma_map_sg, ··· 522 552 .sync_sg_for_device = tile_pci_dma_sync_sg_for_device, 523 553 }; 524 554 525 - const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; 555 + const struct dma_map_ops *gx_legacy_pci_dma_map_ops = 
&swiotlb_dma_ops; 526 556 const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops; 527 557 #else 528 558 const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+4 -4
arch/tile/kernel/setup.c
··· 814 814 #endif 815 815 816 816 if (start < dma_end) { 817 - zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL], 817 + zones_size[ZONE_DMA32] = min(zones_size[ZONE_NORMAL], 818 818 dma_end - start); 819 - zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA]; 819 + zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA32]; 820 820 } else { 821 - zones_size[ZONE_DMA] = 0; 821 + zones_size[ZONE_DMA32] = 0; 822 822 } 823 823 824 824 /* Take zone metadata from controller 0 if we're isolnode. */ ··· 830 830 PFN_UP(node_percpu[i])); 831 831 832 832 /* Track the type of memory on each node */ 833 - if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA]) 833 + if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA32]) 834 834 node_set_state(i, N_NORMAL_MEMORY); 835 835 #ifdef CONFIG_HIGHMEM 836 836 if (end != start)
+1 -28
arch/unicore32/include/asm/dma-mapping.h
··· 12 12 #ifndef __UNICORE_DMA_MAPPING_H__ 13 13 #define __UNICORE_DMA_MAPPING_H__ 14 14 15 - #ifdef __KERNEL__ 16 - 17 - #include <linux/mm_types.h> 18 - #include <linux/scatterlist.h> 19 15 #include <linux/swiotlb.h> 20 - 21 - extern const struct dma_map_ops swiotlb_dma_map_ops; 22 16 23 17 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 24 18 { 25 - return &swiotlb_dma_map_ops; 19 + return &swiotlb_dma_ops; 26 20 } 27 21 28 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 29 - { 30 - if (dev && dev->dma_mask) 31 - return addr + size - 1 <= *dev->dma_mask; 32 - 33 - return 1; 34 - } 35 - 36 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 37 - { 38 - return paddr; 39 - } 40 - 41 - static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 42 - { 43 - return daddr; 44 - } 45 - 46 - static inline void dma_mark_clean(void *addr, size_t size) {} 47 - 48 - #endif /* __KERNEL__ */ 49 22 #endif
+1
arch/unicore32/mm/Kconfig
··· 42 42 43 43 config SWIOTLB 44 44 def_bool y 45 + select DMA_DIRECT_OPS 45 46 46 47 config IOMMU_HELPER 47 48 def_bool SWIOTLB
-2
arch/unicore32/mm/Makefile
··· 6 6 obj-y := extable.o fault.o init.o pgd.o mmu.o 7 7 obj-y += flush.o ioremap.o 8 8 9 - obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o 10 - 11 9 obj-$(CONFIG_MODULES) += proc-syms.o 12 10 13 11 obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
-48
arch/unicore32/mm/dma-swiotlb.c
··· 1 - /* 2 - * Contains routines needed to support swiotlb for UniCore32. 3 - * 4 - * Copyright (C) 2010 Guan Xuetao 5 - * 6 - * This program is free software; you can redistribute it and/or modify it 7 - * under the terms of the GNU General Public License as published by the 8 - * Free Software Foundation; either version 2 of the License, or (at your 9 - * option) any later version. 10 - */ 11 - #include <linux/pci.h> 12 - #include <linux/cache.h> 13 - #include <linux/module.h> 14 - #include <linux/dma-mapping.h> 15 - #include <linux/swiotlb.h> 16 - #include <linux/bootmem.h> 17 - 18 - #include <asm/dma.h> 19 - 20 - static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, 21 - dma_addr_t *dma_handle, gfp_t flags, 22 - unsigned long attrs) 23 - { 24 - return swiotlb_alloc_coherent(dev, size, dma_handle, flags); 25 - } 26 - 27 - static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, 28 - void *vaddr, dma_addr_t dma_addr, 29 - unsigned long attrs) 30 - { 31 - swiotlb_free_coherent(dev, size, vaddr, dma_addr); 32 - } 33 - 34 - const struct dma_map_ops swiotlb_dma_map_ops = { 35 - .alloc = unicore_swiotlb_alloc_coherent, 36 - .free = unicore_swiotlb_free_coherent, 37 - .map_sg = swiotlb_map_sg_attrs, 38 - .unmap_sg = swiotlb_unmap_sg_attrs, 39 - .dma_supported = swiotlb_dma_supported, 40 - .map_page = swiotlb_map_page, 41 - .unmap_page = swiotlb_unmap_page, 42 - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 43 - .sync_single_for_device = swiotlb_sync_single_for_device, 44 - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 45 - .sync_sg_for_device = swiotlb_sync_sg_for_device, 46 - .mapping_error = swiotlb_dma_mapping_error, 47 - }; 48 - EXPORT_SYMBOL(swiotlb_dma_map_ops);
+1
arch/x86/Kconfig
··· 54 54 select ARCH_HAS_FORTIFY_SOURCE 55 55 select ARCH_HAS_GCOV_PROFILE_ALL 56 56 select ARCH_HAS_KCOV if X86_64 57 + select ARCH_HAS_PHYS_TO_DMA 57 58 select ARCH_HAS_PMEM_API if X86_64 58 59 select ARCH_HAS_REFCOUNT 59 60 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
+30
arch/x86/include/asm/dma-direct.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef ASM_X86_DMA_DIRECT_H 3 + #define ASM_X86_DMA_DIRECT_H 1 4 + 5 + #include <linux/mem_encrypt.h> 6 + 7 + #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ 8 + bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); 9 + dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 10 + phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 11 + #else 12 + static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 13 + { 14 + if (!dev->dma_mask) 15 + return 0; 16 + 17 + return addr + size - 1 <= *dev->dma_mask; 18 + } 19 + 20 + static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 21 + { 22 + return __sme_set(paddr); 23 + } 24 + 25 + static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 26 + { 27 + return __sme_clr(daddr); 28 + } 29 + #endif /* CONFIG_X86_DMA_REMAP */ 30 + #endif /* ASM_X86_DMA_DIRECT_H */
+3 -26
arch/x86/include/asm/dma-mapping.h
··· 12 12 #include <asm/io.h> 13 13 #include <asm/swiotlb.h> 14 14 #include <linux/dma-contiguous.h> 15 - #include <linux/mem_encrypt.h> 16 15 17 16 #ifdef CONFIG_ISA 18 17 # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24) ··· 30 31 return dma_ops; 31 32 } 32 33 34 + int arch_dma_supported(struct device *dev, u64 mask); 35 + #define arch_dma_supported arch_dma_supported 36 + 33 37 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); 34 38 #define arch_dma_alloc_attrs arch_dma_alloc_attrs 35 39 ··· 43 41 extern void dma_generic_free_coherent(struct device *dev, size_t size, 44 42 void *vaddr, dma_addr_t dma_addr, 45 43 unsigned long attrs); 46 - 47 - #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ 48 - extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); 49 - extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 50 - extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 51 - #else 52 - 53 - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 54 - { 55 - if (!dev->dma_mask) 56 - return 0; 57 - 58 - return addr + size - 1 <= *dev->dma_mask; 59 - } 60 - 61 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 62 - { 63 - return __sme_set(paddr); 64 - } 65 - 66 - static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 67 - { 68 - return __sme_clr(daddr); 69 - } 70 - #endif /* CONFIG_X86_DMA_REMAP */ 71 44 72 45 static inline unsigned long dma_alloc_coherent_mask(struct device *dev, 73 46 gfp_t gfp)
-2
arch/x86/include/asm/swiotlb.h
··· 28 28 } 29 29 #endif 30 30 31 - static inline void dma_mark_clean(void *addr, size_t size) {} 32 - 33 31 extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 34 32 dma_addr_t *dma_handle, gfp_t flags, 35 33 unsigned long attrs);
+1
arch/x86/kernel/amd_gart_64.c
··· 31 31 #include <linux/io.h> 32 32 #include <linux/gfp.h> 33 33 #include <linux/atomic.h> 34 + #include <linux/dma-direct.h> 34 35 #include <asm/mtrr.h> 35 36 #include <asm/pgtable.h> 36 37 #include <asm/proto.h>
+13 -10
arch/x86/kernel/pci-dma.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 - #include <linux/dma-mapping.h> 2 + #include <linux/dma-direct.h> 3 3 #include <linux/dma-debug.h> 4 4 #include <linux/dmar.h> 5 5 #include <linux/export.h> ··· 87 87 88 88 dma_mask = dma_alloc_coherent_mask(dev, flag); 89 89 90 - flag &= ~__GFP_ZERO; 91 90 again: 92 91 page = NULL; 93 92 /* CMA can be used only in the context which permits sleeping */ ··· 138 139 if (!*dev) 139 140 *dev = &x86_dma_fallback_dev; 140 141 141 - *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 142 142 *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); 143 143 144 144 if (!is_device_dma_capable(*dev)) ··· 215 217 } 216 218 early_param("iommu", iommu_setup); 217 219 218 - int x86_dma_supported(struct device *dev, u64 mask) 220 + int arch_dma_supported(struct device *dev, u64 mask) 219 221 { 220 222 #ifdef CONFIG_PCI 221 223 if (mask > 0xffffffff && forbid_dac > 0) { ··· 223 225 return 0; 224 226 } 225 227 #endif 226 - 227 - /* Copied from i386. Doesn't make much sense, because it will 228 - only work for pci_alloc_coherent. 229 - The caller just has to use GFP_DMA in this case. */ 230 - if (mask < DMA_BIT_MASK(24)) 231 - return 0; 232 228 233 229 /* Tell the device to use SAC when IOMMU force is on. This 234 230 allows the driver to use cheaper accesses in some cases. ··· 241 249 return 0; 242 250 } 243 251 252 + return 1; 253 + } 254 + EXPORT_SYMBOL(arch_dma_supported); 255 + 256 + int x86_dma_supported(struct device *dev, u64 mask) 257 + { 258 + /* Copied from i386. Doesn't make much sense, because it will 259 + only work for pci_alloc_coherent. 260 + The caller just has to use GFP_DMA in this case. */ 261 + if (mask < DMA_BIT_MASK(24)) 262 + return 0; 244 263 return 1; 245 264 } 246 265
+1 -1
arch/x86/kernel/pci-nommu.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 /* Fallback functions when the main IOMMU code is not compiled in. This 3 3 code is roughly equivalent to i386. */ 4 - #include <linux/dma-mapping.h> 4 + #include <linux/dma-direct.h> 5 5 #include <linux/scatterlist.h> 6 6 #include <linux/string.h> 7 7 #include <linux/gfp.h>
+4 -4
arch/x86/kernel/pci-swiotlb.c
··· 6 6 #include <linux/init.h> 7 7 #include <linux/swiotlb.h> 8 8 #include <linux/bootmem.h> 9 - #include <linux/dma-mapping.h> 9 + #include <linux/dma-direct.h> 10 10 #include <linux/mem_encrypt.h> 11 11 12 12 #include <asm/iommu.h> ··· 48 48 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); 49 49 } 50 50 51 - static const struct dma_map_ops swiotlb_dma_ops = { 51 + static const struct dma_map_ops x86_swiotlb_dma_ops = { 52 52 .mapping_error = swiotlb_dma_mapping_error, 53 53 .alloc = x86_swiotlb_alloc_coherent, 54 54 .free = x86_swiotlb_free_coherent, ··· 112 112 { 113 113 if (swiotlb) { 114 114 swiotlb_init(0); 115 - dma_ops = &swiotlb_dma_ops; 115 + dma_ops = &x86_swiotlb_dma_ops; 116 116 } 117 117 } 118 118 ··· 120 120 { 121 121 /* An IOMMU turned us off. */ 122 122 if (!swiotlb) 123 - swiotlb_free(); 123 + swiotlb_exit(); 124 124 else { 125 125 printk(KERN_INFO "PCI-DMA: " 126 126 "Using software bounce buffering for IO (SWIOTLB)\n");
+1 -1
arch/x86/mm/mem_encrypt.c
··· 15 15 #include <linux/linkage.h> 16 16 #include <linux/init.h> 17 17 #include <linux/mm.h> 18 - #include <linux/dma-mapping.h> 18 + #include <linux/dma-direct.h> 19 19 #include <linux/swiotlb.h> 20 20 #include <linux/mem_encrypt.h> 21 21
+1
arch/x86/pci/sta2x11-fixup.c
··· 26 26 #include <linux/pci_ids.h> 27 27 #include <linux/export.h> 28 28 #include <linux/list.h> 29 + #include <linux/dma-direct.h> 29 30 #include <asm/iommu.h> 30 31 31 32 #define STA2X11_SWIOTLB_SIZE (4*1024*1024)
-10
arch/xtensa/include/asm/dma-mapping.h
··· 23 23 return &xtensa_dma_map_ops; 24 24 } 25 25 26 - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 27 - { 28 - return (dma_addr_t)paddr; 29 - } 30 - 31 - static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 32 - { 33 - return (phys_addr_t)daddr; 34 - } 35 - 36 26 #endif /* _XTENSA_DMA_MAPPING_H */
+1
drivers/crypto/marvell/cesa.c
··· 24 24 #include <linux/scatterlist.h> 25 25 #include <linux/slab.h> 26 26 #include <linux/module.h> 27 + #include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */ 27 28 #include <linux/clk.h> 28 29 #include <linux/of.h> 29 30 #include <linux/of_platform.h>
+1 -1
drivers/iommu/intel-iommu.c
··· 4808 4808 up_write(&dmar_global_lock); 4809 4809 pr_info("Intel(R) Virtualization Technology for Directed I/O\n"); 4810 4810 4811 - #ifdef CONFIG_SWIOTLB 4811 + #if defined(CONFIG_X86) && defined(CONFIG_SWIOTLB) 4812 4812 swiotlb = 0; 4813 4813 #endif 4814 4814 dma_ops = &intel_dma_ops;
+1 -1
drivers/misc/cxl/vphb.c
··· 54 54 return false; 55 55 } 56 56 57 - set_dma_ops(&dev->dev, &dma_direct_ops); 57 + set_dma_ops(&dev->dev, &dma_nommu_ops); 58 58 set_dma_offset(&dev->dev, PAGE_OFFSET); 59 59 60 60 return _cxl_pci_associate_default_context(dev, afu);
+1
drivers/mtd/nand/qcom_nandc.c
··· 23 23 #include <linux/of_device.h> 24 24 #include <linux/delay.h> 25 25 #include <linux/dma/qcom_bam_dma.h> 26 + #include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */ 26 27 27 28 /* NANDc reg offsets */ 28 29 #define NAND_FLASH_CMD 0x00
+1 -1
drivers/xen/swiotlb-xen.c
··· 36 36 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt 37 37 38 38 #include <linux/bootmem.h> 39 - #include <linux/dma-mapping.h> 39 + #include <linux/dma-direct.h> 40 40 #include <linux/export.h> 41 41 #include <xen/swiotlb-xen.h> 42 42 #include <xen/page.h>
+10
include/asm-generic/dma-mapping.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_GENERIC_DMA_MAPPING_H 3 + #define _ASM_GENERIC_DMA_MAPPING_H 4 + 5 + static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 6 + { 7 + return &dma_direct_ops; 8 + } 9 + 10 + #endif /* _ASM_GENERIC_DMA_MAPPING_H */
+47
include/linux/dma-direct.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_DMA_DIRECT_H 3 + #define _LINUX_DMA_DIRECT_H 1 4 + 5 + #include <linux/dma-mapping.h> 6 + 7 + #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA 8 + #include <asm/dma-direct.h> 9 + #else 10 + static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 11 + { 12 + dma_addr_t dev_addr = (dma_addr_t)paddr; 13 + 14 + return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); 15 + } 16 + 17 + static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) 18 + { 19 + phys_addr_t paddr = (phys_addr_t)dev_addr; 20 + 21 + return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); 22 + } 23 + 24 + static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 25 + { 26 + if (!dev->dma_mask) 27 + return false; 28 + 29 + return addr + size - 1 <= *dev->dma_mask; 30 + } 31 + #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ 32 + 33 + #ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN 34 + void dma_mark_clean(void *addr, size_t size); 35 + #else 36 + static inline void dma_mark_clean(void *addr, size_t size) 37 + { 38 + } 39 + #endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ 40 + 41 + void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 42 + gfp_t gfp, unsigned long attrs); 43 + void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, 44 + dma_addr_t dma_addr, unsigned long attrs); 45 + int dma_direct_supported(struct device *dev, u64 mask); 46 + 47 + #endif /* _LINUX_DMA_DIRECT_H */
+21 -2
include/linux/dma-mapping.h
··· 136 136 int is_phys; 137 137 }; 138 138 139 - extern const struct dma_map_ops dma_noop_ops; 139 + extern const struct dma_map_ops dma_direct_ops; 140 140 extern const struct dma_map_ops dma_virt_ops; 141 141 142 142 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) ··· 513 513 void *cpu_addr; 514 514 515 515 BUG_ON(!ops); 516 + WARN_ON_ONCE(dev && !dev->coherent_dma_mask); 516 517 517 518 if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) 518 519 return cpu_addr; 520 + 521 + /* 522 + * Let the implementation decide on the zone to allocate from, and 523 + * decide on the way of zeroing the memory given that the memory 524 + * returned should always be zeroed. 525 + */ 526 + flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO); 519 527 520 528 if (!arch_dma_alloc_attrs(&dev, &flag)) 521 529 return NULL; ··· 576 568 return 0; 577 569 } 578 570 571 + /* 572 + * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please 573 + * don't use this is new code. 574 + */ 575 + #ifndef arch_dma_supported 576 + #define arch_dma_supported(dev, mask) (1) 577 + #endif 578 + 579 579 static inline void dma_check_mask(struct device *dev, u64 mask) 580 580 { 581 581 if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) ··· 596 580 597 581 if (!ops) 598 582 return 0; 583 + if (!arch_dma_supported(dev, mask)) 584 + return 0; 585 + 599 586 if (!ops->dma_supported) 600 587 return 1; 601 588 return ops->dma_supported(dev, mask); ··· 711 692 #ifndef dma_max_pfn 712 693 static inline unsigned long dma_max_pfn(struct device *dev) 713 694 { 714 - return *dev->dma_mask >> PAGE_SHIFT; 695 + return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset; 715 696 } 716 697 #endif 717 698
+10 -2
include/linux/swiotlb.h
··· 66 66 enum dma_sync_target target); 67 67 68 68 /* Accessory functions. */ 69 + 70 + void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, 71 + gfp_t flags, unsigned long attrs); 72 + void swiotlb_free(struct device *dev, size_t size, void *vaddr, 73 + dma_addr_t dma_addr, unsigned long attrs); 74 + 69 75 extern void 70 76 *swiotlb_alloc_coherent(struct device *hwdev, size_t size, 71 77 dma_addr_t *dma_handle, gfp_t flags); ··· 121 115 swiotlb_dma_supported(struct device *hwdev, u64 mask); 122 116 123 117 #ifdef CONFIG_SWIOTLB 124 - extern void __init swiotlb_free(void); 118 + extern void __init swiotlb_exit(void); 125 119 unsigned int swiotlb_max_segment(void); 126 120 #else 127 - static inline void swiotlb_free(void) { } 121 + static inline void swiotlb_exit(void) { } 128 122 static inline unsigned int swiotlb_max_segment(void) { return 0; } 129 123 #endif 130 124 131 125 extern void swiotlb_print_info(void); 132 126 extern int is_swiotlb_buffer(phys_addr_t paddr); 133 127 extern void swiotlb_set_max_segment(unsigned int); 128 + 129 + extern const struct dma_map_ops swiotlb_dma_ops; 134 130 135 131 #endif /* __LINUX_SWIOTLB_H */
+1 -1
lib/Kconfig
··· 413 413 bool 414 414 default n 415 415 416 - config DMA_NOOP_OPS 416 + config DMA_DIRECT_OPS 417 417 bool 418 418 depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) 419 419 default n
+1 -1
lib/Makefile
··· 28 28 29 29 lib-$(CONFIG_MMU) += ioremap.o 30 30 lib-$(CONFIG_SMP) += cpumask.o 31 - lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o 31 + lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o 32 32 lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o 33 33 34 34 lib-y += kobject.o klist.o
+156
lib/dma-direct.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * DMA operations that map physical memory directly without using an IOMMU or 4 + * flushing caches. 5 + */ 6 + #include <linux/export.h> 7 + #include <linux/mm.h> 8 + #include <linux/dma-direct.h> 9 + #include <linux/scatterlist.h> 10 + #include <linux/dma-contiguous.h> 11 + #include <linux/pfn.h> 12 + 13 + #define DIRECT_MAPPING_ERROR 0 14 + 15 + /* 16 + * Most architectures use ZONE_DMA for the first 16 Megabytes, but 17 + * some use it for entirely different regions: 18 + */ 19 + #ifndef ARCH_ZONE_DMA_BITS 20 + #define ARCH_ZONE_DMA_BITS 24 21 + #endif 22 + 23 + static bool 24 + check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, 25 + const char *caller) 26 + { 27 + if (unlikely(dev && !dma_capable(dev, dma_addr, size))) { 28 + if (*dev->dma_mask >= DMA_BIT_MASK(32)) { 29 + dev_err(dev, 30 + "%s: overflow %pad+%zu of device mask %llx\n", 31 + caller, &dma_addr, size, *dev->dma_mask); 32 + } 33 + return false; 34 + } 35 + return true; 36 + } 37 + 38 + static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) 39 + { 40 + return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask; 41 + } 42 + 43 + void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 44 + gfp_t gfp, unsigned long attrs) 45 + { 46 + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 47 + int page_order = get_order(size); 48 + struct page *page = NULL; 49 + 50 + /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */ 51 + if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) 52 + gfp |= GFP_DMA; 53 + if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) 54 + gfp |= GFP_DMA32; 55 + 56 + again: 57 + /* CMA can be used only in the context which permits sleeping */ 58 + if (gfpflags_allow_blocking(gfp)) { 59 + page = dma_alloc_from_contiguous(dev, count, page_order, gfp); 60 + if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { 61 
+ dma_release_from_contiguous(dev, page, count); 62 + page = NULL; 63 + } 64 + } 65 + if (!page) 66 + page = alloc_pages_node(dev_to_node(dev), gfp, page_order); 67 + 68 + if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { 69 + __free_pages(page, page_order); 70 + page = NULL; 71 + 72 + if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && 73 + !(gfp & GFP_DMA)) { 74 + gfp = (gfp & ~GFP_DMA32) | GFP_DMA; 75 + goto again; 76 + } 77 + } 78 + 79 + if (!page) 80 + return NULL; 81 + 82 + *dma_handle = phys_to_dma(dev, page_to_phys(page)); 83 + memset(page_address(page), 0, size); 84 + return page_address(page); 85 + } 86 + 87 + void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, 88 + dma_addr_t dma_addr, unsigned long attrs) 89 + { 90 + unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 91 + 92 + if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count)) 93 + free_pages((unsigned long)cpu_addr, get_order(size)); 94 + } 95 + 96 + static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, 97 + unsigned long offset, size_t size, enum dma_data_direction dir, 98 + unsigned long attrs) 99 + { 100 + dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset; 101 + 102 + if (!check_addr(dev, dma_addr, size, __func__)) 103 + return DIRECT_MAPPING_ERROR; 104 + return dma_addr; 105 + } 106 + 107 + static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 108 + int nents, enum dma_data_direction dir, unsigned long attrs) 109 + { 110 + int i; 111 + struct scatterlist *sg; 112 + 113 + for_each_sg(sgl, sg, nents, i) { 114 + BUG_ON(!sg_page(sg)); 115 + 116 + sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg)); 117 + if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__)) 118 + return 0; 119 + sg_dma_len(sg) = sg->length; 120 + } 121 + 122 + return nents; 123 + } 124 + 125 + int dma_direct_supported(struct device *dev, u64 mask) 126 + { 127 + #ifdef CONFIG_ZONE_DMA 128 + if (mask < 
DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) 129 + return 0; 130 + #else 131 + /* 132 + * Because 32-bit DMA masks are so common we expect every architecture 133 + * to be able to satisfy them - either by not supporting more physical 134 + * memory, or by providing a ZONE_DMA32. If neither is the case, the 135 + * architecture needs to use an IOMMU instead of the direct mapping. 136 + */ 137 + if (mask < DMA_BIT_MASK(32)) 138 + return 0; 139 + #endif 140 + return 1; 141 + } 142 + 143 + static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr) 144 + { 145 + return dma_addr == DIRECT_MAPPING_ERROR; 146 + } 147 + 148 + const struct dma_map_ops dma_direct_ops = { 149 + .alloc = dma_direct_alloc, 150 + .free = dma_direct_free, 151 + .map_page = dma_direct_map_page, 152 + .map_sg = dma_direct_map_sg, 153 + .dma_supported = dma_direct_supported, 154 + .mapping_error = dma_direct_mapping_error, 155 + }; 156 + EXPORT_SYMBOL(dma_direct_ops);
-68
lib/dma-noop.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* 3 - * lib/dma-noop.c 4 - * 5 - * DMA operations that map to physical addresses without flushing memory. 6 - */ 7 - #include <linux/export.h> 8 - #include <linux/mm.h> 9 - #include <linux/dma-mapping.h> 10 - #include <linux/scatterlist.h> 11 - #include <linux/pfn.h> 12 - 13 - static void *dma_noop_alloc(struct device *dev, size_t size, 14 - dma_addr_t *dma_handle, gfp_t gfp, 15 - unsigned long attrs) 16 - { 17 - void *ret; 18 - 19 - ret = (void *)__get_free_pages(gfp, get_order(size)); 20 - if (ret) 21 - *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset); 22 - 23 - return ret; 24 - } 25 - 26 - static void dma_noop_free(struct device *dev, size_t size, 27 - void *cpu_addr, dma_addr_t dma_addr, 28 - unsigned long attrs) 29 - { 30 - free_pages((unsigned long)cpu_addr, get_order(size)); 31 - } 32 - 33 - static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page, 34 - unsigned long offset, size_t size, 35 - enum dma_data_direction dir, 36 - unsigned long attrs) 37 - { 38 - return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset); 39 - } 40 - 41 - static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, 42 - enum dma_data_direction dir, 43 - unsigned long attrs) 44 - { 45 - int i; 46 - struct scatterlist *sg; 47 - 48 - for_each_sg(sgl, sg, nents, i) { 49 - dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset); 50 - void *va; 51 - 52 - BUG_ON(!sg_page(sg)); 53 - va = sg_virt(sg); 54 - sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset; 55 - sg_dma_len(sg) = sg->length; 56 - } 57 - 58 - return nents; 59 - } 60 - 61 - const struct dma_map_ops dma_noop_ops = { 62 - .alloc = dma_noop_alloc, 63 - .free = dma_noop_free, 64 - .map_page = dma_noop_map_page, 65 - .map_sg = dma_noop_map_sg, 66 - }; 67 - 68 - EXPORT_SYMBOL(dma_noop_ops);
+130 -81
lib/swiotlb.c
··· 18 18 */ 19 19 20 20 #include <linux/cache.h> 21 - #include <linux/dma-mapping.h> 21 + #include <linux/dma-direct.h> 22 22 #include <linux/mm.h> 23 23 #include <linux/export.h> 24 24 #include <linux/spinlock.h> ··· 417 417 return -ENOMEM; 418 418 } 419 419 420 - void __init swiotlb_free(void) 420 + void __init swiotlb_exit(void) 421 421 { 422 422 if (!io_tlb_orig_addr) 423 423 return; ··· 586 586 587 587 not_found: 588 588 spin_unlock_irqrestore(&io_tlb_lock, flags); 589 - if (printk_ratelimit()) 589 + if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) 590 590 dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); 591 591 return SWIOTLB_MAP_ERROR; 592 592 found: ··· 605 605 606 606 return tlb_addr; 607 607 } 608 - EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single); 609 608 610 609 /* 611 610 * Allocates bounce buffer and returns its kernel virtual address. ··· 674 675 } 675 676 spin_unlock_irqrestore(&io_tlb_lock, flags); 676 677 } 677 - EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single); 678 678 679 679 void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, 680 680 size_t size, enum dma_data_direction dir, ··· 705 707 BUG(); 706 708 } 707 709 } 708 - EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single); 710 + 711 + static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, 712 + size_t size) 713 + { 714 + u64 mask = DMA_BIT_MASK(32); 715 + 716 + if (dev && dev->coherent_dma_mask) 717 + mask = dev->coherent_dma_mask; 718 + return addr + size - 1 <= mask; 719 + } 720 + 721 + static void * 722 + swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle, 723 + unsigned long attrs) 724 + { 725 + phys_addr_t phys_addr; 726 + 727 + if (swiotlb_force == SWIOTLB_NO_FORCE) 728 + goto out_warn; 729 + 730 + phys_addr = swiotlb_tbl_map_single(dev, 731 + swiotlb_phys_to_dma(dev, io_tlb_start), 732 + 0, size, DMA_FROM_DEVICE, 0); 733 + if (phys_addr == SWIOTLB_MAP_ERROR) 734 + goto out_warn; 735 + 736 + *dma_handle = 
swiotlb_phys_to_dma(dev, phys_addr); 737 + if (dma_coherent_ok(dev, *dma_handle, size)) 738 + goto out_unmap; 739 + 740 + memset(phys_to_virt(phys_addr), 0, size); 741 + return phys_to_virt(phys_addr); 742 + 743 + out_unmap: 744 + dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 745 + (unsigned long long)(dev ? dev->coherent_dma_mask : 0), 746 + (unsigned long long)*dma_handle); 747 + 748 + /* 749 + * DMA_TO_DEVICE to avoid memcpy in unmap_single. 750 + * DMA_ATTR_SKIP_CPU_SYNC is optional. 751 + */ 752 + swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, 753 + DMA_ATTR_SKIP_CPU_SYNC); 754 + out_warn: 755 + if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { 756 + dev_warn(dev, 757 + "swiotlb: coherent allocation failed, size=%zu\n", 758 + size); 759 + dump_stack(); 760 + } 761 + return NULL; 762 + } 709 763 710 764 void * 711 765 swiotlb_alloc_coherent(struct device *hwdev, size_t size, 712 766 dma_addr_t *dma_handle, gfp_t flags) 713 767 { 714 - dma_addr_t dev_addr; 715 - void *ret; 716 768 int order = get_order(size); 717 - u64 dma_mask = DMA_BIT_MASK(32); 718 - 719 - if (hwdev && hwdev->coherent_dma_mask) 720 - dma_mask = hwdev->coherent_dma_mask; 769 + unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0; 770 + void *ret; 721 771 722 772 ret = (void *)__get_free_pages(flags, order); 723 773 if (ret) { 724 - dev_addr = swiotlb_virt_to_bus(hwdev, ret); 725 - if (dev_addr + size - 1 > dma_mask) { 726 - /* 727 - * The allocated memory isn't reachable by the device. 728 - */ 729 - free_pages((unsigned long) ret, order); 730 - ret = NULL; 774 + *dma_handle = swiotlb_virt_to_bus(hwdev, ret); 775 + if (dma_coherent_ok(hwdev, *dma_handle, size)) { 776 + memset(ret, 0, size); 777 + return ret; 731 778 } 732 - } 733 - if (!ret) { 734 - /* 735 - * We are either out of memory or the device can't DMA to 736 - * GFP_DMA memory; fall back on map_single(), which 737 - * will grab memory from the lowest available address range. 
738 - */ 739 - phys_addr_t paddr = map_single(hwdev, 0, size, 740 - DMA_FROM_DEVICE, 0); 741 - if (paddr == SWIOTLB_MAP_ERROR) 742 - goto err_warn; 743 - 744 - ret = phys_to_virt(paddr); 745 - dev_addr = swiotlb_phys_to_dma(hwdev, paddr); 746 - 747 - /* Confirm address can be DMA'd by device */ 748 - if (dev_addr + size - 1 > dma_mask) { 749 - printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n", 750 - (unsigned long long)dma_mask, 751 - (unsigned long long)dev_addr); 752 - 753 - /* 754 - * DMA_TO_DEVICE to avoid memcpy in unmap_single. 755 - * The DMA_ATTR_SKIP_CPU_SYNC is optional. 756 - */ 757 - swiotlb_tbl_unmap_single(hwdev, paddr, 758 - size, DMA_TO_DEVICE, 759 - DMA_ATTR_SKIP_CPU_SYNC); 760 - goto err_warn; 761 - } 779 + free_pages((unsigned long)ret, order); 762 780 } 763 781 764 - *dma_handle = dev_addr; 765 - memset(ret, 0, size); 766 - 767 - return ret; 768 - 769 - err_warn: 770 - pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", 771 - dev_name(hwdev), size); 772 - dump_stack(); 773 - 774 - return NULL; 782 + return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs); 775 783 } 776 784 EXPORT_SYMBOL(swiotlb_alloc_coherent); 785 + 786 + static bool swiotlb_free_buffer(struct device *dev, size_t size, 787 + dma_addr_t dma_addr) 788 + { 789 + phys_addr_t phys_addr = dma_to_phys(dev, dma_addr); 790 + 791 + WARN_ON_ONCE(irqs_disabled()); 792 + 793 + if (!is_swiotlb_buffer(phys_addr)) 794 + return false; 795 + 796 + /* 797 + * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single. 798 + * DMA_ATTR_SKIP_CPU_SYNC is optional. 
799 + */ 800 + swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, 801 + DMA_ATTR_SKIP_CPU_SYNC); 802 + return true; 803 + } 777 804 778 805 void 779 806 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 780 807 dma_addr_t dev_addr) 781 808 { 782 - phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); 783 - 784 - WARN_ON(irqs_disabled()); 785 - if (!is_swiotlb_buffer(paddr)) 809 + if (!swiotlb_free_buffer(hwdev, size, dev_addr)) 786 810 free_pages((unsigned long)vaddr, get_order(size)); 787 - else 788 - /* 789 - * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single. 790 - * DMA_ATTR_SKIP_CPU_SYNC is optional. 791 - */ 792 - swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE, 793 - DMA_ATTR_SKIP_CPU_SYNC); 794 811 } 795 812 EXPORT_SYMBOL(swiotlb_free_coherent); 796 813 ··· 881 868 882 869 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 883 870 } 884 - EXPORT_SYMBOL_GPL(swiotlb_map_page); 885 871 886 872 /* 887 873 * Unmap a single streaming mode DMA translation. The dma_addr and size must ··· 921 909 { 922 910 unmap_single(hwdev, dev_addr, size, dir, attrs); 923 911 } 924 - EXPORT_SYMBOL_GPL(swiotlb_unmap_page); 925 912 926 913 /* 927 914 * Make physical memory consistent for a single streaming mode DMA translation ··· 958 947 { 959 948 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); 960 949 } 961 - EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); 962 950 963 951 void 964 952 swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, ··· 965 955 { 966 956 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); 967 957 } 968 - EXPORT_SYMBOL(swiotlb_sync_single_for_device); 969 958 970 959 /* 971 960 * Map a set of buffers described by scatterlist in streaming mode for DMA. ··· 1016 1007 } 1017 1008 return nelems; 1018 1009 } 1019 - EXPORT_SYMBOL(swiotlb_map_sg_attrs); 1020 1010 1021 1011 /* 1022 1012 * Unmap a set of streaming mode DMA translations. 
Again, cpu read rules ··· 1035 1027 unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, 1036 1028 attrs); 1037 1029 } 1038 - EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); 1039 1030 1040 1031 /* 1041 1032 * Make physical memory consistent for a set of streaming mode DMA translations ··· 1062 1055 { 1063 1056 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); 1064 1057 } 1065 - EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); 1066 1058 1067 1059 void 1068 1060 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, ··· 1069 1063 { 1070 1064 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 1071 1065 } 1072 - EXPORT_SYMBOL(swiotlb_sync_sg_for_device); 1073 1066 1074 1067 int 1075 1068 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) 1076 1069 { 1077 1070 return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer)); 1078 1071 } 1079 - EXPORT_SYMBOL(swiotlb_dma_mapping_error); 1080 1072 1081 1073 /* 1082 1074 * Return whether the given device DMA address mask can be supported ··· 1087 1083 { 1088 1084 return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask; 1089 1085 } 1090 - EXPORT_SYMBOL(swiotlb_dma_supported); 1086 + 1087 + #ifdef CONFIG_DMA_DIRECT_OPS 1088 + void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 1089 + gfp_t gfp, unsigned long attrs) 1090 + { 1091 + void *vaddr; 1092 + 1093 + /* temporary workaround: */ 1094 + if (gfp & __GFP_NOWARN) 1095 + attrs |= DMA_ATTR_NO_WARN; 1096 + 1097 + /* 1098 + * Don't print a warning when the first allocation attempt fails. 1099 + * swiotlb_alloc_coherent() will print a warning when the DMA memory 1100 + * allocation ultimately failed. 
1101 + */ 1102 + gfp |= __GFP_NOWARN; 1103 + 1104 + vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs); 1105 + if (!vaddr) 1106 + vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs); 1107 + return vaddr; 1108 + } 1109 + 1110 + void swiotlb_free(struct device *dev, size_t size, void *vaddr, 1111 + dma_addr_t dma_addr, unsigned long attrs) 1112 + { 1113 + if (!swiotlb_free_buffer(dev, size, dma_addr)) 1114 + dma_direct_free(dev, size, vaddr, dma_addr, attrs); 1115 + } 1116 + 1117 + const struct dma_map_ops swiotlb_dma_ops = { 1118 + .mapping_error = swiotlb_dma_mapping_error, 1119 + .alloc = swiotlb_alloc, 1120 + .free = swiotlb_free, 1121 + .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 1122 + .sync_single_for_device = swiotlb_sync_single_for_device, 1123 + .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, 1124 + .sync_sg_for_device = swiotlb_sync_sg_for_device, 1125 + .map_sg = swiotlb_map_sg_attrs, 1126 + .unmap_sg = swiotlb_unmap_sg_attrs, 1127 + .map_page = swiotlb_map_page, 1128 + .unmap_page = swiotlb_unmap_page, 1129 + .dma_supported = swiotlb_dma_supported, 1130 + }; 1131 + #endif /* CONFIG_DMA_DIRECT_OPS */