
Merge tag 'for-next-dma_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma DMA mapping updates from Doug Ledford:
"Drop IB DMA mapping code and use core DMA code instead.

Bart Van Assche noted that the IB DMA mapping code was similar enough
to the core DMA mapping code that, with a few changes, it was possible
to remove the IB DMA mapping code entirely and switch the RDMA stack
to use the core DMA mapping code.

This resulted in a nice set of cleanups, but touched the entire tree
and has been kept separate for that reason."
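At the heart of the series, each architecture's get_dma_ops(struct device *)
helper becomes get_arch_dma_ops(struct bus_type *), and the per-device
override pointer moves out of dev->archdata into struct device itself. The
common helper in include/linux/dma-mapping.h then dispatches roughly like
this (a sketch of the shape, not the verbatim upstream code):

    static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
    {
            /* a device-specific dma_ops pointer (now in struct device) wins */
            if (dev && dev->dma_ops)
                    return dev->dma_ops;
            /* otherwise fall back to the bus/arch-wide default */
            return get_arch_dma_ops(dev ? dev->bus : NULL);
    }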

* tag 'for-next-dma_ops' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (37 commits)
IB/rxe, IB/rdmavt: Use dma_virt_ops instead of duplicating it
IB/core: Remove ib_device.dma_device
nvme-rdma: Switch from dma_device to dev.parent
RDS: net: Switch from dma_device to dev.parent
IB/srpt: Modify a debug statement
IB/srp: Switch from dma_device to dev.parent
IB/iser: Switch from dma_device to dev.parent
IB/IPoIB: Switch from dma_device to dev.parent
IB/rxe: Switch from dma_device to dev.parent
IB/vmw_pvrdma: Switch from dma_device to dev.parent
IB/usnic: Switch from dma_device to dev.parent
IB/qib: Switch from dma_device to dev.parent
IB/qedr: Switch from dma_device to dev.parent
IB/ocrdma: Switch from dma_device to dev.parent
IB/nes: Remove a superfluous assignment statement
IB/mthca: Switch from dma_device to dev.parent
IB/mlx5: Switch from dma_device to dev.parent
IB/mlx4: Switch from dma_device to dev.parent
IB/i40iw: Remove a superfluous assignment statement
IB/hns: Switch from dma_device to dev.parent
...
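The long tail of "Switch from dma_device to dev.parent" commits above all
make the same mechanical substitution: instead of keeping a separate
ib_device.dma_device pointer, a provider fills in the parent of the struct
device embedded in ib_device, and consumers read it back from there. A
hypothetical provider-side hunk (generic names, not taken from any one
driver):

    /* at device registration time, pdev being the underlying PCI device */
    -       ibdev->dma_device = &pdev->dev;
    +       ibdev->dev.parent = &pdev->dev;

ULPs such as srp, iser, IPoIB and nvme-rdma make the mirror-image change on
the consumer side, as the hunks below show.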

+435 -1296
+2 -2
arch/alpha/include/asm/dma-mapping.h
···
 #ifndef _ALPHA_DMA_MAPPING_H
 #define _ALPHA_DMA_MAPPING_H

-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return dma_ops;
 }
+2 -2
arch/alpha/kernel/pci-noop.c
···
         return mask < 0x00ffffffUL ? 0 : 1;
 }

-struct dma_map_ops alpha_noop_ops = {
+const struct dma_map_ops alpha_noop_ops = {
         .alloc = alpha_noop_alloc_coherent,
         .free = dma_noop_free_coherent,
         .map_page = dma_noop_map_page,
···
         .dma_supported = alpha_noop_supported,
 };

-struct dma_map_ops *dma_ops = &alpha_noop_ops;
+const struct dma_map_ops *dma_ops = &alpha_noop_ops;
 EXPORT_SYMBOL(dma_ops);
+2 -2
arch/alpha/kernel/pci_iommu.c
···
         return dma_addr == 0;
 }

-struct dma_map_ops alpha_pci_ops = {
+const struct dma_map_ops alpha_pci_ops = {
         .alloc = alpha_pci_alloc_coherent,
         .free = alpha_pci_free_coherent,
         .map_page = alpha_pci_map_page,
···
         .dma_supported = alpha_pci_supported,
 };

-struct dma_map_ops *dma_ops = &alpha_pci_ops;
+const struct dma_map_ops *dma_ops = &alpha_pci_ops;
 EXPORT_SYMBOL(dma_ops);
+2 -2
arch/arc/include/asm/dma-mapping.h
···
 #include <plat/dma.h>
 #endif

-extern struct dma_map_ops arc_dma_ops;
+extern const struct dma_map_ops arc_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &arc_dma_ops;
 }
+1 -1
arch/arc/mm/dma.c
···
         return dma_mask == DMA_BIT_MASK(32);
 }

-struct dma_map_ops arc_dma_ops = {
+const struct dma_map_ops arc_dma_ops = {
         .alloc = arc_dma_alloc,
         .free = arc_dma_free,
         .mmap = arc_dma_mmap,
+1 -1
arch/arm/common/dmabounce.c
···
         return arm_dma_ops.set_dma_mask(dev, dma_mask);
 }

-static struct dma_map_ops dmabounce_ops = {
+static const struct dma_map_ops dmabounce_ops = {
         .alloc = arm_dma_alloc,
         .free = arm_dma_free,
         .mmap = arm_dma_mmap,
-1
arch/arm/include/asm/device.h
···
 #define ASMARM_DEVICE_H

 struct dev_archdata {
-        struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMABOUNCE
         struct dmabounce_device_info *dmabounce;
 #endif
+7 -13
arch/arm/include/asm/dma-mapping.h
···
 #include <asm/xen/hypervisor.h>

 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
-extern struct dma_map_ops arm_dma_ops;
-extern struct dma_map_ops arm_coherent_dma_ops;
+extern const struct dma_map_ops arm_dma_ops;
+extern const struct dma_map_ops arm_coherent_dma_ops;

-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
+        if (dev && dev->dma_ops)
+                return dev->dma_ops;
         return &arm_dma_ops;
 }

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         if (xen_initial_domain())
                 return xen_dma_ops;
         else
-                return __generic_dma_ops(dev);
-}
-
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
-        BUG_ON(!dev);
-        dev->archdata.dma_ops = ops;
+                return __generic_dma_ops(NULL);
 }

 #define HAVE_ARCH_DMA_SUPPORTED 1
+11 -11
arch/arm/mm/dma-mapping.c
···
         __dma_page_cpu_to_dev(page, offset, size, dir);
 }

-struct dma_map_ops arm_dma_ops = {
+const struct dma_map_ops arm_dma_ops = {
         .alloc = arm_dma_alloc,
         .free = arm_dma_free,
         .mmap = arm_dma_mmap,
···
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 unsigned long attrs);

-struct dma_map_ops arm_coherent_dma_ops = {
+const struct dma_map_ops arm_coherent_dma_ops = {
         .alloc = arm_coherent_dma_alloc,
         .free = arm_coherent_dma_free,
         .mmap = arm_coherent_dma_mmap,
···
 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                 enum dma_data_direction dir, unsigned long attrs)
 {
-        struct dma_map_ops *ops = get_dma_ops(dev);
+        const struct dma_map_ops *ops = get_dma_ops(dev);
         struct scatterlist *s;
         int i, j;
···
 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                 enum dma_data_direction dir, unsigned long attrs)
 {
-        struct dma_map_ops *ops = get_dma_ops(dev);
+        const struct dma_map_ops *ops = get_dma_ops(dev);
         struct scatterlist *s;

         int i;
···
 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                 int nents, enum dma_data_direction dir)
 {
-        struct dma_map_ops *ops = get_dma_ops(dev);
+        const struct dma_map_ops *ops = get_dma_ops(dev);
         struct scatterlist *s;
         int i;

···
 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                 int nents, enum dma_data_direction dir)
 {
-        struct dma_map_ops *ops = get_dma_ops(dev);
+        const struct dma_map_ops *ops = get_dma_ops(dev);
         struct scatterlist *s;
         int i;

···
         __dma_page_cpu_to_dev(page, offset, size, dir);
 }

-struct dma_map_ops iommu_ops = {
+const struct dma_map_ops iommu_ops = {
         .alloc = arm_iommu_alloc_attrs,
         .free = arm_iommu_free_attrs,
         .mmap = arm_iommu_mmap_attrs,
···
         .unmap_resource = arm_iommu_unmap_resource,
 };

-struct dma_map_ops iommu_coherent_ops = {
+const struct dma_map_ops iommu_coherent_ops = {
         .alloc = arm_coherent_iommu_alloc_attrs,
         .free = arm_coherent_iommu_free_attrs,
         .mmap = arm_coherent_iommu_mmap_attrs,
···
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

-static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
 {
         return coherent ? &iommu_coherent_ops : &iommu_ops;
 }
···

 #endif /* CONFIG_ARM_DMA_USE_IOMMU */

-static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
+static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
         return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
 }
···
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                         const struct iommu_ops *iommu, bool coherent)
 {
-        struct dma_map_ops *dma_ops;
+        const struct dma_map_ops *dma_ops;

         dev->archdata.dma_coherent = coherent;
         if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
+2 -2
arch/arm/xen/mm.c
···
 }
 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

-struct dma_map_ops *xen_dma_ops;
+const struct dma_map_ops *xen_dma_ops;
 EXPORT_SYMBOL(xen_dma_ops);

-static struct dma_map_ops xen_swiotlb_dma_ops = {
+static const struct dma_map_ops xen_swiotlb_dma_ops = {
         .alloc = xen_swiotlb_alloc_coherent,
         .free = xen_swiotlb_free_coherent,
         .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
-1
arch/arm64/include/asm/device.h
···
 #define __ASM_DEVICE_H

 struct dev_archdata {
-        struct dma_map_ops *dma_ops;
 #ifdef CONFIG_IOMMU_API
         void *iommu;                    /* private IOMMU data */
 #endif
+6 -6
arch/arm64/include/asm/dma-mapping.h
···
 #include <asm/xen/hypervisor.h>

 #define DMA_ERROR_CODE (~(dma_addr_t)0)
-extern struct dma_map_ops dummy_dma_ops;
+extern const struct dma_map_ops dummy_dma_ops;

-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
+        if (dev && dev->dma_ops)
+                return dev->dma_ops;

         /*
          * We expect no ISA devices, and all other DMA masters are expected to
···
         return &dummy_dma_ops;
 }

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         if (xen_initial_domain())
                 return xen_dma_ops;
         else
-                return __generic_dma_ops(dev);
+                return __generic_dma_ops(NULL);
 }

 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+7 -7
arch/arm64/mm/dma-mapping.c
···
         return 0;
 }

-static struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops swiotlb_dma_ops = {
         .alloc = __dma_alloc,
         .free = __dma_free,
         .mmap = __swiotlb_mmap,
···
         return 0;
 }

-struct dma_map_ops dummy_dma_ops = {
+const struct dma_map_ops dummy_dma_ops = {
         .alloc = __dummy_alloc,
         .free = __dummy_free,
         .mmap = __dummy_mmap,
···
         iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
 }

-static struct dma_map_ops iommu_dma_ops = {
+static const struct dma_map_ops iommu_dma_ops = {
         .alloc = __iommu_alloc_attrs,
         .free = __iommu_free_attrs,
         .mmap = __iommu_mmap_attrs,
···
         if (iommu_dma_init_domain(domain, dma_base, size, dev))
                 goto out_err;

-        dev->archdata.dma_ops = &iommu_dma_ops;
+        dev->dma_ops = &iommu_dma_ops;
         }

         return true;
···

 void arch_teardown_dma_ops(struct device *dev)
 {
-        dev->archdata.dma_ops = NULL;
+        dev->dma_ops = NULL;
 }

 #else
···
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                         const struct iommu_ops *iommu, bool coherent)
 {
-        if (!dev->archdata.dma_ops)
-                dev->archdata.dma_ops = &swiotlb_dma_ops;
+        if (!dev->dma_ops)
+                dev->dma_ops = &swiotlb_dma_ops;

         dev->archdata.dma_coherent = coherent;
         __iommu_setup_dma_ops(dev, dma_base, size, iommu);
+2 -2
arch/avr32/include/asm/dma-mapping.h
···
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
         int direction);

-extern struct dma_map_ops avr32_dma_ops;
+extern const struct dma_map_ops avr32_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &avr32_dma_ops;
 }
+1 -1
arch/avr32/mm/dma-coherent.c
···
         dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
 }

-struct dma_map_ops avr32_dma_ops = {
+const struct dma_map_ops avr32_dma_ops = {
         .alloc = avr32_dma_alloc,
         .free = avr32_dma_free,
         .map_page = avr32_dma_map_page,
+2 -2
arch/blackfin/include/asm/dma-mapping.h
···
         __dma_sync(addr, size, dir);
 }

-extern struct dma_map_ops bfin_dma_ops;
+extern const struct dma_map_ops bfin_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &bfin_dma_ops;
 }
+1 -1
arch/blackfin/kernel/dma-mapping.c
···
         _dma_sync(handle, size, dir);
 }

-struct dma_map_ops bfin_dma_ops = {
+const struct dma_map_ops bfin_dma_ops = {
         .alloc = bfin_dma_alloc,
         .free = bfin_dma_free,

+2 -2
arch/c6x/include/asm/dma-mapping.h
···
  */
 #define DMA_ERROR_CODE ~0

-extern struct dma_map_ops c6x_dma_ops;
+extern const struct dma_map_ops c6x_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &c6x_dma_ops;
 }
+1 -1
arch/c6x/kernel/dma.c
···

 }

-struct dma_map_ops c6x_dma_ops = {
+const struct dma_map_ops c6x_dma_ops = {
         .alloc = c6x_dma_alloc,
         .free = c6x_dma_free,
         .map_page = c6x_dma_map_page,
+1 -1
arch/cris/arch-v32/drivers/pci/dma.c
···
         return 1;
 }

-struct dma_map_ops v32_dma_ops = {
+const struct dma_map_ops v32_dma_ops = {
         .alloc = v32_dma_alloc,
         .free = v32_dma_free,
         .map_page = v32_dma_map_page,
+3 -3
arch/cris/include/asm/dma-mapping.h
···
 #define _ASM_CRIS_DMA_MAPPING_H

 #ifdef CONFIG_PCI
-extern struct dma_map_ops v32_dma_ops;
+extern const struct dma_map_ops v32_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &v32_dma_ops;
 }
 #else
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         BUG();
         return NULL;
+2 -2
arch/frv/include/asm/dma-mapping.h
···
 extern unsigned long __nongprelbss dma_coherent_mem_start;
 extern unsigned long __nongprelbss dma_coherent_mem_end;

-extern struct dma_map_ops frv_dma_ops;
+extern const struct dma_map_ops frv_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &frv_dma_ops;
 }
+1 -1
arch/frv/mb93090-mb00/pci-dma-nommu.c
···
         return 1;
 }

-struct dma_map_ops frv_dma_ops = {
+const struct dma_map_ops frv_dma_ops = {
         .alloc = frv_dma_alloc,
         .free = frv_dma_free,
         .map_page = frv_dma_map_page,
+1 -1
arch/frv/mb93090-mb00/pci-dma.c
···
         return 1;
 }

-struct dma_map_ops frv_dma_ops = {
+const struct dma_map_ops frv_dma_ops = {
         .alloc = frv_dma_alloc,
         .free = frv_dma_free,
         .map_page = frv_dma_map_page,
+2 -2
arch/h8300/include/asm/dma-mapping.h
···
 #ifndef _H8300_DMA_MAPPING_H
 #define _H8300_DMA_MAPPING_H

-extern struct dma_map_ops h8300_dma_map_ops;
+extern const struct dma_map_ops h8300_dma_map_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &h8300_dma_map_ops;
 }
+1 -1
arch/h8300/kernel/dma.c
···
         return nents;
 }

-struct dma_map_ops h8300_dma_map_ops = {
+const struct dma_map_ops h8300_dma_map_ops = {
         .alloc = dma_alloc,
         .free = dma_free,
         .map_page = map_page,
+2 -5
arch/hexagon/include/asm/dma-mapping.h
···
 extern int bad_dma_address;
 #define DMA_ERROR_CODE bad_dma_address

-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-        if (unlikely(dev == NULL))
-                return NULL;
-
         return dma_ops;
 }
+2 -2
arch/hexagon/kernel/dma.c
···
 #include <linux/module.h>
 #include <asm/page.h>

-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);

 int bad_dma_address;  /* globals are automatically initialized to zero */
···
         dma_sync(dma_addr_to_virt(dma_handle), size, dir);
 }

-struct dma_map_ops hexagon_dma_ops = {
+const struct dma_map_ops hexagon_dma_ops = {
         .alloc = hexagon_dma_alloc_coherent,
         .free = hexagon_free_coherent,
         .map_sg = hexagon_map_sg,
+2 -2
arch/ia64/hp/common/hwsw_iommu.c
···
 #include <linux/export.h>
 #include <asm/machvec.h>

-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;

 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
···
                 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
 }

-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
 {
         if (use_swiotlb(dev))
                 return &swiotlb_dma_ops;
+2 -2
arch/ia64/hp/common/sba_iommu.c
···
 /* This has to run before acpi_scan_init(). */
 arch_initcall(acpi_sba_ioc_init_acpi);

-extern struct dma_map_ops swiotlb_dma_ops;
+extern const struct dma_map_ops swiotlb_dma_ops;

 static int __init
 sba_init(void)
···

 __setup("sbapagesize=",sba_page_override);

-struct dma_map_ops sba_dma_ops = {
+const struct dma_map_ops sba_dma_ops = {
         .alloc = sba_alloc_coherent,
         .free = sba_free_coherent,
         .map_page = sba_map_page,
+5 -2
arch/ia64/include/asm/dma-mapping.h
···

 #define DMA_ERROR_CODE 0

-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);

···
 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
                                 enum dma_data_direction);

-#define get_dma_ops(dev) platform_dma_get_ops(dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+{
+        return platform_dma_get_ops(NULL);
+}

 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
+2 -2
arch/ia64/include/asm/machvec.h
···
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);

 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
···
 # endif /* CONFIG_IA64_GENERIC */

 extern void swiotlb_dma_init(void);
-extern struct dma_map_ops *dma_get_ops(struct device *);
+extern const struct dma_map_ops *dma_get_ops(struct device *);

 /*
  * Define default versions so we can extend machvec for new platforms without having
+2 -2
arch/ia64/kernel/dma-mapping.c
···
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly;

-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);

 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
···
 }
 fs_initcall(dma_init);

-struct dma_map_ops *dma_get_ops(struct device *dev)
+const struct dma_map_ops *dma_get_ops(struct device *dev)
 {
         return dma_ops;
 }
+5 -5
arch/ia64/kernel/pci-dma.c
···
 {
         dma_ops = &intel_dma_ops;

-        dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
-        dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
-        dma_ops->sync_single_for_device = machvec_dma_sync_single;
-        dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
-        dma_ops->dma_supported = iommu_dma_supported;
+        intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single;
+        intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg;
+        intel_dma_ops.sync_single_for_device = machvec_dma_sync_single;
+        intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg;
+        intel_dma_ops.dma_supported = iommu_dma_supported;

         /*
          * The order of these functions is important for
+1 -1
arch/ia64/kernel/pci-swiotlb.c
···
         swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }

-struct dma_map_ops swiotlb_dma_ops = {
+const struct dma_map_ops swiotlb_dma_ops = {
         .alloc = ia64_swiotlb_alloc_coherent,
         .free = ia64_swiotlb_free_coherent,
         .map_page = swiotlb_map_page,
+1
arch/m32r/Kconfig
···
         select MODULES_USE_ELF_RELA
         select HAVE_DEBUG_STACKOVERFLOW
         select CPU_NO_EFFICIENT_FFS
+        select DMA_NOOP_OPS

 config SBUS
         bool
-1
arch/m32r/include/asm/device.h
···
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-        struct dma_map_ops *dma_ops;
 };

 struct pdev_archdata {
+1 -3
arch/m32r/include/asm/dma-mapping.h
···

 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
         return &dma_noop_ops;
 }

+2 -2
arch/m68k/include/asm/dma-mapping.h
···
 #ifndef _M68K_DMA_MAPPING_H
 #define _M68K_DMA_MAPPING_H

-extern struct dma_map_ops m68k_dma_ops;
+extern const struct dma_map_ops m68k_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &m68k_dma_ops;
 }
+1 -1
arch/m68k/kernel/dma.c
···
         return nents;
 }

-struct dma_map_ops m68k_dma_ops = {
+const struct dma_map_ops m68k_dma_ops = {
         .alloc = m68k_dma_alloc,
         .free = m68k_dma_free,
         .map_page = m68k_dma_map_page,
+2 -2
arch/metag/include/asm/dma-mapping.h
···
 #ifndef _ASM_METAG_DMA_MAPPING_H
 #define _ASM_METAG_DMA_MAPPING_H

-extern struct dma_map_ops metag_dma_ops;
+extern const struct dma_map_ops metag_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &metag_dma_ops;
 }
+1 -1
arch/metag/kernel/dma.c
···
         dma_sync_for_device(sg_virt(sg), sg->length, direction);
 }

-struct dma_map_ops metag_dma_ops = {
+const struct dma_map_ops metag_dma_ops = {
         .alloc = metag_dma_alloc,
         .free = metag_dma_free,
         .map_page = metag_dma_map_page,
+2 -2
arch/microblaze/include/asm/dma-mapping.h
···
 /*
  * Available generic sets of operations
  */
-extern struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_direct_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &dma_direct_ops;
 }
+1 -1
arch/microblaze/kernel/dma.c
···
 #endif
 }

-struct dma_map_ops dma_direct_ops = {
+const struct dma_map_ops dma_direct_ops = {
         .alloc = dma_direct_alloc_coherent,
         .free = dma_direct_free_coherent,
         .mmap = dma_direct_mmap_coherent,
+2 -2
arch/mips/cavium-octeon/dma-octeon.c
···
 }

 struct octeon_dma_map_ops {
-        struct dma_map_ops dma_map_ops;
+        const struct dma_map_ops dma_map_ops;
         dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
         phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
 };
···
         },
 };

-struct dma_map_ops *octeon_pci_dma_map_ops;
+const struct dma_map_ops *octeon_pci_dma_map_ops;

 void __init octeon_pci_dma_init(void)
 {
-5
arch/mips/include/asm/device.h
···
 #ifndef _ASM_MIPS_DEVICE_H
 #define _ASM_MIPS_DEVICE_H

-struct dma_map_ops;
-
 struct dev_archdata {
-        /* DMA operations on that device */
-        struct dma_map_ops *dma_ops;
-
 #ifdef CONFIG_DMA_PERDEV_COHERENT
         /* Non-zero if DMA is coherent with CPU caches */
         bool dma_coherent;
+3 -6
arch/mips/include/asm/dma-mapping.h
···
 #include <dma-coherence.h>
 #endif

-extern struct dma_map_ops *mips_dma_map_ops;
+extern const struct dma_map_ops *mips_dma_map_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
-        else
-                return mips_dma_map_ops;
+        return mips_dma_map_ops;
 }

 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+1 -1
arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
···
 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);

 struct dma_map_ops;
-extern struct dma_map_ops *octeon_pci_dma_map_ops;
+extern const struct dma_map_ops *octeon_pci_dma_map_ops;
 extern char *octeon_swiotlb;

 #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
+1 -1
arch/mips/include/asm/netlogic/common.h
···
 extern char nlm_reset_entry[], nlm_reset_entry_end[];

 /* SWIOTLB */
-extern struct dma_map_ops nlm_swiotlb_dma_ops;
+extern const struct dma_map_ops nlm_swiotlb_dma_ops;

 extern unsigned int nlm_threads_per_core;
 extern cpumask_t nlm_cpumask;
+1 -1
arch/mips/loongson64/common/dma-swiotlb.c
···
         return daddr;
 }

-static struct dma_map_ops loongson_dma_map_ops = {
+static const struct dma_map_ops loongson_dma_map_ops = {
         .alloc = loongson_dma_alloc_coherent,
         .free = loongson_dma_free_coherent,
         .map_page = loongson_dma_map_page,
+2 -2
arch/mips/mm/dma-default.c
···

 EXPORT_SYMBOL(dma_cache_sync);

-static struct dma_map_ops mips_default_dma_map_ops = {
+static const struct dma_map_ops mips_default_dma_map_ops = {
         .alloc = mips_dma_alloc_coherent,
         .free = mips_dma_free_coherent,
         .mmap = mips_dma_mmap,
···
         .dma_supported = mips_dma_supported
 };

-struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
+const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
 EXPORT_SYMBOL(mips_dma_map_ops);

 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+1 -1
arch/mips/netlogic/common/nlm-dma.c
···
         swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }

-struct dma_map_ops nlm_swiotlb_dma_ops = {
+const struct dma_map_ops nlm_swiotlb_dma_ops = {
         .alloc = nlm_dma_alloc_coherent,
         .free = nlm_dma_free_coherent,
         .map_page = swiotlb_map_page,
+1 -1
arch/mips/pci/pci-octeon.c
···
                 pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
         }

-        dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
+        dev->dev.dma_ops = octeon_pci_dma_map_ops;

         return 0;
 }
+2 -2
arch/mn10300/include/asm/dma-mapping.h
···
 #include <asm/cache.h>
 #include <asm/io.h>

-extern struct dma_map_ops mn10300_dma_ops;
+extern const struct dma_map_ops mn10300_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &mn10300_dma_ops;
 }
+1 -1
arch/mn10300/mm/dma-alloc.c
···
         return 1;
 }

-struct dma_map_ops mn10300_dma_ops = {
+const struct dma_map_ops mn10300_dma_ops = {
         .alloc = mn10300_dma_alloc,
         .free = mn10300_dma_free,
         .map_page = mn10300_dma_map_page,
+2 -2
arch/nios2/include/asm/dma-mapping.h
···
 #ifndef _ASM_NIOS2_DMA_MAPPING_H
 #define _ASM_NIOS2_DMA_MAPPING_H

-extern struct dma_map_ops nios2_dma_ops;
+extern const struct dma_map_ops nios2_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &nios2_dma_ops;
 }
+1 -1
arch/nios2/mm/dma-mapping.c
···

 }

-struct dma_map_ops nios2_dma_ops = {
+const struct dma_map_ops nios2_dma_ops = {
         .alloc = nios2_dma_alloc,
         .free = nios2_dma_free,
         .map_page = nios2_dma_map_page,
+2 -2
arch/openrisc/include/asm/dma-mapping.h
···

 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)

-extern struct dma_map_ops or1k_dma_map_ops;
+extern const struct dma_map_ops or1k_dma_map_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &or1k_dma_map_ops;
 }
+1 -1
arch/openrisc/kernel/dma.c
···
         mtspr(SPR_DCBFR, cl);
 }

-struct dma_map_ops or1k_dma_map_ops = {
+const struct dma_map_ops or1k_dma_map_ops = {
         .alloc = or1k_dma_alloc,
         .free = or1k_dma_free,
         .map_page = or1k_map_page,
+4 -4
arch/parisc/include/asm/dma-mapping.h
···
  */

 #ifdef CONFIG_PA11
-extern struct dma_map_ops pcxl_dma_ops;
-extern struct dma_map_ops pcx_dma_ops;
+extern const struct dma_map_ops pcxl_dma_ops;
+extern const struct dma_map_ops pcx_dma_ops;
 #endif

-extern struct dma_map_ops *hppa_dma_ops;
+extern const struct dma_map_ops *hppa_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return hppa_dma_ops;
 }
+1 -1
arch/parisc/kernel/drivers.c
···
 #include <asm/parisc-device.h>

 /* See comments in include/asm-parisc/pci.h */
-struct dma_map_ops *hppa_dma_ops __read_mostly;
+const struct dma_map_ops *hppa_dma_ops __read_mostly;
 EXPORT_SYMBOL(hppa_dma_ops);

 static struct device root = {
+2 -2
arch/parisc/kernel/pci-dma.c
···
         flush_kernel_vmap_range(sg_virt(sg), sg->length);
 }

-struct dma_map_ops pcxl_dma_ops = {
+const struct dma_map_ops pcxl_dma_ops = {
         .dma_supported = pa11_dma_supported,
         .alloc = pa11_dma_alloc,
         .free = pa11_dma_free,
···
         return;
 }

-struct dma_map_ops pcx_dma_ops = {
+const struct dma_map_ops pcx_dma_ops = {
         .dma_supported = pa11_dma_supported,
         .alloc = pcx_dma_alloc,
         .free = pcx_dma_free,
-4
arch/powerpc/include/asm/device.h
···
 #ifndef _ASM_POWERPC_DEVICE_H
 #define _ASM_POWERPC_DEVICE_H

-struct dma_map_ops;
 struct device_node;
 #ifdef CONFIG_PPC64
 struct pci_dn;
···
  * drivers/macintosh/macio_asic.c
  */
 struct dev_archdata {
-        /* DMA operations on that device */
-        struct dma_map_ops *dma_ops;
-
         /*
          * These two used to be a union. However, with the hybrid ops we need
          * both so here we store both a DMA offset for direct mappings and
+3 -11
arch/powerpc/include/asm/dma-mapping.h
···
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
 #endif
-extern struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_direct_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         /* We don't handle the NULL dev case for ISA for now. We could
          * do it via an out of line call but it is not needed for now. The
          * only ISA DMA device we support is the floppy and we have a hack
          * in the floppy driver directly to get a device for us.
          */
-        if (unlikely(dev == NULL))
-                return NULL;
-
-        return dev->archdata.dma_ops;
-}
-
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
-        dev->archdata.dma_ops = ops;
+        return NULL;
 }

 /*
+2 -2
arch/powerpc/include/asm/pci.h
···
 }

 #ifdef CONFIG_PCI
-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
-extern struct dma_map_ops *get_pci_dma_ops(void);
+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
+extern const struct dma_map_ops *get_pci_dma_ops(void);
 #else   /* CONFIG_PCI */
 #define set_pci_dma_ops(d)
 #define get_pci_dma_ops()       NULL
+1 -1
arch/powerpc/include/asm/ps3.h
···
         return dev_get_drvdata(&dev->core);
 }

-/* These two need global scope for get_dma_ops(). */
+/* These two need global scope for get_arch_dma_ops(). */

 extern struct bus_type ps3_system_bus_type;

+1 -1
arch/powerpc/include/asm/swiotlb.h
···

 #include <linux/swiotlb.h>

-extern struct dma_map_ops swiotlb_dma_ops;
+extern const struct dma_map_ops swiotlb_dma_ops;

 static inline void dma_mark_clean(void *addr, size_t size) {}

+1 -1
arch/powerpc/kernel/dma-swiotlb.c
···
  * map_page, and unmap_page on highmem, use normal dma_ops
  * for everything else.
  */
-struct dma_map_ops swiotlb_dma_ops = {
+const struct dma_map_ops swiotlb_dma_ops = {
         .alloc = __dma_direct_alloc_coherent,
         .free = __dma_direct_free_coherent,
         .mmap = dma_direct_mmap_coherent,
+4 -4
arch/powerpc/kernel/dma.c
···
         struct dev_archdata __maybe_unused *sd = &dev->archdata;

 #ifdef CONFIG_SWIOTLB
-        if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
+        if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
                 pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
 #endif

···
 }
 #endif

-struct dma_map_ops dma_direct_ops = {
+const struct dma_map_ops dma_direct_ops = {
         .alloc = dma_direct_alloc_coherent,
         .free = dma_direct_free_coherent,
         .mmap = dma_direct_mmap_coherent,
···

 int __dma_set_mask(struct device *dev, u64 dma_mask)
 {
-        struct dma_map_ops *dma_ops = get_dma_ops(dev);
+        const struct dma_map_ops *dma_ops = get_dma_ops(dev);

         if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                 return dma_ops->set_dma_mask(dev, dma_mask);
···

 u64 __dma_get_required_mask(struct device *dev)
 {
-        struct dma_map_ops *dma_ops = get_dma_ops(dev);
+        const struct dma_map_ops *dma_ops = get_dma_ops(dev);

         if (unlikely(dma_ops == NULL))
                 return 0;
+3 -3
arch/powerpc/kernel/pci-common.c
···
 EXPORT_SYMBOL(isa_mem_base);


-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
 {
         pci_dma_ops = dma_ops;
 }

-struct dma_map_ops *get_pci_dma_ops(void)
+const struct dma_map_ops *get_pci_dma_ops(void)
 {
         return pci_dma_ops;
 }
+3 -3
arch/powerpc/platforms/cell/iommu.c
···

 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);

-static struct dma_map_ops dma_iommu_fixed_ops = {
+static const struct dma_map_ops dma_iommu_fixed_ops = {
         .alloc = dma_fixed_alloc_coherent,
         .free = dma_fixed_free_coherent,
         .map_sg = dma_fixed_map_sg,
···
                 return 0;

         /* We use the PCI DMA ops */
-        dev->archdata.dma_ops = get_pci_dma_ops();
+        dev->dma_ops = get_pci_dma_ops();

         cell_dma_dev_setup(dev);

···

 static u64 cell_dma_get_required_mask(struct device *dev)
 {
-        struct dma_map_ops *dma_ops;
+        const struct dma_map_ops *dma_ops;

         if (!dev->dma_mask)
                 return 0;
+1 -1
arch/powerpc/platforms/pasemi/iommu.c
···
          */
         if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
             !firmware_has_feature(FW_FEATURE_LPAR)) {
-                dev->dev.archdata.dma_ops = &dma_direct_ops;
+                dev->dev.dma_ops = &dma_direct_ops;
                 /*
                  * Set the coherent DMA mask to prevent the iommu
                  * being used unnecessarily
+1 -1
arch/powerpc/platforms/pasemi/setup.c
···
                 return 0;

         /* We use the direct ops for localbus */
-        dev->archdata.dma_ops = &dma_direct_ops;
+        dev->dma_ops = &dma_direct_ops;

         return 0;
 }
+1 -1
arch/powerpc/platforms/powernv/npu-dma.c
···
         return 0;
 }

-static struct dma_map_ops dma_npu_ops = {
+static const struct dma_map_ops dma_npu_ops = {
         .map_page = dma_npu_map_page,
         .map_sg = dma_npu_map_sg,
         .alloc = dma_npu_alloc,
+4 -4
arch/powerpc/platforms/ps3/system-bus.c
···
         return DMA_BIT_MASK(32);
 }

-static struct dma_map_ops ps3_sb_dma_ops = {
+static const struct dma_map_ops ps3_sb_dma_ops = {
         .alloc = ps3_alloc_coherent,
         .free = ps3_free_coherent,
         .map_sg = ps3_sb_map_sg,
···
         .unmap_page = ps3_unmap_page,
 };

-static struct dma_map_ops ps3_ioc0_dma_ops = {
+static const struct dma_map_ops ps3_ioc0_dma_ops = {
         .alloc = ps3_alloc_coherent,
         .free = ps3_free_coherent,
         .map_sg = ps3_ioc0_map_sg,
···

         switch (dev->dev_type) {
         case PS3_DEVICE_TYPE_IOC0:
-                dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
+                dev->core.dma_ops = &ps3_ioc0_dma_ops;
                 dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
                 break;
         case PS3_DEVICE_TYPE_SB:
-                dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
+                dev->core.dma_ops = &ps3_sb_dma_ops;
                 dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);

                 break;
+2 -2
arch/powerpc/platforms/pseries/ibmebus.c
···
         return DMA_BIT_MASK(64);
 }

-static struct dma_map_ops ibmebus_dma_ops = {
+static const struct dma_map_ops ibmebus_dma_ops = {
         .alloc = ibmebus_alloc_coherent,
         .free = ibmebus_free_coherent,
         .map_sg = ibmebus_map_sg,
···
                 return -ENOMEM;

         dev->dev.bus = &ibmebus_bus_type;
-        dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
+        dev->dev.dma_ops = &ibmebus_dma_ops;

         ret = of_device_add(dev);
         if (ret)
+1 -1
arch/powerpc/platforms/pseries/vio.c
···
         return dma_iommu_ops.get_required_mask(dev);
 }

-static struct dma_map_ops vio_dma_mapping_ops = {
+static const struct dma_map_ops vio_dma_mapping_ops = {
         .alloc = vio_dma_iommu_alloc_coherent,
         .free = vio_dma_iommu_free_coherent,
         .mmap = dma_direct_mmap_coherent,
+1
arch/s390/Kconfig
···
         select HAVE_DEBUG_KMEMLEAK
         select HAVE_DMA_API_DEBUG
         select HAVE_DMA_CONTIGUOUS
+        select DMA_NOOP_OPS
         select HAVE_DYNAMIC_FTRACE
         select HAVE_DYNAMIC_FTRACE_WITH_REGS
         select HAVE_EFFICIENT_UNALIGNED_ACCESS
-1
arch/s390/include/asm/device.h
···
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-        struct dma_map_ops *dma_ops;
 };

 struct pdev_archdata {
+2 -4
arch/s390/include/asm/dma-mapping.h
···

 #define DMA_ERROR_CODE         (~(dma_addr_t) 0x0)

-extern struct dma_map_ops s390_pci_dma_ops;
+extern const struct dma_map_ops s390_pci_dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
         return &dma_noop_ops;
 }

+1 -1
arch/s390/pci/pci.c
···
         int i;

         pdev->dev.groups = zpci_attr_groups;
-        pdev->dev.archdata.dma_ops = &s390_pci_dma_ops;
+        pdev->dev.dma_ops = &s390_pci_dma_ops;
         zpci_map_resources(pdev);

         for (i = 0; i < PCI_BAR_COUNT; i++) {
+1 -1
arch/s390/pci/pci_dma.c
···
 }
 fs_initcall(dma_debug_do_init);

-struct dma_map_ops s390_pci_dma_ops = {
+const struct dma_map_ops s390_pci_dma_ops = {
         .alloc = s390_dma_alloc,
         .free = s390_dma_free,
         .map_sg = s390_dma_map_sg,
+2 -2
arch/sh/include/asm/dma-mapping.h
···
 #ifndef __ASM_SH_DMA_MAPPING_H
 #define __ASM_SH_DMA_MAPPING_H

-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;
 extern void no_iommu_init(void);

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return dma_ops;
 }
+1 -1
arch/sh/kernel/dma-nommu.c
···
 }
 #endif

-struct dma_map_ops nommu_dma_ops = {
+const struct dma_map_ops nommu_dma_ops = {
         .alloc = dma_generic_alloc_coherent,
         .free = dma_generic_free_coherent,
         .map_page = nommu_map_page,
+1 -1
arch/sh/mm/consistent.c
···

 #define PREALLOC_DMA_DEBUG_ENTRIES      4096

-struct dma_map_ops *dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);

 static int __init dma_init(void)
+5 -5
arch/sparc/include/asm/dma-mapping.h
···
          */
 }

-extern struct dma_map_ops *dma_ops;
-extern struct dma_map_ops *leon_dma_ops;
-extern struct dma_map_ops pci32_dma_ops;
+extern const struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *leon_dma_ops;
+extern const struct dma_map_ops pci32_dma_ops;

 extern struct bus_type pci_bus_type;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_SPARC_LEON
         if (sparc_cpu_model == sparc_leon)
                 return leon_dma_ops;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
-        if (dev->bus == &pci_bus_type)
+        if (bus == &pci_bus_type)
                 return &pci32_dma_ops;
 #endif
         return dma_ops;
+2 -2
arch/sparc/kernel/iommu.c
···
         spin_unlock_irqrestore(&iommu->lock, flags);
 }

-static struct dma_map_ops sun4u_dma_ops = {
+static const struct dma_map_ops sun4u_dma_ops = {
         .alloc = dma_4u_alloc_coherent,
         .free = dma_4u_free_coherent,
         .map_page = dma_4u_map_page,
···
         .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
 };

-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);

 int dma_supported(struct device *dev, u64 device_mask)
+4 -4
arch/sparc/kernel/ioport.c
···
         BUG();
 }

-static struct dma_map_ops sbus_dma_ops = {
+static const struct dma_map_ops sbus_dma_ops = {
         .alloc = sbus_alloc_coherent,
         .free = sbus_free_coherent,
         .map_page = sbus_map_page,
···
         }
 }

-struct dma_map_ops pci32_dma_ops = {
+const struct dma_map_ops pci32_dma_ops = {
         .alloc = pci32_alloc_coherent,
         .free = pci32_free_coherent,
         .map_page = pci32_map_page,
···
 EXPORT_SYMBOL(pci32_dma_ops);

 /* leon re-uses pci32_dma_ops */
-struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
+const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
 EXPORT_SYMBOL(leon_dma_ops);

-struct dma_map_ops *dma_ops = &sbus_dma_ops;
+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
 EXPORT_SYMBOL(dma_ops);

+1 -1
arch/sparc/kernel/pci_sun4v.c
···
         local_irq_restore(flags);
 }

-static struct dma_map_ops sun4v_dma_ops = {
+static const struct dma_map_ops sun4v_dma_ops = {
         .alloc = dma_4v_alloc_coherent,
         .free = dma_4v_free_coherent,
         .map_page = dma_4v_map_page,
-3
arch/tile/include/asm/device.h
···
 #define _ASM_TILE_DEVICE_H

 struct dev_archdata {
-        /* DMA operations on that device */
-        struct dma_map_ops *dma_ops;
-
         /* Offset of the DMA address from the PA. */
         dma_addr_t dma_offset;

+6 -14
arch/tile/include/asm/dma-mapping.h
···
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
 #endif

-extern struct dma_map_ops *tile_dma_map_ops;
-extern struct dma_map_ops *gx_pci_dma_map_ops;
-extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+extern const struct dma_map_ops *tile_dma_map_ops;
+extern const struct dma_map_ops *gx_pci_dma_map_ops;
+extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-        if (dev && dev->archdata.dma_ops)
-                return dev->archdata.dma_ops;
-        else
-                return tile_dma_map_ops;
+        return tile_dma_map_ops;
 }

 static inline dma_addr_t get_dma_offset(struct device *dev)
···
 }

 static inline void dma_mark_clean(void *addr, size_t size) {}
-
-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
-{
-        dev->archdata.dma_ops = ops;
-}

 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
+12 -12
arch/tile/kernel/pci-dma.c
···
         return 1;
 }

-static struct dma_map_ops tile_default_dma_map_ops = {
+static const struct dma_map_ops tile_default_dma_map_ops = {
         .alloc = tile_dma_alloc_coherent,
         .free = tile_dma_free_coherent,
         .map_page = tile_dma_map_page,
···
         .dma_supported = tile_dma_supported
 };

-struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
 EXPORT_SYMBOL(tile_dma_map_ops);

 /* Generic PCI DMA mapping functions */
···
         return 1;
 }

-static struct dma_map_ops tile_pci_default_dma_map_ops = {
+static const struct dma_map_ops tile_pci_default_dma_map_ops = {
         .alloc = tile_pci_dma_alloc_coherent,
         .free = tile_pci_dma_free_coherent,
         .map_page = tile_pci_dma_map_page,
···
         .dma_supported = tile_pci_dma_supported
 };

-struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
 EXPORT_SYMBOL(gx_pci_dma_map_ops);

 /* PCI DMA mapping functions for legacy PCI devices */
···
         swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }

-static struct dma_map_ops pci_swiotlb_dma_ops = {
+static const struct dma_map_ops pci_swiotlb_dma_ops = {
         .alloc = tile_swiotlb_alloc_coherent,
         .free = tile_swiotlb_free_coherent,
         .map_page = swiotlb_map_page,
···
         .mapping_error = swiotlb_dma_mapping_error,
 };

-static struct dma_map_ops pci_hybrid_dma_ops = {
+static const struct dma_map_ops pci_hybrid_dma_ops = {
         .alloc = tile_swiotlb_alloc_coherent,
         .free = tile_swiotlb_free_coherent,
         .map_page = tile_pci_dma_map_page,
···
         .dma_supported = tile_pci_dma_supported
 };

-struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
 #else
-struct dma_map_ops *gx_legacy_pci_dma_map_ops;
-struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 #endif
 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);

 int dma_set_mask(struct device *dev, u64 mask)
 {
-        struct dma_map_ops *dma_ops = get_dma_ops(dev);
+        const struct dma_map_ops *dma_ops = get_dma_ops(dev);

         /*
          * For PCI devices with 64-bit DMA addressing capability, promote
···
 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
-        struct dma_map_ops *dma_ops = get_dma_ops(dev);
+        const struct dma_map_ops *dma_ops = get_dma_ops(dev);

         /*
          * For PCI devices with 64-bit DMA addressing capability, promote
+2 -2
arch/unicore32/include/asm/dma-mapping.h
···
 #include <asm/memory.h>
 #include <asm/cacheflush.h>

-extern struct dma_map_ops swiotlb_dma_map_ops;
+extern const struct dma_map_ops swiotlb_dma_map_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
         return &swiotlb_dma_map_ops;
 }
+1 -1
arch/unicore32/mm/dma-swiotlb.c
···
         swiotlb_free_coherent(dev, size, vaddr, dma_addr);
 }

-struct dma_map_ops swiotlb_dma_map_ops = {
+const struct dma_map_ops swiotlb_dma_map_ops = {
         .alloc = unicore_swiotlb_alloc_coherent,
         .free = unicore_swiotlb_free_coherent,
         .map_sg = swiotlb_map_sg_attrs,
+1 -4
arch/x86/include/asm/device.h
···
 #define _ASM_X86_DEVICE_H

 struct dev_archdata {
-#ifdef CONFIG_X86_DEV_DMA_OPS
-        struct dma_map_ops *dma_ops;
-#endif
 #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
         void *iommu; /* hook for IOMMU specific extension */
 #endif
···
 #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS)
 struct dma_domain {
         struct list_head node;
-        struct dma_map_ops *dma_ops;
+        const struct dma_map_ops *dma_ops;
         int domain_nr;
 };
 void add_dma_domain(struct dma_domain *domain);
+2 -9
arch/x86/include/asm/dma-mapping.h
···
 extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;

-extern struct dma_map_ops *dma_ops;
+extern const struct dma_map_ops *dma_ops;

-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-#ifndef CONFIG_X86_DEV_DMA_OPS
         return dma_ops;
-#else
-        if (unlikely(!dev) || !dev->archdata.dma_ops)
-                return dma_ops;
-        else
-                return dev->archdata.dma_ops;
-#endif
 }

 bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
+1 -1
arch/x86/include/asm/iommu.h
···
 #ifndef _ASM_X86_IOMMU_H
 #define _ASM_X86_IOMMU_H

-extern struct dma_map_ops nommu_dma_ops;
+extern const struct dma_map_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int iommu_pass_through;
+1 -1
arch/x86/kernel/amd_gart_64.c
···
         return -1;
 }

-static struct dma_map_ops gart_dma_ops = {
+static const struct dma_map_ops gart_dma_ops = {
         .map_sg = gart_map_sg,
         .unmap_sg = gart_unmap_sg,
         .map_page = gart_map_page,
+3 -3
arch/x86/kernel/pci-calgary_64.c
···
         free_pages((unsigned long)vaddr, get_order(size));
 }

-static struct dma_map_ops calgary_dma_ops = {
+static const struct dma_map_ops calgary_dma_ops = {
         .alloc = calgary_alloc_coherent,
         .free = calgary_free_coherent,
         .map_sg = calgary_map_sg,
···
         tbl = find_iommu_table(&dev->dev);

         if (translation_enabled(tbl))
-                dev->dev.archdata.dma_ops = &calgary_dma_ops;
+                dev->dev.dma_ops = &calgary_dma_ops;
         }

         return ret;
···
         calgary_disable_translation(dev);
         calgary_free_bus(dev);
         pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
-        dev->dev.archdata.dma_ops = NULL;
+        dev->dev.dma_ops = NULL;
         } while (1);

         return ret;
+2 -2
arch/x86/kernel/pci-dma.c
···

 static int forbid_dac __read_mostly;

-struct dma_map_ops *dma_ops = &nommu_dma_ops;
+const struct dma_map_ops *dma_ops = &nommu_dma_ops;
 EXPORT_SYMBOL(dma_ops);

 static int iommu_sac_force __read_mostly;
···

 int dma_supported(struct device *dev, u64 mask)
 {
-        struct dma_map_ops *ops = get_dma_ops(dev);
+        const struct dma_map_ops *ops = get_dma_ops(dev);

 #ifdef CONFIG_PCI
         if (mask > 0xffffffff && forbid_dac > 0) {
+1 -1
arch/x86/kernel/pci-nommu.c
···
         flush_write_buffers();
 }

-struct dma_map_ops nommu_dma_ops = {
+const struct dma_map_ops nommu_dma_ops = {
         .alloc = dma_generic_alloc_coherent,
         .free = dma_generic_free_coherent,
         .map_sg = nommu_map_sg,
+1 -1
arch/x86/kernel/pci-swiotlb.c
···
         dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
 }

-static struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops swiotlb_dma_ops = {
         .mapping_error = swiotlb_dma_mapping_error,
         .alloc = x86_swiotlb_alloc_coherent,
         .free = x86_swiotlb_free_coherent,
+1 -1
arch/x86/pci/common.c
···
         spin_lock(&dma_domain_list_lock);
         list_for_each_entry(domain, &dma_domain_list, node) {
                 if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
-                        pdev->dev.archdata.dma_ops = domain->dma_ops;
+                        pdev->dev.dma_ops = domain->dma_ops;
                         break;
                 }
         }
+5 -5
arch/x86/pci/sta2x11-fixup.c
···
 }

 /* We have our own dma_ops: the same as swiotlb but from alloc (above) */
-static struct dma_map_ops sta2x11_dma_ops = {
+static const struct dma_map_ops sta2x11_dma_ops = {
         .alloc = sta2x11_swiotlb_alloc_coherent,
         .free = x86_swiotlb_free_coherent,
         .map_page = swiotlb_map_page,
···
                 return;
         pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
         pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
-        pdev->dev.archdata.dma_ops = &sta2x11_dma_ops;
+        pdev->dev.dma_ops = &sta2x11_dma_ops;

         /* We must enable all devices as master, for audio DMA to work */
         pci_set_master(pdev);
···
 {
         struct sta2x11_mapping *map;

-        if (dev->archdata.dma_ops != &sta2x11_dma_ops) {
+        if (dev->dma_ops != &sta2x11_dma_ops) {
                 if (!dev->dma_mask)
                         return false;
                 return addr + size - 1 <= *dev->dma_mask;
···
  */
 dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-        if (dev->archdata.dma_ops != &sta2x11_dma_ops)
+        if (dev->dma_ops != &sta2x11_dma_ops)
                 return paddr;
         return p2a(paddr, to_pci_dev(dev));
 }
···
  */
 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-        if (dev->archdata.dma_ops != &sta2x11_dma_ops)
+        if (dev->dma_ops != &sta2x11_dma_ops)
                 return daddr;
         return a2p(daddr, to_pci_dev(dev));
 }
+1 -1
arch/x86/xen/pci-swiotlb-xen.c
··· 18 18 19 19 int xen_swiotlb __read_mostly; 20 20 21 - static struct dma_map_ops xen_swiotlb_dma_ops = { 21 + static const struct dma_map_ops xen_swiotlb_dma_ops = { 22 22 .alloc = xen_swiotlb_alloc_coherent, 23 23 .free = xen_swiotlb_free_coherent, 24 24 .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
-4
arch/xtensa/include/asm/device.h
··· 6 6 #ifndef _ASM_XTENSA_DEVICE_H 7 7 #define _ASM_XTENSA_DEVICE_H 8 8 9 - struct dma_map_ops; 10 - 11 9 struct dev_archdata { 12 - /* DMA operations on that device */ 13 - struct dma_map_ops *dma_ops; 14 10 }; 15 11 16 12 struct pdev_archdata {
+3 -6
arch/xtensa/include/asm/dma-mapping.h
··· 18 18 19 19 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 20 20 21 - extern struct dma_map_ops xtensa_dma_map_ops; 21 + extern const struct dma_map_ops xtensa_dma_map_ops; 22 22 23 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 23 + static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) 24 24 { 25 - if (dev && dev->archdata.dma_ops) 26 - return dev->archdata.dma_ops; 27 - else 28 - return &xtensa_dma_map_ops; 25 + return &xtensa_dma_map_ops; 29 26 } 30 27 31 28 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
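xtensa can drop its open-coded per-device fallback because that logic now lives once in the common get_dma_ops() (see the include/linux/dma-mapping.h hunk below); architectures only provide the bus-level default via get_arch_dma_ops(). The common resolution order is:

	static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
	{
		/* a per-device table, if installed, wins ... */
		if (dev && dev->dma_ops)
			return dev->dma_ops;
		/* ... otherwise fall back to the bus/arch default */
		return get_arch_dma_ops(dev ? dev->bus : NULL);
	}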
+1 -1
arch/xtensa/kernel/pci-dma.c
··· 250 250 return 0; 251 251 } 252 252 253 - struct dma_map_ops xtensa_dma_map_ops = { 253 + const struct dma_map_ops xtensa_dma_map_ops = { 254 254 .alloc = xtensa_dma_alloc, 255 255 .free = xtensa_dma_free, 256 256 .map_page = xtensa_map_page,
+9
drivers/infiniband/core/device.c
··· 333 333 int ret; 334 334 struct ib_client *client; 335 335 struct ib_udata uhw = {.outlen = 0, .inlen = 0}; 336 + struct device *parent = device->dev.parent; 337 + 338 + WARN_ON_ONCE(!parent); 339 + if (!device->dev.dma_ops) 340 + device->dev.dma_ops = parent->dma_ops; 341 + if (!device->dev.dma_mask) 342 + device->dev.dma_mask = parent->dma_mask; 343 + if (!device->dev.coherent_dma_mask) 344 + device->dev.coherent_dma_mask = parent->coherent_dma_mask; 336 345 337 346 mutex_lock(&device_mutex); 338 347
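ib_register_device() now inherits any DMA parameters the HCA driver left unset (dma_ops, dma_mask, coherent_dma_mask) from dev.parent. A provider therefore only has to parent its ib_device to the underlying PCI device before registering, as the driver hunks below do; roughly (error handling elided):

	ibdev->dev.parent = &pdev->dev;		/* replaces ibdev->dma_device */
	ret = ib_register_device(ibdev, NULL);	/* DMA parameters inherited here */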
+1 -1
drivers/infiniband/core/sysfs.c
··· 1258 1258 int ret; 1259 1259 int i; 1260 1260 1261 - device->dev.parent = device->dma_device; 1261 + WARN_ON_ONCE(!device->dev.parent); 1262 1262 ret = dev_set_name(class_dev, "%s", device->name); 1263 1263 if (ret) 1264 1264 return ret;
+1 -1
drivers/infiniband/core/ucm.c
··· 1290 1290 goto err; 1291 1291 1292 1292 ucm_dev->dev.class = &cm_class; 1293 - ucm_dev->dev.parent = device->dma_device; 1293 + ucm_dev->dev.parent = device->dev.parent; 1294 1294 ucm_dev->dev.devt = ucm_dev->cdev.dev; 1295 1295 ucm_dev->dev.release = ib_ucm_release_dev; 1296 1296 dev_set_name(&ucm_dev->dev, "ucm%d", ucm_dev->devnum);
+2 -2
drivers/infiniband/core/user_mad.c
··· 1188 1188 if (cdev_add(&port->cdev, base, 1)) 1189 1189 goto err_cdev; 1190 1190 1191 - port->dev = device_create(umad_class, device->dma_device, 1191 + port->dev = device_create(umad_class, device->dev.parent, 1192 1192 port->cdev.dev, port, 1193 1193 "umad%d", port->dev_num); 1194 1194 if (IS_ERR(port->dev)) ··· 1207 1207 if (cdev_add(&port->sm_cdev, base, 1)) 1208 1208 goto err_sm_cdev; 1209 1209 1210 - port->sm_dev = device_create(umad_class, device->dma_device, 1210 + port->sm_dev = device_create(umad_class, device->dev.parent, 1211 1211 port->sm_cdev.dev, port, 1212 1212 "issm%d", port->dev_num); 1213 1213 if (IS_ERR(port->sm_dev))
+1 -1
drivers/infiniband/core/uverbs_main.c
··· 1174 1174 if (cdev_add(&uverbs_dev->cdev, base, 1)) 1175 1175 goto err_cdev; 1176 1176 1177 - uverbs_dev->dev = device_create(uverbs_class, device->dma_device, 1177 + uverbs_dev->dev = device_create(uverbs_class, device->dev.parent, 1178 1178 uverbs_dev->cdev.dev, uverbs_dev, 1179 1179 "uverbs%d", uverbs_dev->devnum); 1180 1180 if (IS_ERR(uverbs_dev->dev))
+1 -1
drivers/infiniband/hw/bnxt_re/main.c
··· 436 436 bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid); 437 437 438 438 ibdev->num_comp_vectors = 1; 439 - ibdev->dma_device = &rdev->en_dev->pdev->dev; 439 + ibdev->dev.parent = &rdev->en_dev->pdev->dev; 440 440 ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY; 441 441 442 442 /* User space */
+1 -1
drivers/infiniband/hw/cxgb3/iwch_provider.c
··· 1393 1393 memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC)); 1394 1394 dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports; 1395 1395 dev->ibdev.num_comp_vectors = 1; 1396 - dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); 1396 + dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev; 1397 1397 dev->ibdev.query_device = iwch_query_device; 1398 1398 dev->ibdev.query_port = iwch_query_port; 1399 1399 dev->ibdev.query_pkey = iwch_query_pkey;
+1 -1
drivers/infiniband/hw/cxgb4/provider.c
··· 572 572 memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC)); 573 573 dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports; 574 574 dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq; 575 - dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev); 575 + dev->ibdev.dev.parent = &dev->rdev.lldi.pdev->dev; 576 576 dev->ibdev.query_device = c4iw_query_device; 577 577 dev->ibdev.query_port = c4iw_query_port; 578 578 dev->ibdev.query_pkey = c4iw_query_pkey;
-183
drivers/infiniband/hw/hfi1/dma.c
··· 1 - /* 2 - * Copyright(c) 2015, 2016 Intel Corporation. 3 - * 4 - * This file is provided under a dual BSD/GPLv2 license. When using or 5 - * redistributing this file, you may do so under either license. 6 - * 7 - * GPL LICENSE SUMMARY 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of version 2 of the GNU General Public License as 11 - * published by the Free Software Foundation. 12 - * 13 - * This program is distributed in the hope that it will be useful, but 14 - * WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 - * General Public License for more details. 17 - * 18 - * BSD LICENSE 19 - * 20 - * Redistribution and use in source and binary forms, with or without 21 - * modification, are permitted provided that the following conditions 22 - * are met: 23 - * 24 - * - Redistributions of source code must retain the above copyright 25 - * notice, this list of conditions and the following disclaimer. 26 - * - Redistributions in binary form must reproduce the above copyright 27 - * notice, this list of conditions and the following disclaimer in 28 - * the documentation and/or other materials provided with the 29 - * distribution. 30 - * - Neither the name of Intel Corporation nor the names of its 31 - * contributors may be used to endorse or promote products derived 32 - * from this software without specific prior written permission. 33 - * 34 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 - * 46 - */ 47 - #include <linux/types.h> 48 - #include <linux/scatterlist.h> 49 - 50 - #include "verbs.h" 51 - 52 - #define BAD_DMA_ADDRESS ((u64)0) 53 - 54 - /* 55 - * The following functions implement driver specific replacements 56 - * for the ib_dma_*() functions. 57 - * 58 - * These functions return kernel virtual addresses instead of 59 - * device bus addresses since the driver uses the CPU to copy 60 - * data instead of using hardware DMA. 
61 - */ 62 - 63 - static int hfi1_mapping_error(struct ib_device *dev, u64 dma_addr) 64 - { 65 - return dma_addr == BAD_DMA_ADDRESS; 66 - } 67 - 68 - static u64 hfi1_dma_map_single(struct ib_device *dev, void *cpu_addr, 69 - size_t size, enum dma_data_direction direction) 70 - { 71 - if (WARN_ON(!valid_dma_direction(direction))) 72 - return BAD_DMA_ADDRESS; 73 - 74 - return (u64)cpu_addr; 75 - } 76 - 77 - static void hfi1_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, 78 - enum dma_data_direction direction) 79 - { 80 - /* This is a stub, nothing to be done here */ 81 - } 82 - 83 - static u64 hfi1_dma_map_page(struct ib_device *dev, struct page *page, 84 - unsigned long offset, size_t size, 85 - enum dma_data_direction direction) 86 - { 87 - u64 addr; 88 - 89 - if (WARN_ON(!valid_dma_direction(direction))) 90 - return BAD_DMA_ADDRESS; 91 - 92 - if (offset + size > PAGE_SIZE) 93 - return BAD_DMA_ADDRESS; 94 - 95 - addr = (u64)page_address(page); 96 - if (addr) 97 - addr += offset; 98 - 99 - return addr; 100 - } 101 - 102 - static void hfi1_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, 103 - enum dma_data_direction direction) 104 - { 105 - /* This is a stub, nothing to be done here */ 106 - } 107 - 108 - static int hfi1_map_sg(struct ib_device *dev, struct scatterlist *sgl, 109 - int nents, enum dma_data_direction direction) 110 - { 111 - struct scatterlist *sg; 112 - u64 addr; 113 - int i; 114 - int ret = nents; 115 - 116 - if (WARN_ON(!valid_dma_direction(direction))) 117 - return BAD_DMA_ADDRESS; 118 - 119 - for_each_sg(sgl, sg, nents, i) { 120 - addr = (u64)page_address(sg_page(sg)); 121 - if (!addr) { 122 - ret = 0; 123 - break; 124 - } 125 - sg->dma_address = addr + sg->offset; 126 - #ifdef CONFIG_NEED_SG_DMA_LENGTH 127 - sg->dma_length = sg->length; 128 - #endif 129 - } 130 - return ret; 131 - } 132 - 133 - static void hfi1_unmap_sg(struct ib_device *dev, 134 - struct scatterlist *sg, int nents, 135 - enum dma_data_direction direction) 136 - { 137 - /* This is a stub, nothing to be done here */ 138 - } 139 - 140 - static void hfi1_sync_single_for_cpu(struct ib_device *dev, u64 addr, 141 - size_t size, enum dma_data_direction dir) 142 - { 143 - } 144 - 145 - static void hfi1_sync_single_for_device(struct ib_device *dev, u64 addr, 146 - size_t size, 147 - enum dma_data_direction dir) 148 - { 149 - } 150 - 151 - static void *hfi1_dma_alloc_coherent(struct ib_device *dev, size_t size, 152 - u64 *dma_handle, gfp_t flag) 153 - { 154 - struct page *p; 155 - void *addr = NULL; 156 - 157 - p = alloc_pages(flag, get_order(size)); 158 - if (p) 159 - addr = page_address(p); 160 - if (dma_handle) 161 - *dma_handle = (u64)addr; 162 - return addr; 163 - } 164 - 165 - static void hfi1_dma_free_coherent(struct ib_device *dev, size_t size, 166 - void *cpu_addr, u64 dma_handle) 167 - { 168 - free_pages((unsigned long)cpu_addr, get_order(size)); 169 - } 170 - 171 - struct ib_dma_mapping_ops hfi1_dma_mapping_ops = { 172 - .mapping_error = hfi1_mapping_error, 173 - .map_single = hfi1_dma_map_single, 174 - .unmap_single = hfi1_dma_unmap_single, 175 - .map_page = hfi1_dma_map_page, 176 - .unmap_page = hfi1_dma_unmap_page, 177 - .map_sg = hfi1_map_sg, 178 - .unmap_sg = hfi1_unmap_sg, 179 - .sync_single_for_cpu = hfi1_sync_single_for_cpu, 180 - .sync_single_for_device = hfi1_sync_single_for_device, 181 - .alloc_coherent = hfi1_dma_alloc_coherent, 182 - .free_coherent = hfi1_dma_free_coherent 183 - };
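All four copies of these stubs (hfi1 here; qib, rdmavt and rxe below) just treated kernel virtual addresses as bus addresses, since the software drivers copy data with the CPU. They are replaced by the generic dma_virt_ops; its mapping path is essentially the following (a sketch, assuming dma_virt_ops mirrors the deleted code):

	static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
	{
		/* the CPU does the copying, so a kernel virtual address
		 * is a perfectly good "DMA" address */
		return (uintptr_t)(page_address(page) + offset);
	}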
+1 -1
drivers/infiniband/hw/hfi1/mad.c
··· 4406 4406 switch (in_mad->base_version) { 4407 4407 case OPA_MGMT_BASE_VERSION: 4408 4408 if (unlikely(in_mad_size != sizeof(struct opa_mad))) { 4409 - dev_err(ibdev->dma_device, "invalid in_mad_size\n"); 4409 + dev_err(ibdev->dev.parent, "invalid in_mad_size\n"); 4410 4410 return IB_MAD_RESULT_FAILURE; 4411 4411 } 4412 4412 return hfi1_process_opa_mad(ibdev, mad_flags, port,
+1 -1
drivers/infiniband/hw/hfi1/verbs.c
··· 1703 1703 strlcpy(ibdev->name + lcpysz, "_%d", IB_DEVICE_NAME_MAX - lcpysz); 1704 1704 ibdev->owner = THIS_MODULE; 1705 1705 ibdev->phys_port_cnt = dd->num_pports; 1706 - ibdev->dma_device = &dd->pcidev->dev; 1706 + ibdev->dev.parent = &dd->pcidev->dev; 1707 1707 ibdev->modify_device = modify_device; 1708 1708 ibdev->alloc_hw_stats = alloc_hw_stats; 1709 1709 ibdev->get_hw_stats = get_hw_stats;
+1 -1
drivers/infiniband/hw/hns/hns_roce_main.c
··· 439 439 440 440 ib_dev->owner = THIS_MODULE; 441 441 ib_dev->node_type = RDMA_NODE_IB_CA; 442 - ib_dev->dma_device = dev; 442 + ib_dev->dev.parent = dev; 443 443 444 444 ib_dev->phys_port_cnt = hr_dev->caps.num_ports; 445 445 ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
+1 -1
drivers/infiniband/hw/hns/hns_roce_qp.c
··· 101 101 event.event = IB_EVENT_QP_ACCESS_ERR; 102 102 break; 103 103 default: 104 - dev_dbg(ibqp->device->dma_device, "roce_ib: Unexpected event type %d on QP %06lx\n", 104 + dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n", 105 105 type, hr_qp->qpn); 106 106 return; 107 107 }
-1
drivers/infiniband/hw/i40iw/i40iw_verbs.c
··· 2758 2758 (1ull << IB_USER_VERBS_CMD_POST_SEND); 2759 2759 iwibdev->ibdev.phys_port_cnt = 1; 2760 2760 iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count; 2761 - iwibdev->ibdev.dma_device = &pcidev->dev; 2762 2761 iwibdev->ibdev.dev.parent = &pcidev->dev; 2763 2762 iwibdev->ibdev.query_port = i40iw_query_port; 2764 2763 iwibdev->ibdev.modify_port = i40iw_modify_port;
+1 -1
drivers/infiniband/hw/mlx4/main.c
··· 2628 2628 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? 2629 2629 1 : ibdev->num_ports; 2630 2630 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; 2631 - ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; 2631 + ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; 2632 2632 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev; 2633 2633 ibdev->ib_dev.add_gid = mlx4_ib_add_gid; 2634 2634 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
+1 -1
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 55 55 #define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__ 56 56 57 57 #define mlx4_ib_warn(ibdev, format, arg...) \ 58 - dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg) 58 + dev_warn((ibdev)->dev.parent, MLX4_IB_DRV_NAME ": " format, ## arg) 59 59 60 60 enum { 61 61 MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
+3 -3
drivers/infiniband/hw/mlx4/mr.c
··· 292 292 if (!mr->pages) 293 293 return -ENOMEM; 294 294 295 - mr->page_map = dma_map_single(device->dma_device, mr->pages, 295 + mr->page_map = dma_map_single(device->dev.parent, mr->pages, 296 296 mr->page_map_size, DMA_TO_DEVICE); 297 297 298 - if (dma_mapping_error(device->dma_device, mr->page_map)) { 298 + if (dma_mapping_error(device->dev.parent, mr->page_map)) { 299 299 ret = -ENOMEM; 300 300 goto err; 301 301 } ··· 313 313 if (mr->pages) { 314 314 struct ib_device *device = mr->ibmr.device; 315 315 316 - dma_unmap_single(device->dma_device, mr->page_map, 316 + dma_unmap_single(device->dev.parent, mr->page_map, 317 317 mr->page_map_size, DMA_TO_DEVICE); 318 318 free_page((unsigned long)mr->pages); 319 319 mr->pages = NULL;
+1 -1
drivers/infiniband/hw/mlx5/main.c
··· 3363 3363 dev->ib_dev.phys_port_cnt = dev->num_ports; 3364 3364 dev->ib_dev.num_comp_vectors = 3365 3365 dev->mdev->priv.eq_table.num_comp_vectors; 3366 - dev->ib_dev.dma_device = &mdev->pdev->dev; 3366 + dev->ib_dev.dev.parent = &mdev->pdev->dev; 3367 3367 3368 3368 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; 3369 3369 dev->ib_dev.uverbs_cmd_mask =
+4 -4
drivers/infiniband/hw/mlx5/mr.c
··· 966 966 int page_shift, int flags) 967 967 { 968 968 struct mlx5_ib_dev *dev = mr->dev; 969 - struct device *ddev = dev->ib_dev.dma_device; 969 + struct device *ddev = dev->ib_dev.dev.parent; 970 970 struct mlx5_ib_ucontext *uctx = NULL; 971 971 int size; 972 972 void *xlt; ··· 1411 1411 1412 1412 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN); 1413 1413 1414 - mr->desc_map = dma_map_single(device->dma_device, mr->descs, 1414 + mr->desc_map = dma_map_single(device->dev.parent, mr->descs, 1415 1415 size, DMA_TO_DEVICE); 1416 - if (dma_mapping_error(device->dma_device, mr->desc_map)) { 1416 + if (dma_mapping_error(device->dev.parent, mr->desc_map)) { 1417 1417 ret = -ENOMEM; 1418 1418 goto err; 1419 1419 } ··· 1432 1432 struct ib_device *device = mr->ibmr.device; 1433 1433 int size = mr->max_descs * mr->desc_size; 1434 1434 1435 - dma_unmap_single(device->dma_device, mr->desc_map, 1435 + dma_unmap_single(device->dev.parent, mr->desc_map, 1436 1436 size, DMA_TO_DEVICE); 1437 1437 kfree(mr->descs_alloc); 1438 1438 mr->descs = NULL;
+1 -1
drivers/infiniband/hw/mthca/mthca_provider.c
··· 1224 1224 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 1225 1225 dev->ib_dev.phys_port_cnt = dev->limits.num_ports; 1226 1226 dev->ib_dev.num_comp_vectors = 1; 1227 - dev->ib_dev.dma_device = &dev->pdev->dev; 1227 + dev->ib_dev.dev.parent = &dev->pdev->dev; 1228 1228 dev->ib_dev.query_device = mthca_query_device; 1229 1229 dev->ib_dev.query_port = mthca_query_port; 1230 1230 dev->ib_dev.modify_device = mthca_modify_device;
-1
drivers/infiniband/hw/nes/nes_verbs.c
··· 3731 3731 3732 3732 nesibdev->ibdev.phys_port_cnt = 1; 3733 3733 nesibdev->ibdev.num_comp_vectors = 1; 3734 - nesibdev->ibdev.dma_device = &nesdev->pcidev->dev; 3735 3734 nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev; 3736 3735 nesibdev->ibdev.query_device = nes_query_device; 3737 3736 nesibdev->ibdev.query_port = nes_query_port;
+1 -1
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 199 199 dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext; 200 200 dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext; 201 201 dev->ibdev.mmap = ocrdma_mmap; 202 - dev->ibdev.dma_device = &dev->nic_info.pdev->dev; 202 + dev->ibdev.dev.parent = &dev->nic_info.pdev->dev; 203 203 204 204 dev->ibdev.process_mad = ocrdma_process_mad; 205 205 dev->ibdev.get_port_immutable = ocrdma_port_immutable;
+1 -1
drivers/infiniband/hw/qedr/main.c
··· 170 170 dev->ibdev.get_port_immutable = qedr_port_immutable; 171 171 dev->ibdev.get_netdev = qedr_get_netdev; 172 172 173 - dev->ibdev.dma_device = &dev->pdev->dev; 173 + dev->ibdev.dev.parent = &dev->pdev->dev; 174 174 175 175 dev->ibdev.get_link_layer = qedr_link_layer; 176 176 dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
-169
drivers/infiniband/hw/qib/qib_dma.c
··· 1 - /* 2 - * Copyright (c) 2006, 2009, 2010 QLogic, Corporation. All rights reserved. 3 - * 4 - * This software is available to you under a choice of one of two 5 - * licenses. You may choose to be licensed under the terms of the GNU 6 - * General Public License (GPL) Version 2, available from the file 7 - * COPYING in the main directory of this source tree, or the 8 - * OpenIB.org BSD license below: 9 - * 10 - * Redistribution and use in source and binary forms, with or 11 - * without modification, are permitted provided that the following 12 - * conditions are met: 13 - * 14 - * - Redistributions of source code must retain the above 15 - * copyright notice, this list of conditions and the following 16 - * disclaimer. 17 - * 18 - * - Redistributions in binary form must reproduce the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer in the documentation and/or other materials 21 - * provided with the distribution. 22 - * 23 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 - * SOFTWARE. 31 - */ 32 - #include <linux/types.h> 33 - #include <linux/scatterlist.h> 34 - 35 - #include "qib_verbs.h" 36 - 37 - #define BAD_DMA_ADDRESS ((u64) 0) 38 - 39 - /* 40 - * The following functions implement driver specific replacements 41 - * for the ib_dma_*() functions. 42 - * 43 - * These functions return kernel virtual addresses instead of 44 - * device bus addresses since the driver uses the CPU to copy 45 - * data instead of using hardware DMA. 
46 - */ 47 - 48 - static int qib_mapping_error(struct ib_device *dev, u64 dma_addr) 49 - { 50 - return dma_addr == BAD_DMA_ADDRESS; 51 - } 52 - 53 - static u64 qib_dma_map_single(struct ib_device *dev, void *cpu_addr, 54 - size_t size, enum dma_data_direction direction) 55 - { 56 - BUG_ON(!valid_dma_direction(direction)); 57 - return (u64) cpu_addr; 58 - } 59 - 60 - static void qib_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, 61 - enum dma_data_direction direction) 62 - { 63 - BUG_ON(!valid_dma_direction(direction)); 64 - } 65 - 66 - static u64 qib_dma_map_page(struct ib_device *dev, struct page *page, 67 - unsigned long offset, size_t size, 68 - enum dma_data_direction direction) 69 - { 70 - u64 addr; 71 - 72 - BUG_ON(!valid_dma_direction(direction)); 73 - 74 - if (offset + size > PAGE_SIZE) { 75 - addr = BAD_DMA_ADDRESS; 76 - goto done; 77 - } 78 - 79 - addr = (u64) page_address(page); 80 - if (addr) 81 - addr += offset; 82 - /* TODO: handle highmem pages */ 83 - 84 - done: 85 - return addr; 86 - } 87 - 88 - static void qib_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, 89 - enum dma_data_direction direction) 90 - { 91 - BUG_ON(!valid_dma_direction(direction)); 92 - } 93 - 94 - static int qib_map_sg(struct ib_device *dev, struct scatterlist *sgl, 95 - int nents, enum dma_data_direction direction) 96 - { 97 - struct scatterlist *sg; 98 - u64 addr; 99 - int i; 100 - int ret = nents; 101 - 102 - BUG_ON(!valid_dma_direction(direction)); 103 - 104 - for_each_sg(sgl, sg, nents, i) { 105 - addr = (u64) page_address(sg_page(sg)); 106 - /* TODO: handle highmem pages */ 107 - if (!addr) { 108 - ret = 0; 109 - break; 110 - } 111 - sg->dma_address = addr + sg->offset; 112 - #ifdef CONFIG_NEED_SG_DMA_LENGTH 113 - sg->dma_length = sg->length; 114 - #endif 115 - } 116 - return ret; 117 - } 118 - 119 - static void qib_unmap_sg(struct ib_device *dev, 120 - struct scatterlist *sg, int nents, 121 - enum dma_data_direction direction) 122 - { 123 - BUG_ON(!valid_dma_direction(direction)); 124 - } 125 - 126 - static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, 127 - size_t size, enum dma_data_direction dir) 128 - { 129 - } 130 - 131 - static void qib_sync_single_for_device(struct ib_device *dev, u64 addr, 132 - size_t size, 133 - enum dma_data_direction dir) 134 - { 135 - } 136 - 137 - static void *qib_dma_alloc_coherent(struct ib_device *dev, size_t size, 138 - u64 *dma_handle, gfp_t flag) 139 - { 140 - struct page *p; 141 - void *addr = NULL; 142 - 143 - p = alloc_pages(flag, get_order(size)); 144 - if (p) 145 - addr = page_address(p); 146 - if (dma_handle) 147 - *dma_handle = (u64) addr; 148 - return addr; 149 - } 150 - 151 - static void qib_dma_free_coherent(struct ib_device *dev, size_t size, 152 - void *cpu_addr, u64 dma_handle) 153 - { 154 - free_pages((unsigned long) cpu_addr, get_order(size)); 155 - } 156 - 157 - struct ib_dma_mapping_ops qib_dma_mapping_ops = { 158 - .mapping_error = qib_mapping_error, 159 - .map_single = qib_dma_map_single, 160 - .unmap_single = qib_dma_unmap_single, 161 - .map_page = qib_dma_map_page, 162 - .unmap_page = qib_dma_unmap_page, 163 - .map_sg = qib_map_sg, 164 - .unmap_sg = qib_unmap_sg, 165 - .sync_single_for_cpu = qib_sync_single_for_cpu, 166 - .sync_single_for_device = qib_sync_single_for_device, 167 - .alloc_coherent = qib_dma_alloc_coherent, 168 - .free_coherent = qib_dma_free_coherent 169 - };
+1 -4
drivers/infiniband/hw/qib/qib_keys.c
··· 158 158 unsigned n, m; 159 159 size_t off; 160 160 161 - /* 162 - * We use RKEY == zero for kernel virtual addresses 163 - * (see qib_get_dma_mr and qib_dma.c). 164 - */ 161 + /* We use RKEY == zero for kernel virtual addresses */ 165 162 rcu_read_lock(); 166 163 if (rkey == 0) { 167 164 struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
+1 -1
drivers/infiniband/hw/qib/qib_verbs.c
··· 1550 1550 ibdev->owner = THIS_MODULE; 1551 1551 ibdev->node_guid = ppd->guid; 1552 1552 ibdev->phys_port_cnt = dd->num_pports; 1553 - ibdev->dma_device = &dd->pcidev->dev; 1553 + ibdev->dev.parent = &dd->pcidev->dev; 1554 1554 ibdev->modify_device = qib_modify_device; 1555 1555 ibdev->process_mad = qib_process_mad; 1556 1556
+1 -1
drivers/infiniband/hw/usnic/usnic_ib_main.c
··· 382 382 us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP; 383 383 us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT; 384 384 us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS; 385 - us_ibdev->ib_dev.dma_device = &dev->dev; 385 + us_ibdev->ib_dev.dev.parent = &dev->dev; 386 386 us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION; 387 387 strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX); 388 388
+1 -1
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 173 173 dev->flags = 0; 174 174 dev->ib_dev.owner = THIS_MODULE; 175 175 dev->ib_dev.num_comp_vectors = 1; 176 - dev->ib_dev.dma_device = &dev->pdev->dev; 176 + dev->ib_dev.dev.parent = &dev->pdev->dev; 177 177 dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION; 178 178 dev->ib_dev.uverbs_cmd_mask = 179 179 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+1
drivers/infiniband/sw/rdmavt/Kconfig
··· 1 1 config INFINIBAND_RDMAVT 2 2 tristate "RDMA verbs transport library" 3 3 depends on 64BIT 4 + select DMA_VIRT_OPS 4 5 ---help--- 5 6 This is a common software verbs provider for RDMA networks.
+1 -1
drivers/infiniband/sw/rdmavt/Makefile
··· 7 7 # 8 8 obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt.o 9 9 10 - rdmavt-y := vt.o ah.o cq.o dma.o mad.o mcast.o mmap.o mr.o pd.o qp.o \ 10 + rdmavt-y := vt.o ah.o cq.o mad.o mcast.o mmap.o mr.o pd.o qp.o \ 11 11 rc.o srq.o trace.o 12 12 13 13 CFLAGS_trace.o = -I$(src)
-198
drivers/infiniband/sw/rdmavt/dma.c
··· 1 - /* 2 - * Copyright(c) 2016 Intel Corporation. 3 - * 4 - * This file is provided under a dual BSD/GPLv2 license. When using or 5 - * redistributing this file, you may do so under either license. 6 - * 7 - * GPL LICENSE SUMMARY 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of version 2 of the GNU General Public License as 11 - * published by the Free Software Foundation. 12 - * 13 - * This program is distributed in the hope that it will be useful, but 14 - * WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 - * General Public License for more details. 17 - * 18 - * BSD LICENSE 19 - * 20 - * Redistribution and use in source and binary forms, with or without 21 - * modification, are permitted provided that the following conditions 22 - * are met: 23 - * 24 - * - Redistributions of source code must retain the above copyright 25 - * notice, this list of conditions and the following disclaimer. 26 - * - Redistributions in binary form must reproduce the above copyright 27 - * notice, this list of conditions and the following disclaimer in 28 - * the documentation and/or other materials provided with the 29 - * distribution. 30 - * - Neither the name of Intel Corporation nor the names of its 31 - * contributors may be used to endorse or promote products derived 32 - * from this software without specific prior written permission. 33 - * 34 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 35 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 36 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 37 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 38 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 39 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 40 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 41 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 42 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 43 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 44 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 45 - * 46 - */ 47 - #include <linux/types.h> 48 - #include <linux/scatterlist.h> 49 - #include <rdma/ib_verbs.h> 50 - 51 - #include "dma.h" 52 - 53 - #define BAD_DMA_ADDRESS ((u64)0) 54 - 55 - /* 56 - * The following functions implement driver specific replacements 57 - * for the ib_dma_*() functions. 58 - * 59 - * These functions return kernel virtual addresses instead of 60 - * device bus addresses since the driver uses the CPU to copy 61 - * data instead of using hardware DMA. 
62 - */ 63 - 64 - static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr) 65 - { 66 - return dma_addr == BAD_DMA_ADDRESS; 67 - } 68 - 69 - static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr, 70 - size_t size, enum dma_data_direction direction) 71 - { 72 - if (WARN_ON(!valid_dma_direction(direction))) 73 - return BAD_DMA_ADDRESS; 74 - 75 - return (u64)cpu_addr; 76 - } 77 - 78 - static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size, 79 - enum dma_data_direction direction) 80 - { 81 - /* This is a stub, nothing to be done here */ 82 - } 83 - 84 - static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page, 85 - unsigned long offset, size_t size, 86 - enum dma_data_direction direction) 87 - { 88 - u64 addr; 89 - 90 - if (WARN_ON(!valid_dma_direction(direction))) 91 - return BAD_DMA_ADDRESS; 92 - 93 - addr = (u64)page_address(page); 94 - if (addr) 95 - addr += offset; 96 - 97 - return addr; 98 - } 99 - 100 - static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size, 101 - enum dma_data_direction direction) 102 - { 103 - /* This is a stub, nothing to be done here */ 104 - } 105 - 106 - static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl, 107 - int nents, enum dma_data_direction direction) 108 - { 109 - struct scatterlist *sg; 110 - u64 addr; 111 - int i; 112 - int ret = nents; 113 - 114 - if (WARN_ON(!valid_dma_direction(direction))) 115 - return 0; 116 - 117 - for_each_sg(sgl, sg, nents, i) { 118 - addr = (u64)page_address(sg_page(sg)); 119 - if (!addr) { 120 - ret = 0; 121 - break; 122 - } 123 - sg->dma_address = addr + sg->offset; 124 - #ifdef CONFIG_NEED_SG_DMA_LENGTH 125 - sg->dma_length = sg->length; 126 - #endif 127 - } 128 - return ret; 129 - } 130 - 131 - static void rvt_unmap_sg(struct ib_device *dev, 132 - struct scatterlist *sg, int nents, 133 - enum dma_data_direction direction) 134 - { 135 - /* This is a stub, nothing to be done here */ 136 - } 137 - 138 - static int rvt_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl, 139 - int nents, enum dma_data_direction direction, 140 - unsigned long attrs) 141 - { 142 - return rvt_map_sg(dev, sgl, nents, direction); 143 - } 144 - 145 - static void rvt_unmap_sg_attrs(struct ib_device *dev, 146 - struct scatterlist *sg, int nents, 147 - enum dma_data_direction direction, 148 - unsigned long attrs) 149 - { 150 - return rvt_unmap_sg(dev, sg, nents, direction); 151 - } 152 - 153 - static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr, 154 - size_t size, enum dma_data_direction dir) 155 - { 156 - } 157 - 158 - static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr, 159 - size_t size, 160 - enum dma_data_direction dir) 161 - { 162 - } 163 - 164 - static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size, 165 - u64 *dma_handle, gfp_t flag) 166 - { 167 - struct page *p; 168 - void *addr = NULL; 169 - 170 - p = alloc_pages(flag, get_order(size)); 171 - if (p) 172 - addr = page_address(p); 173 - if (dma_handle) 174 - *dma_handle = (u64)addr; 175 - return addr; 176 - } 177 - 178 - static void rvt_dma_free_coherent(struct ib_device *dev, size_t size, 179 - void *cpu_addr, u64 dma_handle) 180 - { 181 - free_pages((unsigned long)cpu_addr, get_order(size)); 182 - } 183 - 184 - struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = { 185 - .mapping_error = rvt_mapping_error, 186 - .map_single = rvt_dma_map_single, 187 - .unmap_single = rvt_dma_unmap_single, 188 - .map_page = rvt_dma_map_page, 189 - .unmap_page 
= rvt_dma_unmap_page, 190 - .map_sg = rvt_map_sg, 191 - .unmap_sg = rvt_unmap_sg, 192 - .map_sg_attrs = rvt_map_sg_attrs, 193 - .unmap_sg_attrs = rvt_unmap_sg_attrs, 194 - .sync_single_for_cpu = rvt_sync_single_for_cpu, 195 - .sync_single_for_device = rvt_sync_single_for_device, 196 - .alloc_coherent = rvt_dma_alloc_coherent, 197 - .free_coherent = rvt_dma_free_coherent 198 - };
-53
drivers/infiniband/sw/rdmavt/dma.h
··· 1 - #ifndef DEF_RDMAVTDMA_H 2 - #define DEF_RDMAVTDMA_H 3 - 4 - /* 5 - * Copyright(c) 2016 Intel Corporation. 6 - * 7 - * This file is provided under a dual BSD/GPLv2 license. When using or 8 - * redistributing this file, you may do so under either license. 9 - * 10 - * GPL LICENSE SUMMARY 11 - * 12 - * This program is free software; you can redistribute it and/or modify 13 - * it under the terms of version 2 of the GNU General Public License as 14 - * published by the Free Software Foundation. 15 - * 16 - * This program is distributed in the hope that it will be useful, but 17 - * WITHOUT ANY WARRANTY; without even the implied warranty of 18 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 - * General Public License for more details. 20 - * 21 - * BSD LICENSE 22 - * 23 - * Redistribution and use in source and binary forms, with or without 24 - * modification, are permitted provided that the following conditions 25 - * are met: 26 - * 27 - * - Redistributions of source code must retain the above copyright 28 - * notice, this list of conditions and the following disclaimer. 29 - * - Redistributions in binary form must reproduce the above copyright 30 - * notice, this list of conditions and the following disclaimer in 31 - * the documentation and/or other materials provided with the 32 - * distribution. 33 - * - Neither the name of Intel Corporation nor the names of its 34 - * contributors may be used to endorse or promote products derived 35 - * from this software without specific prior written permission. 36 - * 37 - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 38 - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 39 - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 40 - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 41 - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 42 - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 43 - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 44 - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 45 - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 46 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 47 - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 48 - * 49 - */ 50 - 51 - extern struct ib_dma_mapping_ops rvt_default_dma_mapping_ops; 52 - 53 - #endif /* DEF_RDMAVTDMA_H */
+4 -4
drivers/infiniband/sw/rdmavt/mr.c
··· 320 320 * @acc: access flags 321 321 * 322 322 * Return: the memory region on success, otherwise returns an errno. 323 - * Note that all DMA addresses should be created via the 324 - * struct ib_dma_mapping_ops functions (see dma.c). 323 + * Note that all DMA addresses should be created via the functions in 324 + * struct dma_virt_ops. 325 325 */ 326 326 struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc) 327 327 { ··· 799 799 800 800 /* 801 801 * We use LKEY == zero for kernel virtual addresses 802 - * (see rvt_get_dma_mr and dma.c). 802 + * (see rvt_get_dma_mr() and dma_virt_ops). 803 803 */ 804 804 rcu_read_lock(); 805 805 if (sge->lkey == 0) { ··· 897 897 898 898 /* 899 899 * We use RKEY == zero for kernel virtual addresses 900 - * (see rvt_get_dma_mr and dma.c). 900 + * (see rvt_get_dma_mr() and dma_virt_ops). 901 901 */ 902 902 rcu_read_lock(); 903 903 if (rkey == 0) {
+2 -2
drivers/infiniband/sw/rdmavt/vt.c
··· 47 47 48 48 #include <linux/module.h> 49 49 #include <linux/kernel.h> 50 + #include <linux/dma-mapping.h> 50 51 #include "vt.h" 51 52 #include "trace.h" 52 53 ··· 779 778 } 780 779 781 780 /* DMA Operations */ 782 - rdi->ibdev.dma_ops = 783 - rdi->ibdev.dma_ops ? : &rvt_default_dma_mapping_ops; 781 + rdi->ibdev.dev.dma_ops = rdi->ibdev.dev.dma_ops ? : &dma_virt_ops; 784 782 785 783 /* Protection Domain */ 786 784 spin_lock_init(&rdi->n_pds_lock);
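rdmavt keeps the driver-override semantics of the old code: the `?:` fallback installs dma_virt_ops only if the registering driver supplied no ops table of its own. Spelled out:

	if (!rdi->ibdev.dev.dma_ops)
		rdi->ibdev.dev.dma_ops = &dma_virt_ops;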
-1
drivers/infiniband/sw/rdmavt/vt.h
··· 50 50 51 51 #include <rdma/rdma_vt.h> 52 52 #include <linux/pci.h> 53 - #include "dma.h" 54 53 #include "pd.h" 55 54 #include "qp.h" 56 55 #include "ah.h"
+1
drivers/infiniband/sw/rxe/Kconfig
··· 2 2 tristate "Software RDMA over Ethernet (RoCE) driver" 3 3 depends on INET && PCI && INFINIBAND 4 4 depends on NET_UDP_TUNNEL 5 + select DMA_VIRT_OPS 5 6 ---help--- 6 7 This driver implements the InfiniBand RDMA transport over 7 8 the Linux network stack. It enables a system with a
-1
drivers/infiniband/sw/rxe/Makefile
··· 14 14 rxe_qp.o \ 15 15 rxe_cq.o \ 16 16 rxe_mr.o \ 17 - rxe_dma.o \ 18 17 rxe_opcode.o \ 19 18 rxe_mmap.o \ 20 19 rxe_icrc.o \
-183
drivers/infiniband/sw/rxe/rxe_dma.c
··· 1 - /* 2 - * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. 3 - * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. 4 - * 5 - * This software is available to you under a choice of one of two 6 - * licenses. You may choose to be licensed under the terms of the GNU 7 - * General Public License (GPL) Version 2, available from the file 8 - * COPYING in the main directory of this source tree, or the 9 - * OpenIB.org BSD license below: 10 - * 11 - * Redistribution and use in source and binary forms, with or 12 - * without modification, are permitted provided that the following 13 - * conditions are met: 14 - * 15 - * - Redistributions of source code must retain the above 16 - * copyright notice, this list of conditions and the following 17 - * disclaimer. 18 - * 19 - * - Redistributions in binary form must reproduce the above 20 - * copyright notice, this list of conditions and the following 21 - * disclaimer in the documentation and/or other materials 22 - * provided with the distribution. 23 - * 24 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 - * SOFTWARE. 32 - */ 33 - 34 - #include "rxe.h" 35 - #include "rxe_loc.h" 36 - 37 - #define DMA_BAD_ADDER ((u64)0) 38 - 39 - static int rxe_mapping_error(struct ib_device *dev, u64 dma_addr) 40 - { 41 - return dma_addr == DMA_BAD_ADDER; 42 - } 43 - 44 - static u64 rxe_dma_map_single(struct ib_device *dev, 45 - void *cpu_addr, size_t size, 46 - enum dma_data_direction direction) 47 - { 48 - WARN_ON(!valid_dma_direction(direction)); 49 - return (uintptr_t)cpu_addr; 50 - } 51 - 52 - static void rxe_dma_unmap_single(struct ib_device *dev, 53 - u64 addr, size_t size, 54 - enum dma_data_direction direction) 55 - { 56 - WARN_ON(!valid_dma_direction(direction)); 57 - } 58 - 59 - static u64 rxe_dma_map_page(struct ib_device *dev, 60 - struct page *page, 61 - unsigned long offset, 62 - size_t size, enum dma_data_direction direction) 63 - { 64 - u64 addr; 65 - 66 - WARN_ON(!valid_dma_direction(direction)); 67 - 68 - if (offset + size > PAGE_SIZE) { 69 - addr = DMA_BAD_ADDER; 70 - goto done; 71 - } 72 - 73 - addr = (uintptr_t)page_address(page); 74 - if (addr) 75 - addr += offset; 76 - 77 - done: 78 - return addr; 79 - } 80 - 81 - static void rxe_dma_unmap_page(struct ib_device *dev, 82 - u64 addr, size_t size, 83 - enum dma_data_direction direction) 84 - { 85 - WARN_ON(!valid_dma_direction(direction)); 86 - } 87 - 88 - static int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl, 89 - int nents, enum dma_data_direction direction) 90 - { 91 - struct scatterlist *sg; 92 - u64 addr; 93 - int i; 94 - int ret = nents; 95 - 96 - WARN_ON(!valid_dma_direction(direction)); 97 - 98 - for_each_sg(sgl, sg, nents, i) { 99 - addr = (uintptr_t)page_address(sg_page(sg)); 100 - if (!addr) { 101 - ret = 0; 102 - break; 103 - } 104 - sg->dma_address = addr + sg->offset; 105 - #ifdef CONFIG_NEED_SG_DMA_LENGTH 106 - sg->dma_length = sg->length; 107 - #endif 108 - } 109 - 110 - return ret; 111 - } 112 - 113 - static void rxe_unmap_sg(struct ib_device *dev, 114 - struct scatterlist *sg, int 
nents, 115 - enum dma_data_direction direction) 116 - { 117 - WARN_ON(!valid_dma_direction(direction)); 118 - } 119 - 120 - static int rxe_map_sg_attrs(struct ib_device *dev, struct scatterlist *sgl, 121 - int nents, enum dma_data_direction direction, 122 - unsigned long attrs) 123 - { 124 - return rxe_map_sg(dev, sgl, nents, direction); 125 - } 126 - 127 - static void rxe_unmap_sg_attrs(struct ib_device *dev, 128 - struct scatterlist *sg, int nents, 129 - enum dma_data_direction direction, 130 - unsigned long attrs) 131 - { 132 - rxe_unmap_sg(dev, sg, nents, direction); 133 - } 134 - 135 - static void rxe_sync_single_for_cpu(struct ib_device *dev, 136 - u64 addr, 137 - size_t size, enum dma_data_direction dir) 138 - { 139 - } 140 - 141 - static void rxe_sync_single_for_device(struct ib_device *dev, 142 - u64 addr, 143 - size_t size, enum dma_data_direction dir) 144 - { 145 - } 146 - 147 - static void *rxe_dma_alloc_coherent(struct ib_device *dev, size_t size, 148 - u64 *dma_handle, gfp_t flag) 149 - { 150 - struct page *p; 151 - void *addr = NULL; 152 - 153 - p = alloc_pages(flag, get_order(size)); 154 - if (p) 155 - addr = page_address(p); 156 - 157 - if (dma_handle) 158 - *dma_handle = (uintptr_t)addr; 159 - 160 - return addr; 161 - } 162 - 163 - static void rxe_dma_free_coherent(struct ib_device *dev, size_t size, 164 - void *cpu_addr, u64 dma_handle) 165 - { 166 - free_pages((unsigned long)cpu_addr, get_order(size)); 167 - } 168 - 169 - struct ib_dma_mapping_ops rxe_dma_mapping_ops = { 170 - .mapping_error = rxe_mapping_error, 171 - .map_single = rxe_dma_map_single, 172 - .unmap_single = rxe_dma_unmap_single, 173 - .map_page = rxe_dma_map_page, 174 - .unmap_page = rxe_dma_unmap_page, 175 - .map_sg = rxe_map_sg, 176 - .unmap_sg = rxe_unmap_sg, 177 - .map_sg_attrs = rxe_map_sg_attrs, 178 - .unmap_sg_attrs = rxe_unmap_sg_attrs, 179 - .sync_single_for_cpu = rxe_sync_single_for_cpu, 180 - .sync_single_for_device = rxe_sync_single_for_device, 181 - .alloc_coherent = rxe_dma_alloc_coherent, 182 - .free_coherent = rxe_dma_free_coherent 183 - };
-2
drivers/infiniband/sw/rxe/rxe_loc.h
··· 237 237 struct ib_srq_attr *attr, enum ib_srq_attr_mask mask, 238 238 struct ib_udata *udata); 239 239 240 - extern struct ib_dma_mapping_ops rxe_dma_mapping_ops; 241 - 242 240 void rxe_release(struct kref *kref); 243 241 244 242 void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify);
+5 -4
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 31 31 * SOFTWARE. 32 32 */ 33 33 34 + #include <linux/dma-mapping.h> 34 35 #include "rxe.h" 35 36 #include "rxe_loc.h" 36 37 #include "rxe_queue.h" ··· 170 169 struct rxe_port *port; 171 170 172 171 if (unlikely(port_num != 1)) { 173 - dev_warn(device->dma_device, "invalid port_num = %d\n", 172 + dev_warn(device->dev.parent, "invalid port_num = %d\n", 174 173 port_num); 175 174 goto err1; 176 175 } ··· 178 177 port = &rxe->port; 179 178 180 179 if (unlikely(index >= port->attr.pkey_tbl_len)) { 181 - dev_warn(device->dma_device, "invalid index = %d\n", 180 + dev_warn(device->dev.parent, "invalid index = %d\n", 182 181 index); 183 182 goto err1; 184 183 } ··· 1235 1234 dev->node_type = RDMA_NODE_IB_CA; 1236 1235 dev->phys_port_cnt = 1; 1237 1236 dev->num_comp_vectors = RXE_NUM_COMP_VECTORS; 1238 - dev->dma_device = rxe_dma_device(rxe); 1237 + dev->dev.parent = rxe_dma_device(rxe); 1239 1238 dev->local_dma_lkey = 0; 1240 1239 dev->node_guid = rxe_node_guid(rxe); 1241 - dev->dma_ops = &rxe_dma_mapping_ops; 1240 + dev->dev.dma_ops = &dma_virt_ops; 1242 1241 1243 1242 dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; 1244 1243 dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
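With rxe parented to the netdev's device and dma_virt_ops installed, the ib_dma_*() wrappers (rewritten in the include/rdma/ib_verbs.h hunk at the end) devolve to the plain DMA API on &ibdev->dev, e.g.:

	u64 iova = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	/* == dma_map_single(&ibdev->dev, buf, len, DMA_TO_DEVICE);
	 * get_dma_ops() resolves that to dma_virt_ops for rxe */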
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
··· 65 65 ib_get_device_fw_str(priv->ca, drvinfo->fw_version, 66 66 sizeof(drvinfo->fw_version)); 67 67 68 - strlcpy(drvinfo->bus_info, dev_name(priv->ca->dma_device), 68 + strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent), 69 69 sizeof(drvinfo->bus_info)); 70 70 71 71 strlcpy(drvinfo->version, ipoib_driver_version,
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 2020 2020 if (!priv) 2021 2021 goto alloc_mem_failed; 2022 2022 2023 - SET_NETDEV_DEV(priv->dev, hca->dma_device); 2023 + SET_NETDEV_DEV(priv->dev, hca->dev.parent); 2024 2024 priv->dev->dev_id = port - 1; 2025 2025 2026 2026 result = ib_query_port(hca, port, &attr);
+1 -1
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 652 652 } 653 653 654 654 if (iscsi_host_add(shost, 655 - ib_conn->device->ib_device->dma_device)) { 655 + ib_conn->device->ib_device->dev.parent)) { 656 656 mutex_unlock(&iser_conn->state_mutex); 657 657 goto free_host; 658 658 }
+2 -2
drivers/infiniband/ulp/srp/ib_srp.c
··· 2933 2933 sprintf(target->target_name, "SRP.T10:%016llX", 2934 2934 be64_to_cpu(target->id_ext)); 2935 2935 2936 - if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device)) 2936 + if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) 2937 2937 return -ENODEV; 2938 2938 2939 2939 memcpy(ids.port_id, &target->id_ext, 8); ··· 3546 3546 host->port = port; 3547 3547 3548 3548 host->dev.class = &srp_class; 3549 - host->dev.parent = device->dev->dma_device; 3549 + host->dev.parent = device->dev->dev.parent; 3550 3550 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port); 3551 3551 3552 3552 if (device_register(&host->dev))
+1 -2
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 2479 2479 struct ib_srq_init_attr srq_attr; 2480 2480 int i; 2481 2481 2482 - pr_debug("device = %p, device->dma_ops = %p\n", device, 2483 - device->dma_ops); 2482 + pr_debug("device = %p\n", device); 2484 2483 2485 2484 sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); 2486 2485 if (!sdev)
+5 -5
drivers/iommu/amd_iommu.c
··· 117 117 static ATOMIC_NOTIFIER_HEAD(ppr_notifier); 118 118 int amd_iommu_max_glx_val = -1; 119 119 120 - static struct dma_map_ops amd_iommu_dma_ops; 120 + static const struct dma_map_ops amd_iommu_dma_ops; 121 121 122 122 /* 123 123 * This struct contains device specific data for the IOMMU ··· 519 519 iommu_group_remove_device(dev); 520 520 521 521 /* Remove dma-ops */ 522 - dev->archdata.dma_ops = NULL; 522 + dev->dma_ops = NULL; 523 523 524 524 /* 525 525 * We keep dev_data around for unplugged devices and reuse it when the ··· 2168 2168 dev_name(dev)); 2169 2169 2170 2170 iommu_ignore_device(dev); 2171 - dev->archdata.dma_ops = &nommu_dma_ops; 2171 + dev->dma_ops = &nommu_dma_ops; 2172 2172 goto out; 2173 2173 } 2174 2174 init_iommu_group(dev); ··· 2185 2185 if (domain->type == IOMMU_DOMAIN_IDENTITY) 2186 2186 dev_data->passthrough = true; 2187 2187 else 2188 - dev->archdata.dma_ops = &amd_iommu_dma_ops; 2188 + dev->dma_ops = &amd_iommu_dma_ops; 2189 2189 2190 2190 out: 2191 2191 iommu_completion_wait(iommu); ··· 2732 2732 return check_device(dev); 2733 2733 } 2734 2734 2735 - static struct dma_map_ops amd_iommu_dma_ops = { 2735 + static const struct dma_map_ops amd_iommu_dma_ops = { 2736 2736 .alloc = alloc_coherent, 2737 2737 .free = free_coherent, 2738 2738 .map_page = map_page,
+2 -2
drivers/misc/mic/bus/mic_bus.c
··· 143 143 } 144 144 145 145 struct mbus_device * 146 - mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 146 + mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, 147 147 struct mbus_hw_ops *hw_ops, int index, 148 148 void __iomem *mmio_va) 149 149 { ··· 158 158 mbdev->dev.parent = pdev; 159 159 mbdev->id.device = id; 160 160 mbdev->id.vendor = MBUS_DEV_ANY_ID; 161 - mbdev->dev.archdata.dma_ops = dma_ops; 161 + mbdev->dev.dma_ops = dma_ops; 162 162 mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask; 163 163 dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64)); 164 164 mbdev->dev.release = mbus_release_dev;
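Constifying the mbus/scif/vop registration interfaces lets callers pass static const tables (such as __mic_dma_ops in the mic_boot.c hunk below) directly; vop_bus.c below drops exactly the cast that used to paper over this. A hypothetical caller (id and ops names invented for illustration):

	static const struct dma_map_ops my_dma_ops = { /* ... */ };

	mbdev = mbus_register_device(&pdev->dev, MY_MBUS_DEV_ID, &my_dma_ops,
				     &my_hw_ops, 0, mmio_va);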
+2 -2
drivers/misc/mic/bus/scif_bus.c
··· 138 138 } 139 139 140 140 struct scif_hw_dev * 141 - scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 141 + scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, 142 142 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, 143 143 struct mic_mw *mmio, struct mic_mw *aper, void *dp, 144 144 void __iomem *rdp, struct dma_chan **chan, int num_chan, ··· 154 154 sdev->dev.parent = pdev; 155 155 sdev->id.device = id; 156 156 sdev->id.vendor = SCIF_DEV_ANY_ID; 157 - sdev->dev.archdata.dma_ops = dma_ops; 157 + sdev->dev.dma_ops = dma_ops; 158 158 sdev->dev.release = scif_release_dev; 159 159 sdev->hw_ops = hw_ops; 160 160 sdev->dnode = dnode;
+1 -1
drivers/misc/mic/bus/scif_bus.h
··· 113 113 void scif_unregister_driver(struct scif_driver *driver); 114 114 struct scif_hw_dev * 115 115 scif_register_device(struct device *pdev, int id, 116 - struct dma_map_ops *dma_ops, 116 + const struct dma_map_ops *dma_ops, 117 117 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, 118 118 struct mic_mw *mmio, struct mic_mw *aper, 119 119 void *dp, void __iomem *rdp,
+1 -1
drivers/misc/mic/bus/vop_bus.c
··· 154 154 vdev->dev.parent = pdev; 155 155 vdev->id.device = id; 156 156 vdev->id.vendor = VOP_DEV_ANY_ID; 157 - vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops; 157 + vdev->dev.dma_ops = dma_ops; 158 158 vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask; 159 159 dma_set_mask(&vdev->dev, DMA_BIT_MASK(64)); 160 160 vdev->dev.release = vop_release_dev;
+2 -2
drivers/misc/mic/host/mic_boot.c
··· 245 245 dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir); 246 246 } 247 247 248 - static struct dma_map_ops __mic_dma_ops = { 248 + static const struct dma_map_ops __mic_dma_ops = { 249 249 .alloc = __mic_dma_alloc, 250 250 .free = __mic_dma_free, 251 251 .map_page = __mic_dma_map_page, ··· 344 344 mic_unmap_single(mdev, dma_addr, size); 345 345 } 346 346 347 - static struct dma_map_ops mic_dma_ops = { 347 + static const struct dma_map_ops mic_dma_ops = { 348 348 .map_page = mic_dma_map_page, 349 349 .unmap_page = mic_dma_unmap_page, 350 350 };
+1 -1
drivers/nvme/host/rdma.c
··· 1251 1251 1252 1252 dev = nvme_rdma_find_get_device(queue->cm_id); 1253 1253 if (!dev) { 1254 - dev_err(queue->cm_id->device->dma_device, 1254 + dev_err(queue->cm_id->device->dev.parent, 1255 1255 "no client data found!\n"); 1256 1256 return -ECONNREFUSED; 1257 1257 }
+1 -1
drivers/parisc/ccio-dma.c
··· 1011 1011 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); 1012 1012 } 1013 1013 1014 - static struct dma_map_ops ccio_ops = { 1014 + static const struct dma_map_ops ccio_ops = { 1015 1015 .dma_supported = ccio_dma_supported, 1016 1016 .alloc = ccio_alloc, 1017 1017 .free = ccio_free,
+1 -1
drivers/parisc/sba_iommu.c
··· 1069 1069 1070 1070 } 1071 1071 1072 - static struct dma_map_ops sba_ops = { 1072 + static const struct dma_map_ops sba_ops = { 1073 1073 .dma_supported = sba_dma_supported, 1074 1074 .alloc = sba_alloc, 1075 1075 .free = sba_free,
+1 -1
drivers/pci/host/vmd.c
··· 282 282 return &vmd->dev->dev; 283 283 } 284 284 285 - static struct dma_map_ops *vmd_dma_ops(struct device *dev) 285 + static const struct dma_map_ops *vmd_dma_ops(struct device *dev) 286 286 { 287 287 return get_dma_ops(to_vmd_dev(dev)); 288 288 }
+1
include/linux/device.h
··· 925 925 #ifdef CONFIG_NUMA 926 926 int numa_node; /* NUMA node this device is close to */ 927 927 #endif 928 + const struct dma_map_ops *dma_ops; 928 929 u64 *dma_mask; /* dma mask (if dma'able device) */ 929 930 u64 coherent_dma_mask;/* Like dma_mask, but for 930 931 alloc_coherent mappings as
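This one-line addition to struct device is what enables all of the driver churn above: per-device DMA ops become generic kernel infrastructure instead of per-arch archdata. The matching set_dma_ops() helper in the next hunk is the intended way to install a table (ops name hypothetical):

	set_dma_ops(&pdev->dev, &my_iommu_dma_ops);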
+34 -21
include/linux/dma-mapping.h
··· 134 134 int is_phys; 135 135 }; 136 136 137 - extern struct dma_map_ops dma_noop_ops; 137 + extern const struct dma_map_ops dma_noop_ops; 138 + extern const struct dma_map_ops dma_virt_ops; 138 139 139 140 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) 140 141 ··· 172 171 173 172 #ifdef CONFIG_HAS_DMA 174 173 #include <asm/dma-mapping.h> 174 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 175 + { 176 + if (dev && dev->dma_ops) 177 + return dev->dma_ops; 178 + return get_arch_dma_ops(dev ? dev->bus : NULL); 179 + } 180 + 181 + static inline void set_dma_ops(struct device *dev, 182 + const struct dma_map_ops *dma_ops) 183 + { 184 + dev->dma_ops = dma_ops; 185 + } 175 186 #else 176 187 /* 177 188 * Define the dma api to allow compilation but not linking of 178 189 * dma dependent code. Code that depends on the dma-mapping 179 190 * API needs to set 'depends on HAS_DMA' in its Kconfig 180 191 */ 181 - extern struct dma_map_ops bad_dma_ops; 182 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 192 + extern const struct dma_map_ops bad_dma_ops; 193 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 183 194 { 184 195 return &bad_dma_ops; 185 196 } ··· 202 189 enum dma_data_direction dir, 203 190 unsigned long attrs) 204 191 { 205 - struct dma_map_ops *ops = get_dma_ops(dev); 192 + const struct dma_map_ops *ops = get_dma_ops(dev); 206 193 dma_addr_t addr; 207 194 208 195 kmemcheck_mark_initialized(ptr, size); ··· 221 208 enum dma_data_direction dir, 222 209 unsigned long attrs) 223 210 { 224 - struct dma_map_ops *ops = get_dma_ops(dev); 211 + const struct dma_map_ops *ops = get_dma_ops(dev); 225 212 226 213 BUG_ON(!valid_dma_direction(dir)); 227 214 if (ops->unmap_page) ··· 237 224 int nents, enum dma_data_direction dir, 238 225 unsigned long attrs) 239 226 { 240 - struct dma_map_ops *ops = get_dma_ops(dev); 227 + const struct dma_map_ops *ops = get_dma_ops(dev); 241 228 int i, ents; 242 229 struct scatterlist *s; 243 230 ··· 255 242 int nents, enum dma_data_direction dir, 256 243 unsigned long attrs) 257 244 { 258 - struct dma_map_ops *ops = get_dma_ops(dev); 245 + const struct dma_map_ops *ops = get_dma_ops(dev); 259 246 260 247 BUG_ON(!valid_dma_direction(dir)); 261 248 debug_dma_unmap_sg(dev, sg, nents, dir); ··· 269 256 enum dma_data_direction dir, 270 257 unsigned long attrs) 271 258 { 272 - struct dma_map_ops *ops = get_dma_ops(dev); 259 + const struct dma_map_ops *ops = get_dma_ops(dev); 273 260 dma_addr_t addr; 274 261 275 262 kmemcheck_mark_initialized(page_address(page) + offset, size); ··· 285 272 enum dma_data_direction dir, 286 273 unsigned long attrs) 287 274 { 288 - struct dma_map_ops *ops = get_dma_ops(dev); 275 + const struct dma_map_ops *ops = get_dma_ops(dev); 289 276 290 277 BUG_ON(!valid_dma_direction(dir)); 291 278 if (ops->unmap_page) ··· 299 286 enum dma_data_direction dir, 300 287 unsigned long attrs) 301 288 { 302 - struct dma_map_ops *ops = get_dma_ops(dev); 289 + const struct dma_map_ops *ops = get_dma_ops(dev); 303 290 dma_addr_t addr; 304 291 305 292 BUG_ON(!valid_dma_direction(dir)); ··· 320 307 size_t size, enum dma_data_direction dir, 321 308 unsigned long attrs) 322 309 { 323 - struct dma_map_ops *ops = get_dma_ops(dev); 310 + const struct dma_map_ops *ops = get_dma_ops(dev); 324 311 325 312 BUG_ON(!valid_dma_direction(dir)); 326 313 if (ops->unmap_resource) ··· 332 319 size_t size, 333 320 enum dma_data_direction dir) 334 321 { 335 - struct dma_map_ops *ops = 
get_dma_ops(dev); 322 + const struct dma_map_ops *ops = get_dma_ops(dev); 336 323 337 324 BUG_ON(!valid_dma_direction(dir)); 338 325 if (ops->sync_single_for_cpu) ··· 344 331 dma_addr_t addr, size_t size, 345 332 enum dma_data_direction dir) 346 333 { 347 - struct dma_map_ops *ops = get_dma_ops(dev); 334 + const struct dma_map_ops *ops = get_dma_ops(dev); 348 335 349 336 BUG_ON(!valid_dma_direction(dir)); 350 337 if (ops->sync_single_for_device) ··· 384 371 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 385 372 int nelems, enum dma_data_direction dir) 386 373 { 387 - struct dma_map_ops *ops = get_dma_ops(dev); 374 + const struct dma_map_ops *ops = get_dma_ops(dev); 388 375 389 376 BUG_ON(!valid_dma_direction(dir)); 390 377 if (ops->sync_sg_for_cpu) ··· 396 383 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 397 384 int nelems, enum dma_data_direction dir) 398 385 { 399 - struct dma_map_ops *ops = get_dma_ops(dev); 386 + const struct dma_map_ops *ops = get_dma_ops(dev); 400 387 401 388 BUG_ON(!valid_dma_direction(dir)); 402 389 if (ops->sync_sg_for_device) ··· 441 428 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, 442 429 dma_addr_t dma_addr, size_t size, unsigned long attrs) 443 430 { 444 - struct dma_map_ops *ops = get_dma_ops(dev); 431 + const struct dma_map_ops *ops = get_dma_ops(dev); 445 432 BUG_ON(!ops); 446 433 if (ops->mmap) 447 434 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); ··· 459 446 dma_addr_t dma_addr, size_t size, 460 447 unsigned long attrs) 461 448 { 462 - struct dma_map_ops *ops = get_dma_ops(dev); 449 + const struct dma_map_ops *ops = get_dma_ops(dev); 463 450 BUG_ON(!ops); 464 451 if (ops->get_sgtable) 465 452 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, ··· 477 464 dma_addr_t *dma_handle, gfp_t flag, 478 465 unsigned long attrs) 479 466 { 480 - struct dma_map_ops *ops = get_dma_ops(dev); 467 + const struct dma_map_ops *ops = get_dma_ops(dev); 481 468 void *cpu_addr; 482 469 483 470 BUG_ON(!ops); ··· 499 486 void *cpu_addr, dma_addr_t dma_handle, 500 487 unsigned long attrs) 501 488 { 502 - struct dma_map_ops *ops = get_dma_ops(dev); 489 + const struct dma_map_ops *ops = get_dma_ops(dev); 503 490 504 491 BUG_ON(!ops); 505 492 WARN_ON(irqs_disabled()); ··· 557 544 #ifndef HAVE_ARCH_DMA_SUPPORTED 558 545 static inline int dma_supported(struct device *dev, u64 mask) 559 546 { 560 - struct dma_map_ops *ops = get_dma_ops(dev); 547 + const struct dma_map_ops *ops = get_dma_ops(dev); 561 548 562 549 if (!ops) 563 550 return 0; ··· 570 557 #ifndef HAVE_ARCH_DMA_SET_MASK 571 558 static inline int dma_set_mask(struct device *dev, u64 mask) 572 559 { 573 - struct dma_map_ops *ops = get_dma_ops(dev); 560 + const struct dma_map_ops *ops = get_dma_ops(dev); 574 561 575 562 if (ops->set_dma_mask) 576 563 return ops->set_dma_mask(dev, mask);
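Since every wrapper in this header now fetches the table through get_dma_ops(), callers are completely unchanged by the migration; a driver still writes, for example:

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;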
+1 -1
include/linux/mic_bus.h
··· 90 90 }; 91 91 92 92 struct mbus_device * 93 - mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 93 + mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, 94 94 struct mbus_hw_ops *hw_ops, int index, 95 95 void __iomem *mmio_va); 96 96 void mbus_unregister_device(struct mbus_device *mbdev);
+15 -128
include/rdma/ib_verbs.h
··· 1843 1843 struct ib_port_cache *ports; 1844 1844 }; 1845 1845 1846 - struct ib_dma_mapping_ops { 1847 - int (*mapping_error)(struct ib_device *dev, 1848 - u64 dma_addr); 1849 - u64 (*map_single)(struct ib_device *dev, 1850 - void *ptr, size_t size, 1851 - enum dma_data_direction direction); 1852 - void (*unmap_single)(struct ib_device *dev, 1853 - u64 addr, size_t size, 1854 - enum dma_data_direction direction); 1855 - u64 (*map_page)(struct ib_device *dev, 1856 - struct page *page, unsigned long offset, 1857 - size_t size, 1858 - enum dma_data_direction direction); 1859 - void (*unmap_page)(struct ib_device *dev, 1860 - u64 addr, size_t size, 1861 - enum dma_data_direction direction); 1862 - int (*map_sg)(struct ib_device *dev, 1863 - struct scatterlist *sg, int nents, 1864 - enum dma_data_direction direction); 1865 - void (*unmap_sg)(struct ib_device *dev, 1866 - struct scatterlist *sg, int nents, 1867 - enum dma_data_direction direction); 1868 - int (*map_sg_attrs)(struct ib_device *dev, 1869 - struct scatterlist *sg, int nents, 1870 - enum dma_data_direction direction, 1871 - unsigned long attrs); 1872 - void (*unmap_sg_attrs)(struct ib_device *dev, 1873 - struct scatterlist *sg, int nents, 1874 - enum dma_data_direction direction, 1875 - unsigned long attrs); 1876 - void (*sync_single_for_cpu)(struct ib_device *dev, 1877 - u64 dma_handle, 1878 - size_t size, 1879 - enum dma_data_direction dir); 1880 - void (*sync_single_for_device)(struct ib_device *dev, 1881 - u64 dma_handle, 1882 - size_t size, 1883 - enum dma_data_direction dir); 1884 - void *(*alloc_coherent)(struct ib_device *dev, 1885 - size_t size, 1886 - u64 *dma_handle, 1887 - gfp_t flag); 1888 - void (*free_coherent)(struct ib_device *dev, 1889 - size_t size, void *cpu_addr, 1890 - u64 dma_handle); 1891 - }; 1892 - 1893 1846 struct iw_cm_verbs; 1894 1847 1895 1848 struct ib_port_immutable {
··· 1853 1900 }; 1854 1901 1855 1902 struct ib_device { 1856 - struct device *dma_device; 1857 - 1858 1903 char name[IB_DEVICE_NAME_MAX]; 1859 1904 1860 1905 struct list_head event_handler_list;
··· 2102 2151 struct ib_rwq_ind_table_init_attr *init_attr, 2103 2152 struct ib_udata *udata); 2104 2153 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table); 2105 - struct ib_dma_mapping_ops *dma_ops; 2106 2154 2107 2155 struct module *owner; 2108 2156 struct device dev;
··· 2993 3043 */ 2994 3044 static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 2995 3045 { 2996 - if (dev->dma_ops) 2997 - return dev->dma_ops->mapping_error(dev, dma_addr); 2998 - return dma_mapping_error(dev->dma_device, dma_addr); 3046 + return dma_mapping_error(&dev->dev, dma_addr); 2999 3047 } 3000 3048 3001 3049 /**
··· 3007 3059 void *cpu_addr, size_t size, 3008 3060 enum dma_data_direction direction) 3009 3061 { 3010 - if (dev->dma_ops) 3011 - return dev->dma_ops->map_single(dev, cpu_addr, size, direction); 3012 - return dma_map_single(dev->dma_device, cpu_addr, size, direction); 3062 + return dma_map_single(&dev->dev, cpu_addr, size, direction); 3013 3063 } 3014 3064 3015 3065 /**
··· 3021 3075 u64 addr, size_t size, 3022 3076 enum dma_data_direction direction) 3023 3077 { 3024 - if (dev->dma_ops) 3025 - dev->dma_ops->unmap_single(dev, addr, size, direction); 3026 - else 3027 - dma_unmap_single(dev->dma_device, addr, size, direction); 3028 - } 3029 - 3030 - static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 3031 - void *cpu_addr, size_t size, 3032 - enum dma_data_direction direction, 3033 - unsigned long dma_attrs) 3034 - { 3035 - return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 3036 - direction, dma_attrs); 3037 - } 3038 - 3039 - static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 3040 - u64 addr, size_t size, 3041 - enum dma_data_direction direction, 3042 - unsigned long dma_attrs) 3043 - { 3044 - return dma_unmap_single_attrs(dev->dma_device, addr, size, 3045 - direction, dma_attrs); 3078 + dma_unmap_single(&dev->dev, addr, size, direction); 3046 3079 }
··· 3038 3113 size_t size, 3039 3114 enum dma_data_direction direction) 3040 3115 { 3041 - if (dev->dma_ops) 3042 - return dev->dma_ops->map_page(dev, page, offset, size, direction); 3043 - return dma_map_page(dev->dma_device, page, offset, size, direction); 3116 + return dma_map_page(&dev->dev, page, offset, size, direction); 3044 3117 } 3045 3118 3046 3119 /**
··· 3052 3129 u64 addr, size_t size, 3053 3130 enum dma_data_direction direction) 3054 3131 { 3055 - if (dev->dma_ops) 3056 - dev->dma_ops->unmap_page(dev, addr, size, direction); 3057 - else 3058 - dma_unmap_page(dev->dma_device, addr, size, direction); 3132 + dma_unmap_page(&dev->dev, addr, size, direction); 3059 3133 } 3060 3134 3061 3135 /**
··· 3066 3146 struct scatterlist *sg, int nents, 3067 3147 enum dma_data_direction direction) 3068 3148 { 3069 - if (dev->dma_ops) 3070 - return dev->dma_ops->map_sg(dev, sg, nents, direction); 3071 - return dma_map_sg(dev->dma_device, sg, nents, direction); 3149 + return dma_map_sg(&dev->dev, sg, nents, direction); 3072 3150 } 3073 3151 3074 3152 /**
··· 3080 3162 struct scatterlist *sg, int nents, 3081 3163 enum dma_data_direction direction) 3082 3164 { 3083 - if (dev->dma_ops) 3084 - dev->dma_ops->unmap_sg(dev, sg, nents, direction); 3085 - else 3086 - dma_unmap_sg(dev->dma_device, sg, nents, direction); 3165 + dma_unmap_sg(&dev->dev, sg, nents, direction); 3087 3166 } 3088 3167 3089 3168 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
··· 3088 3173 enum dma_data_direction direction, 3089 3174 unsigned long dma_attrs) 3090 3175 { 3091 - if (dev->dma_ops) 3092 - return dev->dma_ops->map_sg_attrs(dev, sg, nents, direction, 3093 - dma_attrs); 3094 - else 3095 - return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 3096 - dma_attrs); 3176 + return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); 3097 3177 } 3098 3178 3099 3179 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
··· 3096 3186 enum dma_data_direction direction, 3097 3187 unsigned long dma_attrs) 3098 3188 { 3099 - if (dev->dma_ops) 3100 - return dev->dma_ops->unmap_sg_attrs(dev, sg, nents, direction, 3101 - dma_attrs); 3102 - else 3103 - dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, 3104 - dma_attrs); 3189 + dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); 3105 3190 } 3106 3191 /** 3107 3192 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
··· 3138 3233 size_t size, 3139 3234 enum dma_data_direction dir) 3140 3235 { 3141 - if (dev->dma_ops) 3142 - dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir); 3143 - else 3144 - dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 3236 + dma_sync_single_for_cpu(&dev->dev, addr, size, dir); 3145 3237 } 3146 3238 3147 3239 /**
··· 3153 3251 size_t size, 3154 3252 enum dma_data_direction dir) 3155 3253 { 3156 - if (dev->dma_ops) 3157 - dev->dma_ops->sync_single_for_device(dev, addr, size, dir); 3158 - else 3159 - dma_sync_single_for_device(dev->dma_device, addr, size, dir); 3254 + dma_sync_single_for_device(&dev->dev, addr, size, dir); 3160 3255 } 3161 3256 3162 3257 /**
··· 3165 3266 */ 3166 3267 static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 3167 3268 size_t size, 3168 - u64 *dma_handle, 3269 + dma_addr_t *dma_handle, 3169 3270 gfp_t flag) 3170 3271 { 3171 - if (dev->dma_ops) 3172 - return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag); 3173 - else { 3174 - dma_addr_t handle; 3175 - void *ret; 3176 - 3177 - ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag); 3178 - *dma_handle = handle; 3179 - return ret; 3180 - } 3272 + return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); 3181 3273 } 3182 3274 3183 3275 /**
··· 3180 3290 */ 3181 3291 static inline void ib_dma_free_coherent(struct ib_device *dev, 3182 3292 size_t size, void *cpu_addr, 3183 - u64 dma_handle) 3293 + dma_addr_t dma_handle) 3184 3294 { 3185 - if (dev->dma_ops) 3186 - dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 3187 - else 3188 - dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 3295 + dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); 3189 3296 } 3190 3297 3191 3298 /**
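With ib_dma_mapping_ops gone, each ib_dma_* helper above is a one-line wrapper around the core DMA API applied to &dev->dev. ULP call sites keep the same shape as before; a typical mapping sequence, sketched with placeholder buf/len:

	u64 addr;

	addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, addr))
		return -ENOMEM;
	/* ... post a work request that references addr ... */
	ib_dma_unmap_single(ibdev, addr, len, DMA_TO_DEVICE);

The one signature change visible to ULPs is the coherent pair: ib_dma_alloc_coherent()/ib_dma_free_coherent() now take a dma_addr_t handle instead of a u64, which is what drives the net/rds type changes further down.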
+1 -1
include/xen/arm/hypervisor.h
··· 18 18 return PARAVIRT_LAZY_NONE; 19 19 } 20 20 21 - extern struct dma_map_ops *xen_dma_ops; 21 + extern const struct dma_map_ops *xen_dma_ops; 22 22 23 23 #ifdef CONFIG_XEN 24 24 void __init xen_early_init(void);
+10
lib/Kconfig
··· 394 394 depends on !NO_DMA 395 395 default y 396 396 397 + config DMA_NOOP_OPS 398 + bool 399 + depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) 400 + default n 401 + 402 + config DMA_VIRT_OPS 403 + bool 404 + depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT) 405 + default n 406 + 397 407 config CHECK_SIGNATURE 398 408 bool 399 409
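Neither symbol has a prompt, so both stay off unless selected; a driver that relies on dma_virt_ops is expected to select it from its own Kconfig entry, roughly like this (the entry shown is illustrative, not from this series):

	config MY_SOFT_RDMA
		tristate "Software RDMA driver"
		depends on INFINIBAND
		select DMA_VIRT_OPS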
+2 -1
lib/Makefile
··· 27 27 28 28 lib-$(CONFIG_MMU) += ioremap.o 29 29 lib-$(CONFIG_SMP) += cpumask.o 30 - lib-$(CONFIG_HAS_DMA) += dma-noop.o 30 + lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o 31 + lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o 31 32 32 33 lib-y += kobject.o klist.o 33 34 obj-y += lockref.o
+2 -2
lib/dma-noop.c
··· 1 1 /* 2 2 * lib/dma-noop.c 3 3 * 4 - * Simple DMA noop-ops that map 1:1 with memory 4 + * DMA operations that map to physical addresses without flushing memory. 5 5 */ 6 6 #include <linux/export.h> 7 7 #include <linux/mm.h>
··· 64 64 return 1; 65 65 } 66 66 67 - struct dma_map_ops dma_noop_ops = { 67 + const struct dma_map_ops dma_noop_ops = { 68 68 .alloc = dma_noop_alloc, 69 69 .free = dma_noop_free, 70 70 .map_page = dma_noop_map_page,
+72
lib/dma-virt.c
··· 1 + /* 2 + * lib/dma-virt.c 3 + * 4 + * DMA operations that map to virtual addresses without flushing memory. 5 + */ 6 + #include <linux/export.h> 7 + #include <linux/mm.h> 8 + #include <linux/dma-mapping.h> 9 + #include <linux/scatterlist.h> 10 + 11 + static void *dma_virt_alloc(struct device *dev, size_t size, 12 + dma_addr_t *dma_handle, gfp_t gfp, 13 + unsigned long attrs) 14 + { 15 + void *ret; 16 + 17 + ret = (void *)__get_free_pages(gfp, get_order(size)); 18 + if (ret) 19 + *dma_handle = (uintptr_t)ret; 20 + return ret; 21 + } 22 + 23 + static void dma_virt_free(struct device *dev, size_t size, 24 + void *cpu_addr, dma_addr_t dma_addr, 25 + unsigned long attrs) 26 + { 27 + free_pages((unsigned long)cpu_addr, get_order(size)); 28 + } 29 + 30 + static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page, 31 + unsigned long offset, size_t size, 32 + enum dma_data_direction dir, 33 + unsigned long attrs) 34 + { 35 + return (uintptr_t)(page_address(page) + offset); 36 + } 37 + 38 + static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl, 39 + int nents, enum dma_data_direction dir, 40 + unsigned long attrs) 41 + { 42 + int i; 43 + struct scatterlist *sg; 44 + 45 + for_each_sg(sgl, sg, nents, i) { 46 + BUG_ON(!sg_page(sg)); 47 + sg_dma_address(sg) = (uintptr_t)sg_virt(sg); 48 + sg_dma_len(sg) = sg->length; 49 + } 50 + 51 + return nents; 52 + } 53 + 54 + static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr) 55 + { 56 + return false; 57 + } 58 + 59 + static int dma_virt_supported(struct device *dev, u64 mask) 60 + { 61 + return true; 62 + } 63 + 64 + const struct dma_map_ops dma_virt_ops = { 65 + .alloc = dma_virt_alloc, 66 + .free = dma_virt_free, 67 + .map_page = dma_virt_map_page, 68 + .map_sg = dma_virt_map_sg, 69 + .mapping_error = dma_virt_mapping_error, 70 + .dma_supported = dma_virt_supported, 71 + }; 72 + EXPORT_SYMBOL(dma_virt_ops);
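dma_virt_ops hands out kernel virtual addresses as dma_addr_t values and never syncs or flushes, which is only correct for devices that perform no real DMA, i.e. the software RDMA drivers this series converts. A consumer attaches the ops once at setup; a sketch with a placeholder probe function:

	#include <linux/dma-mapping.h>

	static int soft_rdma_probe(struct device *dev)
	{
		/* Every "DMA address" for this device is a kernel
		 * virtual address; dma_virt_map_page() just returns
		 * page_address(page) + offset. */
		set_dma_ops(dev, &dma_virt_ops);
		return 0;
	}

Since these mappings cannot fail, dma_virt_mapping_error() unconditionally returns false and dma_virt_supported() accepts any mask.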
+4 -4
net/rds/ib.h
··· 136 136 struct rds_ib_work_ring i_send_ring; 137 137 struct rm_data_op *i_data_op; 138 138 struct rds_header *i_send_hdrs; 139 - u64 i_send_hdrs_dma; 139 + dma_addr_t i_send_hdrs_dma; 140 140 struct rds_ib_send_work *i_sends; 141 141 atomic_t i_signaled_sends; 142 142
··· 146 146 struct rds_ib_incoming *i_ibinc; 147 147 u32 i_recv_data_rem; 148 148 struct rds_header *i_recv_hdrs; 149 - u64 i_recv_hdrs_dma; 149 + dma_addr_t i_recv_hdrs_dma; 150 150 struct rds_ib_recv_work *i_recvs; 151 151 u64 i_ack_recv; /* last ACK received */ 152 152 struct rds_ib_refill_cache i_cache_incs;
··· 164 164 struct rds_header *i_ack; 165 165 struct ib_send_wr i_ack_wr; 166 166 struct ib_sge i_ack_sge; 167 - u64 i_ack_dma; 167 + dma_addr_t i_ack_dma; 168 168 unsigned long i_ack_queued; 169 169 170 170 /* Flow control related information
··· 235 235 int *vector_load; 236 236 }; 237 237 238 - #define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device) 238 + #define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent) 239 239 #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev) 240 240 241 241 /* bits for i_ack_flags */
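Moving these fields from u64 to dma_addr_t lets RDS pass them directly to ib_dma_alloc_coherent(), whose handle argument is now a dma_addr_t *; the send-ring allocation looks roughly like this (abridged):

	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
			ic->i_send_ring.w_nr * sizeof(struct rds_header),
			&ic->i_send_hdrs_dma, GFP_KERNEL);
	if (!ic->i_send_hdrs)
		return -ENOMEM;

It also keeps the handle type consistent with the rest of the DMA API on configurations where dma_addr_t is 32 bits wide.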
-1
net/rds/ib_mr.h
··· 45 45 46 46 struct rds_ib_fmr { 47 47 struct ib_fmr *fmr; 48 - u64 *dma; 49 48 }; 50 49 51 50 enum rds_ib_fr_state {