Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

treewide: Constify most dma_map_ops structures

Most dma_map_ops structures are never modified. Constify these
structures such that these can be write-protected. This patch
has been generated as follows:

git grep -l 'struct dma_map_ops' |
xargs -d\\n sed -i \
-e 's/struct dma_map_ops/const struct dma_map_ops/g' \
-e 's/const struct dma_map_ops {/struct dma_map_ops {/g' \
-e 's/^const struct dma_map_ops;$/struct dma_map_ops;/' \
-e 's/const const struct dma_map_ops /const struct dma_map_ops /g';
sed -i -e 's/const \(struct dma_map_ops intel_dma_ops\)/\1/' \
$(git grep -l 'struct dma_map_ops intel_dma_ops');
sed -i -e 's/const \(struct dma_map_ops dma_iommu_ops\)/\1/' \
$(git grep -l 'struct dma_map_ops' | grep ^arch/powerpc);
sed -i -e '/^struct vmd_dev {$/,/^};$/ s/const \(struct dma_map_ops[[:blank:]]dma_ops;\)/\1/' \
-e '/^static void vmd_setup_dma_ops/,/^}$/ s/const \(struct dma_map_ops \*dest\)/\1/' \
-e 's/const \(struct dma_map_ops \*dest = \&vmd->dma_ops\)/\1/' \
drivers/pci/host/*.c
sed -i -e '/^void __init pci_iommu_alloc(void)$/,/^}$/ s/dma_ops->/intel_dma_ops./' arch/ia64/kernel/pci-dma.c
sed -i -e 's/static const struct dma_map_ops sn_dma_ops/static struct dma_map_ops sn_dma_ops/' arch/ia64/sn/pci/pci_dma.c
sed -i -e 's/(const struct dma_map_ops \*)//' drivers/misc/mic/bus/vop_bus.c

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: Russell King <linux@armlinux.org.uk>
Cc: x86@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>

Authored by Bart Van Assche; committed by Doug Ledford.
Commit IDs: 5299709d, 102c5ce0

+227 -227
+2 -2
arch/alpha/include/asm/dma-mapping.h
··· 1 1 #ifndef _ALPHA_DMA_MAPPING_H 2 2 #define _ALPHA_DMA_MAPPING_H 3 3 4 - extern struct dma_map_ops *dma_ops; 4 + extern const struct dma_map_ops *dma_ops; 5 5 6 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 6 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 7 7 { 8 8 return dma_ops; 9 9 }
+2 -2
arch/alpha/kernel/pci-noop.c
··· 128 128 return mask < 0x00ffffffUL ? 0 : 1; 129 129 } 130 130 131 - struct dma_map_ops alpha_noop_ops = { 131 + const struct dma_map_ops alpha_noop_ops = { 132 132 .alloc = alpha_noop_alloc_coherent, 133 133 .free = dma_noop_free_coherent, 134 134 .map_page = dma_noop_map_page, ··· 137 137 .dma_supported = alpha_noop_supported, 138 138 }; 139 139 140 - struct dma_map_ops *dma_ops = &alpha_noop_ops; 140 + const struct dma_map_ops *dma_ops = &alpha_noop_ops; 141 141 EXPORT_SYMBOL(dma_ops);
+2 -2
arch/alpha/kernel/pci_iommu.c
··· 939 939 return dma_addr == 0; 940 940 } 941 941 942 - struct dma_map_ops alpha_pci_ops = { 942 + const struct dma_map_ops alpha_pci_ops = { 943 943 .alloc = alpha_pci_alloc_coherent, 944 944 .free = alpha_pci_free_coherent, 945 945 .map_page = alpha_pci_map_page, ··· 950 950 .dma_supported = alpha_pci_supported, 951 951 }; 952 952 953 - struct dma_map_ops *dma_ops = &alpha_pci_ops; 953 + const struct dma_map_ops *dma_ops = &alpha_pci_ops; 954 954 EXPORT_SYMBOL(dma_ops);
+2 -2
arch/arc/include/asm/dma-mapping.h
··· 18 18 #include <plat/dma.h> 19 19 #endif 20 20 21 - extern struct dma_map_ops arc_dma_ops; 21 + extern const struct dma_map_ops arc_dma_ops; 22 22 23 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 23 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 24 24 { 25 25 return &arc_dma_ops; 26 26 }
+1 -1
arch/arc/mm/dma.c
··· 218 218 return dma_mask == DMA_BIT_MASK(32); 219 219 } 220 220 221 - struct dma_map_ops arc_dma_ops = { 221 + const struct dma_map_ops arc_dma_ops = { 222 222 .alloc = arc_dma_alloc, 223 223 .free = arc_dma_free, 224 224 .mmap = arc_dma_mmap,
+1 -1
arch/arm/common/dmabounce.c
··· 452 452 return arm_dma_ops.set_dma_mask(dev, dma_mask); 453 453 } 454 454 455 - static struct dma_map_ops dmabounce_ops = { 455 + static const struct dma_map_ops dmabounce_ops = { 456 456 .alloc = arm_dma_alloc, 457 457 .free = arm_dma_free, 458 458 .mmap = arm_dma_mmap,
+1 -1
arch/arm/include/asm/device.h
··· 7 7 #define ASMARM_DEVICE_H 8 8 9 9 struct dev_archdata { 10 - struct dma_map_ops *dma_ops; 10 + const struct dma_map_ops *dma_ops; 11 11 #ifdef CONFIG_DMABOUNCE 12 12 struct dmabounce_device_info *dmabounce; 13 13 #endif
+5 -5
arch/arm/include/asm/dma-mapping.h
··· 13 13 #include <asm/xen/hypervisor.h> 14 14 15 15 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 16 - extern struct dma_map_ops arm_dma_ops; 17 - extern struct dma_map_ops arm_coherent_dma_ops; 16 + extern const struct dma_map_ops arm_dma_ops; 17 + extern const struct dma_map_ops arm_coherent_dma_ops; 18 18 19 - static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) 19 + static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) 20 20 { 21 21 if (dev && dev->archdata.dma_ops) 22 22 return dev->archdata.dma_ops; 23 23 return &arm_dma_ops; 24 24 } 25 25 26 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 26 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 27 27 { 28 28 if (xen_initial_domain()) 29 29 return xen_dma_ops; ··· 31 31 return __generic_dma_ops(dev); 32 32 } 33 33 34 - static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) 34 + static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) 35 35 { 36 36 BUG_ON(!dev); 37 37 dev->archdata.dma_ops = ops;
+11 -11
arch/arm/mm/dma-mapping.c
··· 180 180 __dma_page_cpu_to_dev(page, offset, size, dir); 181 181 } 182 182 183 - struct dma_map_ops arm_dma_ops = { 183 + const struct dma_map_ops arm_dma_ops = { 184 184 .alloc = arm_dma_alloc, 185 185 .free = arm_dma_free, 186 186 .mmap = arm_dma_mmap, ··· 204 204 void *cpu_addr, dma_addr_t dma_addr, size_t size, 205 205 unsigned long attrs); 206 206 207 - struct dma_map_ops arm_coherent_dma_ops = { 207 + const struct dma_map_ops arm_coherent_dma_ops = { 208 208 .alloc = arm_coherent_dma_alloc, 209 209 .free = arm_coherent_dma_free, 210 210 .mmap = arm_coherent_dma_mmap, ··· 1067 1067 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 1068 1068 enum dma_data_direction dir, unsigned long attrs) 1069 1069 { 1070 - struct dma_map_ops *ops = get_dma_ops(dev); 1070 + const struct dma_map_ops *ops = get_dma_ops(dev); 1071 1071 struct scatterlist *s; 1072 1072 int i, j; 1073 1073 ··· 1101 1101 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 1102 1102 enum dma_data_direction dir, unsigned long attrs) 1103 1103 { 1104 - struct dma_map_ops *ops = get_dma_ops(dev); 1104 + const struct dma_map_ops *ops = get_dma_ops(dev); 1105 1105 struct scatterlist *s; 1106 1106 1107 1107 int i; ··· 1120 1120 void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 1121 1121 int nents, enum dma_data_direction dir) 1122 1122 { 1123 - struct dma_map_ops *ops = get_dma_ops(dev); 1123 + const struct dma_map_ops *ops = get_dma_ops(dev); 1124 1124 struct scatterlist *s; 1125 1125 int i; 1126 1126 ··· 1139 1139 void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 1140 1140 int nents, enum dma_data_direction dir) 1141 1141 { 1142 - struct dma_map_ops *ops = get_dma_ops(dev); 1142 + const struct dma_map_ops *ops = get_dma_ops(dev); 1143 1143 struct scatterlist *s; 1144 1144 int i; 1145 1145 ··· 2099 2099 __dma_page_cpu_to_dev(page, offset, size, dir); 2100 2100 } 2101 2101 2102 - struct dma_map_ops iommu_ops = { 2102 + const struct dma_map_ops iommu_ops = { 2103 2103 .alloc = arm_iommu_alloc_attrs, 2104 2104 .free = arm_iommu_free_attrs, 2105 2105 .mmap = arm_iommu_mmap_attrs, ··· 2119 2119 .unmap_resource = arm_iommu_unmap_resource, 2120 2120 }; 2121 2121 2122 - struct dma_map_ops iommu_coherent_ops = { 2122 + const struct dma_map_ops iommu_coherent_ops = { 2123 2123 .alloc = arm_coherent_iommu_alloc_attrs, 2124 2124 .free = arm_coherent_iommu_free_attrs, 2125 2125 .mmap = arm_coherent_iommu_mmap_attrs, ··· 2319 2319 } 2320 2320 EXPORT_SYMBOL_GPL(arm_iommu_detach_device); 2321 2321 2322 - static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 2322 + static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 2323 2323 { 2324 2324 return coherent ? &iommu_coherent_ops : &iommu_ops; 2325 2325 } ··· 2374 2374 2375 2375 #endif /* CONFIG_ARM_DMA_USE_IOMMU */ 2376 2376 2377 - static struct dma_map_ops *arm_get_dma_map_ops(bool coherent) 2377 + static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) 2378 2378 { 2379 2379 return coherent ? &arm_coherent_dma_ops : &arm_dma_ops; 2380 2380 } ··· 2382 2382 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 2383 2383 const struct iommu_ops *iommu, bool coherent) 2384 2384 { 2385 - struct dma_map_ops *dma_ops; 2385 + const struct dma_map_ops *dma_ops; 2386 2386 2387 2387 dev->archdata.dma_coherent = coherent; 2388 2388 if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
+2 -2
arch/arm/xen/mm.c
··· 182 182 } 183 183 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); 184 184 185 - struct dma_map_ops *xen_dma_ops; 185 + const struct dma_map_ops *xen_dma_ops; 186 186 EXPORT_SYMBOL(xen_dma_ops); 187 187 188 - static struct dma_map_ops xen_swiotlb_dma_ops = { 188 + static const struct dma_map_ops xen_swiotlb_dma_ops = { 189 189 .alloc = xen_swiotlb_alloc_coherent, 190 190 .free = xen_swiotlb_free_coherent, 191 191 .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+1 -1
arch/arm64/include/asm/device.h
··· 17 17 #define __ASM_DEVICE_H 18 18 19 19 struct dev_archdata { 20 - struct dma_map_ops *dma_ops; 20 + const struct dma_map_ops *dma_ops; 21 21 #ifdef CONFIG_IOMMU_API 22 22 void *iommu; /* private IOMMU data */ 23 23 #endif
+3 -3
arch/arm64/include/asm/dma-mapping.h
··· 25 25 #include <asm/xen/hypervisor.h> 26 26 27 27 #define DMA_ERROR_CODE (~(dma_addr_t)0) 28 - extern struct dma_map_ops dummy_dma_ops; 28 + extern const struct dma_map_ops dummy_dma_ops; 29 29 30 - static inline struct dma_map_ops *__generic_dma_ops(struct device *dev) 30 + static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev) 31 31 { 32 32 if (dev && dev->archdata.dma_ops) 33 33 return dev->archdata.dma_ops; ··· 39 39 return &dummy_dma_ops; 40 40 } 41 41 42 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 42 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 43 43 { 44 44 if (xen_initial_domain()) 45 45 return xen_dma_ops;
+3 -3
arch/arm64/mm/dma-mapping.c
··· 352 352 return 1; 353 353 } 354 354 355 - static struct dma_map_ops swiotlb_dma_ops = { 355 + static const struct dma_map_ops swiotlb_dma_ops = { 356 356 .alloc = __dma_alloc, 357 357 .free = __dma_free, 358 358 .mmap = __swiotlb_mmap, ··· 505 505 return 0; 506 506 } 507 507 508 - struct dma_map_ops dummy_dma_ops = { 508 + const struct dma_map_ops dummy_dma_ops = { 509 509 .alloc = __dummy_alloc, 510 510 .free = __dummy_free, 511 511 .mmap = __dummy_mmap, ··· 784 784 iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs); 785 785 } 786 786 787 - static struct dma_map_ops iommu_dma_ops = { 787 + static const struct dma_map_ops iommu_dma_ops = { 788 788 .alloc = __iommu_alloc_attrs, 789 789 .free = __iommu_free_attrs, 790 790 .mmap = __iommu_mmap_attrs,
+2 -2
arch/avr32/include/asm/dma-mapping.h
··· 4 4 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 5 5 int direction); 6 6 7 - extern struct dma_map_ops avr32_dma_ops; 7 + extern const struct dma_map_ops avr32_dma_ops; 8 8 9 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 9 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 10 10 { 11 11 return &avr32_dma_ops; 12 12 }
+1 -1
arch/avr32/mm/dma-coherent.c
··· 191 191 dma_cache_sync(dev, sg_virt(sg), sg->length, direction); 192 192 } 193 193 194 - struct dma_map_ops avr32_dma_ops = { 194 + const struct dma_map_ops avr32_dma_ops = { 195 195 .alloc = avr32_dma_alloc, 196 196 .free = avr32_dma_free, 197 197 .map_page = avr32_dma_map_page,
+2 -2
arch/blackfin/include/asm/dma-mapping.h
··· 36 36 __dma_sync(addr, size, dir); 37 37 } 38 38 39 - extern struct dma_map_ops bfin_dma_ops; 39 + extern const struct dma_map_ops bfin_dma_ops; 40 40 41 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 41 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 42 42 { 43 43 return &bfin_dma_ops; 44 44 }
+1 -1
arch/blackfin/kernel/dma-mapping.c
··· 159 159 _dma_sync(handle, size, dir); 160 160 } 161 161 162 - struct dma_map_ops bfin_dma_ops = { 162 + const struct dma_map_ops bfin_dma_ops = { 163 163 .alloc = bfin_dma_alloc, 164 164 .free = bfin_dma_free, 165 165
+2 -2
arch/c6x/include/asm/dma-mapping.h
··· 17 17 */ 18 18 #define DMA_ERROR_CODE ~0 19 19 20 - extern struct dma_map_ops c6x_dma_ops; 20 + extern const struct dma_map_ops c6x_dma_ops; 21 21 22 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 22 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 23 23 { 24 24 return &c6x_dma_ops; 25 25 }
+1 -1
arch/c6x/kernel/dma.c
··· 123 123 124 124 } 125 125 126 - struct dma_map_ops c6x_dma_ops = { 126 + const struct dma_map_ops c6x_dma_ops = { 127 127 .alloc = c6x_dma_alloc, 128 128 .free = c6x_dma_free, 129 129 .map_page = c6x_dma_map_page,
+1 -1
arch/cris/arch-v32/drivers/pci/dma.c
··· 69 69 return 1; 70 70 } 71 71 72 - struct dma_map_ops v32_dma_ops = { 72 + const struct dma_map_ops v32_dma_ops = { 73 73 .alloc = v32_dma_alloc, 74 74 .free = v32_dma_free, 75 75 .map_page = v32_dma_map_page,
+3 -3
arch/cris/include/asm/dma-mapping.h
··· 2 2 #define _ASM_CRIS_DMA_MAPPING_H 3 3 4 4 #ifdef CONFIG_PCI 5 - extern struct dma_map_ops v32_dma_ops; 5 + extern const struct dma_map_ops v32_dma_ops; 6 6 7 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 7 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 8 8 { 9 9 return &v32_dma_ops; 10 10 } 11 11 #else 12 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 12 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 13 13 { 14 14 BUG(); 15 15 return NULL;
+2 -2
arch/frv/include/asm/dma-mapping.h
··· 7 7 extern unsigned long __nongprelbss dma_coherent_mem_start; 8 8 extern unsigned long __nongprelbss dma_coherent_mem_end; 9 9 10 - extern struct dma_map_ops frv_dma_ops; 10 + extern const struct dma_map_ops frv_dma_ops; 11 11 12 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 12 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 13 13 { 14 14 return &frv_dma_ops; 15 15 }
+1 -1
arch/frv/mb93090-mb00/pci-dma-nommu.c
··· 164 164 return 1; 165 165 } 166 166 167 - struct dma_map_ops frv_dma_ops = { 167 + const struct dma_map_ops frv_dma_ops = { 168 168 .alloc = frv_dma_alloc, 169 169 .free = frv_dma_free, 170 170 .map_page = frv_dma_map_page,
+1 -1
arch/frv/mb93090-mb00/pci-dma.c
··· 106 106 return 1; 107 107 } 108 108 109 - struct dma_map_ops frv_dma_ops = { 109 + const struct dma_map_ops frv_dma_ops = { 110 110 .alloc = frv_dma_alloc, 111 111 .free = frv_dma_free, 112 112 .map_page = frv_dma_map_page,
+2 -2
arch/h8300/include/asm/dma-mapping.h
··· 1 1 #ifndef _H8300_DMA_MAPPING_H 2 2 #define _H8300_DMA_MAPPING_H 3 3 4 - extern struct dma_map_ops h8300_dma_map_ops; 4 + extern const struct dma_map_ops h8300_dma_map_ops; 5 5 6 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 6 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 7 7 { 8 8 return &h8300_dma_map_ops; 9 9 }
+1 -1
arch/h8300/kernel/dma.c
··· 60 60 return nents; 61 61 } 62 62 63 - struct dma_map_ops h8300_dma_map_ops = { 63 + const struct dma_map_ops h8300_dma_map_ops = { 64 64 .alloc = dma_alloc, 65 65 .free = dma_free, 66 66 .map_page = map_page,
+2 -2
arch/hexagon/include/asm/dma-mapping.h
··· 32 32 extern int bad_dma_address; 33 33 #define DMA_ERROR_CODE bad_dma_address 34 34 35 - extern struct dma_map_ops *dma_ops; 35 + extern const struct dma_map_ops *dma_ops; 36 36 37 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 37 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 38 38 { 39 39 if (unlikely(dev == NULL)) 40 40 return NULL;
+2 -2
arch/hexagon/kernel/dma.c
··· 25 25 #include <linux/module.h> 26 26 #include <asm/page.h> 27 27 28 - struct dma_map_ops *dma_ops; 28 + const struct dma_map_ops *dma_ops; 29 29 EXPORT_SYMBOL(dma_ops); 30 30 31 31 int bad_dma_address; /* globals are automatically initialized to zero */ ··· 203 203 dma_sync(dma_addr_to_virt(dma_handle), size, dir); 204 204 } 205 205 206 - struct dma_map_ops hexagon_dma_ops = { 206 + const struct dma_map_ops hexagon_dma_ops = { 207 207 .alloc = hexagon_dma_alloc_coherent, 208 208 .free = hexagon_free_coherent, 209 209 .map_sg = hexagon_map_sg,
+2 -2
arch/ia64/hp/common/hwsw_iommu.c
··· 18 18 #include <linux/export.h> 19 19 #include <asm/machvec.h> 20 20 21 - extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; 21 + extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; 22 22 23 23 /* swiotlb declarations & definitions: */ 24 24 extern int swiotlb_late_init_with_default_size (size_t size); ··· 34 34 !sba_dma_ops.dma_supported(dev, *dev->dma_mask); 35 35 } 36 36 37 - struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) 37 + const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) 38 38 { 39 39 if (use_swiotlb(dev)) 40 40 return &swiotlb_dma_ops;
+2 -2
arch/ia64/hp/common/sba_iommu.c
··· 2096 2096 /* This has to run before acpi_scan_init(). */ 2097 2097 arch_initcall(acpi_sba_ioc_init_acpi); 2098 2098 2099 - extern struct dma_map_ops swiotlb_dma_ops; 2099 + extern const struct dma_map_ops swiotlb_dma_ops; 2100 2100 2101 2101 static int __init 2102 2102 sba_init(void) ··· 2216 2216 2217 2217 __setup("sbapagesize=",sba_page_override); 2218 2218 2219 - struct dma_map_ops sba_dma_ops = { 2219 + const struct dma_map_ops sba_dma_ops = { 2220 2220 .alloc = sba_alloc_coherent, 2221 2221 .free = sba_free_coherent, 2222 2222 .map_page = sba_map_page,
+1 -1
arch/ia64/include/asm/dma-mapping.h
··· 14 14 15 15 #define DMA_ERROR_CODE 0 16 16 17 - extern struct dma_map_ops *dma_ops; 17 + extern const struct dma_map_ops *dma_ops; 18 18 extern struct ia64_machine_vector ia64_mv; 19 19 extern void set_iommu_machvec(void); 20 20
+2 -2
arch/ia64/include/asm/machvec.h
··· 44 44 /* DMA-mapping interface: */ 45 45 typedef void ia64_mv_dma_init (void); 46 46 typedef u64 ia64_mv_dma_get_required_mask (struct device *); 47 - typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); 47 + typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); 48 48 49 49 /* 50 50 * WARNING: The legacy I/O space is _architected_. Platforms are ··· 248 248 # endif /* CONFIG_IA64_GENERIC */ 249 249 250 250 extern void swiotlb_dma_init(void); 251 - extern struct dma_map_ops *dma_get_ops(struct device *); 251 + extern const struct dma_map_ops *dma_get_ops(struct device *); 252 252 253 253 /* 254 254 * Define default versions so we can extend machvec for new platforms without having
+2 -2
arch/ia64/kernel/dma-mapping.c
··· 4 4 /* Set this to 1 if there is a HW IOMMU in the system */ 5 5 int iommu_detected __read_mostly; 6 6 7 - struct dma_map_ops *dma_ops; 7 + const struct dma_map_ops *dma_ops; 8 8 EXPORT_SYMBOL(dma_ops); 9 9 10 10 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) ··· 17 17 } 18 18 fs_initcall(dma_init); 19 19 20 - struct dma_map_ops *dma_get_ops(struct device *dev) 20 + const struct dma_map_ops *dma_get_ops(struct device *dev) 21 21 { 22 22 return dma_ops; 23 23 }
+5 -5
arch/ia64/kernel/pci-dma.c
··· 90 90 { 91 91 dma_ops = &intel_dma_ops; 92 92 93 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single; 94 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg; 95 - dma_ops->sync_single_for_device = machvec_dma_sync_single; 96 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg; 97 - dma_ops->dma_supported = iommu_dma_supported; 93 + intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single; 94 + intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg; 95 + intel_dma_ops.sync_single_for_device = machvec_dma_sync_single; 96 + intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg; 97 + intel_dma_ops.dma_supported = iommu_dma_supported; 98 98 99 99 /* 100 100 * The order of these functions is important for
+1 -1
arch/ia64/kernel/pci-swiotlb.c
··· 30 30 swiotlb_free_coherent(dev, size, vaddr, dma_addr); 31 31 } 32 32 33 - struct dma_map_ops swiotlb_dma_ops = { 33 + const struct dma_map_ops swiotlb_dma_ops = { 34 34 .alloc = ia64_swiotlb_alloc_coherent, 35 35 .free = ia64_swiotlb_free_coherent, 36 36 .map_page = swiotlb_map_page,
+1 -1
arch/m32r/include/asm/device.h
··· 4 4 * This file is released under the GPLv2 5 5 */ 6 6 struct dev_archdata { 7 - struct dma_map_ops *dma_ops; 7 + const struct dma_map_ops *dma_ops; 8 8 }; 9 9 10 10 struct pdev_archdata {
+1 -1
arch/m32r/include/asm/dma-mapping.h
··· 10 10 11 11 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 12 12 13 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 13 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 14 14 { 15 15 if (dev && dev->archdata.dma_ops) 16 16 return dev->archdata.dma_ops;
+2 -2
arch/m68k/include/asm/dma-mapping.h
··· 1 1 #ifndef _M68K_DMA_MAPPING_H 2 2 #define _M68K_DMA_MAPPING_H 3 3 4 - extern struct dma_map_ops m68k_dma_ops; 4 + extern const struct dma_map_ops m68k_dma_ops; 5 5 6 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 6 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 7 7 { 8 8 return &m68k_dma_ops; 9 9 }
+1 -1
arch/m68k/kernel/dma.c
··· 158 158 return nents; 159 159 } 160 160 161 - struct dma_map_ops m68k_dma_ops = { 161 + const struct dma_map_ops m68k_dma_ops = { 162 162 .alloc = m68k_dma_alloc, 163 163 .free = m68k_dma_free, 164 164 .map_page = m68k_dma_map_page,
+2 -2
arch/metag/include/asm/dma-mapping.h
··· 1 1 #ifndef _ASM_METAG_DMA_MAPPING_H 2 2 #define _ASM_METAG_DMA_MAPPING_H 3 3 4 - extern struct dma_map_ops metag_dma_ops; 4 + extern const struct dma_map_ops metag_dma_ops; 5 5 6 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 6 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 7 7 { 8 8 return &metag_dma_ops; 9 9 }
+1 -1
arch/metag/kernel/dma.c
··· 575 575 dma_sync_for_device(sg_virt(sg), sg->length, direction); 576 576 } 577 577 578 - struct dma_map_ops metag_dma_ops = { 578 + const struct dma_map_ops metag_dma_ops = { 579 579 .alloc = metag_dma_alloc, 580 580 .free = metag_dma_free, 581 581 .map_page = metag_dma_map_page,
+2 -2
arch/microblaze/include/asm/dma-mapping.h
··· 36 36 /* 37 37 * Available generic sets of operations 38 38 */ 39 - extern struct dma_map_ops dma_direct_ops; 39 + extern const struct dma_map_ops dma_direct_ops; 40 40 41 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 41 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 42 42 { 43 43 return &dma_direct_ops; 44 44 }
+1 -1
arch/microblaze/kernel/dma.c
··· 187 187 #endif 188 188 } 189 189 190 - struct dma_map_ops dma_direct_ops = { 190 + const struct dma_map_ops dma_direct_ops = { 191 191 .alloc = dma_direct_alloc_coherent, 192 192 .free = dma_direct_free_coherent, 193 193 .mmap = dma_direct_mmap_coherent,
+2 -2
arch/mips/cavium-octeon/dma-octeon.c
··· 205 205 } 206 206 207 207 struct octeon_dma_map_ops { 208 - struct dma_map_ops dma_map_ops; 208 + const struct dma_map_ops dma_map_ops; 209 209 dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr); 210 210 phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr); 211 211 }; ··· 333 333 }, 334 334 }; 335 335 336 - struct dma_map_ops *octeon_pci_dma_map_ops; 336 + const struct dma_map_ops *octeon_pci_dma_map_ops; 337 337 338 338 void __init octeon_pci_dma_init(void) 339 339 {
+1 -1
arch/mips/include/asm/device.h
··· 10 10 11 11 struct dev_archdata { 12 12 /* DMA operations on that device */ 13 - struct dma_map_ops *dma_ops; 13 + const struct dma_map_ops *dma_ops; 14 14 15 15 #ifdef CONFIG_DMA_PERDEV_COHERENT 16 16 /* Non-zero if DMA is coherent with CPU caches */
+2 -2
arch/mips/include/asm/dma-mapping.h
··· 9 9 #include <dma-coherence.h> 10 10 #endif 11 11 12 - extern struct dma_map_ops *mips_dma_map_ops; 12 + extern const struct dma_map_ops *mips_dma_map_ops; 13 13 14 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 14 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 15 15 { 16 16 if (dev && dev->archdata.dma_ops) 17 17 return dev->archdata.dma_ops;
+1 -1
arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
··· 65 65 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); 66 66 67 67 struct dma_map_ops; 68 - extern struct dma_map_ops *octeon_pci_dma_map_ops; 68 + extern const struct dma_map_ops *octeon_pci_dma_map_ops; 69 69 extern char *octeon_swiotlb; 70 70 71 71 #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
+1 -1
arch/mips/include/asm/netlogic/common.h
··· 88 88 extern char nlm_reset_entry[], nlm_reset_entry_end[]; 89 89 90 90 /* SWIOTLB */ 91 - extern struct dma_map_ops nlm_swiotlb_dma_ops; 91 + extern const struct dma_map_ops nlm_swiotlb_dma_ops; 92 92 93 93 extern unsigned int nlm_threads_per_core; 94 94 extern cpumask_t nlm_cpumask;
+1 -1
arch/mips/loongson64/common/dma-swiotlb.c
··· 122 122 return daddr; 123 123 } 124 124 125 - static struct dma_map_ops loongson_dma_map_ops = { 125 + static const struct dma_map_ops loongson_dma_map_ops = { 126 126 .alloc = loongson_dma_alloc_coherent, 127 127 .free = loongson_dma_free_coherent, 128 128 .map_page = loongson_dma_map_page,
+2 -2
arch/mips/mm/dma-default.c
··· 417 417 418 418 EXPORT_SYMBOL(dma_cache_sync); 419 419 420 - static struct dma_map_ops mips_default_dma_map_ops = { 420 + static const struct dma_map_ops mips_default_dma_map_ops = { 421 421 .alloc = mips_dma_alloc_coherent, 422 422 .free = mips_dma_free_coherent, 423 423 .mmap = mips_dma_mmap, ··· 433 433 .dma_supported = mips_dma_supported 434 434 }; 435 435 436 - struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; 436 + const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops; 437 437 EXPORT_SYMBOL(mips_dma_map_ops); 438 438 439 439 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+1 -1
arch/mips/netlogic/common/nlm-dma.c
··· 67 67 swiotlb_free_coherent(dev, size, vaddr, dma_handle); 68 68 } 69 69 70 - struct dma_map_ops nlm_swiotlb_dma_ops = { 70 + const struct dma_map_ops nlm_swiotlb_dma_ops = { 71 71 .alloc = nlm_dma_alloc_coherent, 72 72 .free = nlm_dma_free_coherent, 73 73 .map_page = swiotlb_map_page,
+2 -2
arch/mn10300/include/asm/dma-mapping.h
··· 14 14 #include <asm/cache.h> 15 15 #include <asm/io.h> 16 16 17 - extern struct dma_map_ops mn10300_dma_ops; 17 + extern const struct dma_map_ops mn10300_dma_ops; 18 18 19 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 19 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 20 20 { 21 21 return &mn10300_dma_ops; 22 22 }
+1 -1
arch/mn10300/mm/dma-alloc.c
··· 121 121 return 1; 122 122 } 123 123 124 - struct dma_map_ops mn10300_dma_ops = { 124 + const struct dma_map_ops mn10300_dma_ops = { 125 125 .alloc = mn10300_dma_alloc, 126 126 .free = mn10300_dma_free, 127 127 .map_page = mn10300_dma_map_page,
+2 -2
arch/nios2/include/asm/dma-mapping.h
··· 10 10 #ifndef _ASM_NIOS2_DMA_MAPPING_H 11 11 #define _ASM_NIOS2_DMA_MAPPING_H 12 12 13 - extern struct dma_map_ops nios2_dma_ops; 13 + extern const struct dma_map_ops nios2_dma_ops; 14 14 15 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 15 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 16 16 { 17 17 return &nios2_dma_ops; 18 18 }
+1 -1
arch/nios2/mm/dma-mapping.c
··· 192 192 193 193 } 194 194 195 - struct dma_map_ops nios2_dma_ops = { 195 + const struct dma_map_ops nios2_dma_ops = { 196 196 .alloc = nios2_dma_alloc, 197 197 .free = nios2_dma_free, 198 198 .map_page = nios2_dma_map_page,
+2 -2
arch/openrisc/include/asm/dma-mapping.h
··· 28 28 29 29 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 30 30 31 - extern struct dma_map_ops or1k_dma_map_ops; 31 + extern const struct dma_map_ops or1k_dma_map_ops; 32 32 33 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 33 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 34 34 { 35 35 return &or1k_dma_map_ops; 36 36 }
+1 -1
arch/openrisc/kernel/dma.c
··· 232 232 mtspr(SPR_DCBFR, cl); 233 233 } 234 234 235 - struct dma_map_ops or1k_dma_map_ops = { 235 + const struct dma_map_ops or1k_dma_map_ops = { 236 236 .alloc = or1k_dma_alloc, 237 237 .free = or1k_dma_free, 238 238 .map_page = or1k_map_page,
+4 -4
arch/parisc/include/asm/dma-mapping.h
··· 21 21 */ 22 22 23 23 #ifdef CONFIG_PA11 24 - extern struct dma_map_ops pcxl_dma_ops; 25 - extern struct dma_map_ops pcx_dma_ops; 24 + extern const struct dma_map_ops pcxl_dma_ops; 25 + extern const struct dma_map_ops pcx_dma_ops; 26 26 #endif 27 27 28 - extern struct dma_map_ops *hppa_dma_ops; 28 + extern const struct dma_map_ops *hppa_dma_ops; 29 29 30 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 30 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 31 31 { 32 32 return hppa_dma_ops; 33 33 }
+1 -1
arch/parisc/kernel/drivers.c
··· 40 40 #include <asm/parisc-device.h> 41 41 42 42 /* See comments in include/asm-parisc/pci.h */ 43 - struct dma_map_ops *hppa_dma_ops __read_mostly; 43 + const struct dma_map_ops *hppa_dma_ops __read_mostly; 44 44 EXPORT_SYMBOL(hppa_dma_ops); 45 45 46 46 static struct device root = {
+2 -2
arch/parisc/kernel/pci-dma.c
··· 572 572 flush_kernel_vmap_range(sg_virt(sg), sg->length); 573 573 } 574 574 575 - struct dma_map_ops pcxl_dma_ops = { 575 + const struct dma_map_ops pcxl_dma_ops = { 576 576 .dma_supported = pa11_dma_supported, 577 577 .alloc = pa11_dma_alloc, 578 578 .free = pa11_dma_free, ··· 608 608 return; 609 609 } 610 610 611 - struct dma_map_ops pcx_dma_ops = { 611 + const struct dma_map_ops pcx_dma_ops = { 612 612 .dma_supported = pa11_dma_supported, 613 613 .alloc = pcx_dma_alloc, 614 614 .free = pcx_dma_free,
+1 -1
arch/powerpc/include/asm/device.h
··· 21 21 */ 22 22 struct dev_archdata { 23 23 /* DMA operations on that device */ 24 - struct dma_map_ops *dma_ops; 24 + const struct dma_map_ops *dma_ops; 25 25 26 26 /* 27 27 * These two used to be a union. However, with the hybrid ops we need
+3 -3
arch/powerpc/include/asm/dma-mapping.h
··· 76 76 #ifdef CONFIG_PPC64 77 77 extern struct dma_map_ops dma_iommu_ops; 78 78 #endif 79 - extern struct dma_map_ops dma_direct_ops; 79 + extern const struct dma_map_ops dma_direct_ops; 80 80 81 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 81 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 82 82 { 83 83 /* We don't handle the NULL dev case for ISA for now. We could 84 84 * do it via an out of line call but it is not needed for now. The ··· 91 91 return dev->archdata.dma_ops; 92 92 } 93 93 94 - static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) 94 + static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) 95 95 { 96 96 dev->archdata.dma_ops = ops; 97 97 }
+2 -2
arch/powerpc/include/asm/pci.h
··· 53 53 } 54 54 55 55 #ifdef CONFIG_PCI 56 - extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); 57 - extern struct dma_map_ops *get_pci_dma_ops(void); 56 + extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); 57 + extern const struct dma_map_ops *get_pci_dma_ops(void); 58 58 #else /* CONFIG_PCI */ 59 59 #define set_pci_dma_ops(d) 60 60 #define get_pci_dma_ops() NULL
+1 -1
arch/powerpc/include/asm/swiotlb.h
··· 13 13 14 14 #include <linux/swiotlb.h> 15 15 16 - extern struct dma_map_ops swiotlb_dma_ops; 16 + extern const struct dma_map_ops swiotlb_dma_ops; 17 17 18 18 static inline void dma_mark_clean(void *addr, size_t size) {} 19 19
+1 -1
arch/powerpc/kernel/dma-swiotlb.c
··· 46 46 * map_page, and unmap_page on highmem, use normal dma_ops 47 47 * for everything else. 48 48 */ 49 - struct dma_map_ops swiotlb_dma_ops = { 49 + const struct dma_map_ops swiotlb_dma_ops = { 50 50 .alloc = __dma_direct_alloc_coherent, 51 51 .free = __dma_direct_free_coherent, 52 52 .mmap = dma_direct_mmap_coherent,
+3 -3
arch/powerpc/kernel/dma.c
··· 274 274 } 275 275 #endif 276 276 277 - struct dma_map_ops dma_direct_ops = { 277 + const struct dma_map_ops dma_direct_ops = { 278 278 .alloc = dma_direct_alloc_coherent, 279 279 .free = dma_direct_free_coherent, 280 280 .mmap = dma_direct_mmap_coherent, ··· 316 316 317 317 int __dma_set_mask(struct device *dev, u64 dma_mask) 318 318 { 319 - struct dma_map_ops *dma_ops = get_dma_ops(dev); 319 + const struct dma_map_ops *dma_ops = get_dma_ops(dev); 320 320 321 321 if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL)) 322 322 return dma_ops->set_dma_mask(dev, dma_mask); ··· 344 344 345 345 u64 __dma_get_required_mask(struct device *dev) 346 346 { 347 - struct dma_map_ops *dma_ops = get_dma_ops(dev); 347 + const struct dma_map_ops *dma_ops = get_dma_ops(dev); 348 348 349 349 if (unlikely(dma_ops == NULL)) 350 350 return 0;
+3 -3
arch/powerpc/kernel/pci-common.c
··· 59 59 EXPORT_SYMBOL(isa_mem_base); 60 60 61 61 62 - static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; 62 + static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; 63 63 64 - void set_pci_dma_ops(struct dma_map_ops *dma_ops) 64 + void set_pci_dma_ops(const struct dma_map_ops *dma_ops) 65 65 { 66 66 pci_dma_ops = dma_ops; 67 67 } 68 68 69 - struct dma_map_ops *get_pci_dma_ops(void) 69 + const struct dma_map_ops *get_pci_dma_ops(void) 70 70 { 71 71 return pci_dma_ops; 72 72 }
+2 -2
arch/powerpc/platforms/cell/iommu.c
··· 651 651 652 652 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); 653 653 654 - static struct dma_map_ops dma_iommu_fixed_ops = { 654 + static const struct dma_map_ops dma_iommu_fixed_ops = { 655 655 .alloc = dma_fixed_alloc_coherent, 656 656 .free = dma_fixed_free_coherent, 657 657 .map_sg = dma_fixed_map_sg, ··· 1172 1172 1173 1173 static u64 cell_dma_get_required_mask(struct device *dev) 1174 1174 { 1175 - struct dma_map_ops *dma_ops; 1175 + const struct dma_map_ops *dma_ops; 1176 1176 1177 1177 if (!dev->dma_mask) 1178 1178 return 0;
+1 -1
arch/powerpc/platforms/powernv/npu-dma.c
··· 115 115 return 0; 116 116 } 117 117 118 - static struct dma_map_ops dma_npu_ops = { 118 + static const struct dma_map_ops dma_npu_ops = { 119 119 .map_page = dma_npu_map_page, 120 120 .map_sg = dma_npu_map_sg, 121 121 .alloc = dma_npu_alloc,
+2 -2
arch/powerpc/platforms/ps3/system-bus.c
··· 701 701 return DMA_BIT_MASK(32); 702 702 } 703 703 704 - static struct dma_map_ops ps3_sb_dma_ops = { 704 + static const struct dma_map_ops ps3_sb_dma_ops = { 705 705 .alloc = ps3_alloc_coherent, 706 706 .free = ps3_free_coherent, 707 707 .map_sg = ps3_sb_map_sg, ··· 712 712 .unmap_page = ps3_unmap_page, 713 713 }; 714 714 715 - static struct dma_map_ops ps3_ioc0_dma_ops = { 715 + static const struct dma_map_ops ps3_ioc0_dma_ops = { 716 716 .alloc = ps3_alloc_coherent, 717 717 .free = ps3_free_coherent, 718 718 .map_sg = ps3_ioc0_map_sg,
+1 -1
arch/powerpc/platforms/pseries/ibmebus.c
··· 136 136 return DMA_BIT_MASK(64); 137 137 } 138 138 139 - static struct dma_map_ops ibmebus_dma_ops = { 139 + static const struct dma_map_ops ibmebus_dma_ops = { 140 140 .alloc = ibmebus_alloc_coherent, 141 141 .free = ibmebus_free_coherent, 142 142 .map_sg = ibmebus_map_sg,
+1 -1
arch/powerpc/platforms/pseries/vio.c
··· 615 615 return dma_iommu_ops.get_required_mask(dev); 616 616 } 617 617 618 - static struct dma_map_ops vio_dma_mapping_ops = { 618 + static const struct dma_map_ops vio_dma_mapping_ops = { 619 619 .alloc = vio_dma_iommu_alloc_coherent, 620 620 .free = vio_dma_iommu_free_coherent, 621 621 .mmap = dma_direct_mmap_coherent,
+1 -1
arch/s390/include/asm/device.h
··· 4 4 * This file is released under the GPLv2 5 5 */ 6 6 struct dev_archdata { 7 - struct dma_map_ops *dma_ops; 7 + const struct dma_map_ops *dma_ops; 8 8 }; 9 9 10 10 struct pdev_archdata {
+2 -2
arch/s390/include/asm/dma-mapping.h
··· 10 10 11 11 #define DMA_ERROR_CODE (~(dma_addr_t) 0x0) 12 12 13 - extern struct dma_map_ops s390_pci_dma_ops; 13 + extern const struct dma_map_ops s390_pci_dma_ops; 14 14 15 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 15 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 16 16 { 17 17 if (dev && dev->archdata.dma_ops) 18 18 return dev->archdata.dma_ops;
+1 -1
arch/s390/pci/pci_dma.c
··· 650 650 } 651 651 fs_initcall(dma_debug_do_init); 652 652 653 - struct dma_map_ops s390_pci_dma_ops = { 653 + const struct dma_map_ops s390_pci_dma_ops = { 654 654 .alloc = s390_dma_alloc, 655 655 .free = s390_dma_free, 656 656 .map_sg = s390_dma_map_sg,
+2 -2
arch/sh/include/asm/dma-mapping.h
··· 1 1 #ifndef __ASM_SH_DMA_MAPPING_H 2 2 #define __ASM_SH_DMA_MAPPING_H 3 3 4 - extern struct dma_map_ops *dma_ops; 4 + extern const struct dma_map_ops *dma_ops; 5 5 extern void no_iommu_init(void); 6 6 7 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 7 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 8 8 { 9 9 return dma_ops; 10 10 }
+1 -1
arch/sh/kernel/dma-nommu.c
··· 65 65 } 66 66 #endif 67 67 68 - struct dma_map_ops nommu_dma_ops = { 68 + const struct dma_map_ops nommu_dma_ops = { 69 69 .alloc = dma_generic_alloc_coherent, 70 70 .free = dma_generic_free_coherent, 71 71 .map_page = nommu_map_page,
+1 -1
arch/sh/mm/consistent.c
··· 22 22 23 23 #define PREALLOC_DMA_DEBUG_ENTRIES 4096 24 24 25 - struct dma_map_ops *dma_ops; 25 + const struct dma_map_ops *dma_ops; 26 26 EXPORT_SYMBOL(dma_ops); 27 27 28 28 static int __init dma_init(void)
+4 -4
arch/sparc/include/asm/dma-mapping.h
··· 18 18 */ 19 19 } 20 20 21 - extern struct dma_map_ops *dma_ops; 22 - extern struct dma_map_ops *leon_dma_ops; 23 - extern struct dma_map_ops pci32_dma_ops; 21 + extern const struct dma_map_ops *dma_ops; 22 + extern const struct dma_map_ops *leon_dma_ops; 23 + extern const struct dma_map_ops pci32_dma_ops; 24 24 25 25 extern struct bus_type pci_bus_type; 26 26 27 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 27 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 28 28 { 29 29 #ifdef CONFIG_SPARC_LEON 30 30 if (sparc_cpu_model == sparc_leon)
+2 -2
arch/sparc/kernel/iommu.c
··· 741 741 spin_unlock_irqrestore(&iommu->lock, flags); 742 742 } 743 743 744 - static struct dma_map_ops sun4u_dma_ops = { 744 + static const struct dma_map_ops sun4u_dma_ops = { 745 745 .alloc = dma_4u_alloc_coherent, 746 746 .free = dma_4u_free_coherent, 747 747 .map_page = dma_4u_map_page, ··· 752 752 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, 753 753 }; 754 754 755 - struct dma_map_ops *dma_ops = &sun4u_dma_ops; 755 + const struct dma_map_ops *dma_ops = &sun4u_dma_ops; 756 756 EXPORT_SYMBOL(dma_ops); 757 757 758 758 int dma_supported(struct device *dev, u64 device_mask)
+4 -4
arch/sparc/kernel/ioport.c
··· 401 401 BUG(); 402 402 } 403 403 404 - static struct dma_map_ops sbus_dma_ops = { 404 + static const struct dma_map_ops sbus_dma_ops = { 405 405 .alloc = sbus_alloc_coherent, 406 406 .free = sbus_free_coherent, 407 407 .map_page = sbus_map_page, ··· 637 637 } 638 638 } 639 639 640 - struct dma_map_ops pci32_dma_ops = { 640 + const struct dma_map_ops pci32_dma_ops = { 641 641 .alloc = pci32_alloc_coherent, 642 642 .free = pci32_free_coherent, 643 643 .map_page = pci32_map_page, ··· 652 652 EXPORT_SYMBOL(pci32_dma_ops); 653 653 654 654 /* leon re-uses pci32_dma_ops */ 655 - struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; 655 + const struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; 656 656 EXPORT_SYMBOL(leon_dma_ops); 657 657 658 - struct dma_map_ops *dma_ops = &sbus_dma_ops; 658 + const struct dma_map_ops *dma_ops = &sbus_dma_ops; 659 659 EXPORT_SYMBOL(dma_ops); 660 660 661 661
+1 -1
arch/sparc/kernel/pci_sun4v.c
··· 669 669 local_irq_restore(flags); 670 670 } 671 671 672 - static struct dma_map_ops sun4v_dma_ops = { 672 + static const struct dma_map_ops sun4v_dma_ops = { 673 673 .alloc = dma_4v_alloc_coherent, 674 674 .free = dma_4v_free_coherent, 675 675 .map_page = dma_4v_map_page,
+1 -1
arch/tile/include/asm/device.h
··· 18 18 19 19 struct dev_archdata { 20 20 /* DMA operations on that device */ 21 - struct dma_map_ops *dma_ops; 21 + const struct dma_map_ops *dma_ops; 22 22 23 23 /* Offset of the DMA address from the PA. */ 24 24 dma_addr_t dma_offset;
+6 -6
arch/tile/include/asm/dma-mapping.h
··· 24 24 #define ARCH_HAS_DMA_GET_REQUIRED_MASK 25 25 #endif 26 26 27 - extern struct dma_map_ops *tile_dma_map_ops; 28 - extern struct dma_map_ops *gx_pci_dma_map_ops; 29 - extern struct dma_map_ops *gx_legacy_pci_dma_map_ops; 30 - extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops; 27 + extern const struct dma_map_ops *tile_dma_map_ops; 28 + extern const struct dma_map_ops *gx_pci_dma_map_ops; 29 + extern const struct dma_map_ops *gx_legacy_pci_dma_map_ops; 30 + extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops; 31 31 32 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 32 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 33 33 { 34 34 if (dev && dev->archdata.dma_ops) 35 35 return dev->archdata.dma_ops; ··· 59 59 60 60 static inline void dma_mark_clean(void *addr, size_t size) {} 61 61 62 - static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) 62 + static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) 63 63 { 64 64 dev->archdata.dma_ops = ops; 65 65 }
+12 -12
arch/tile/kernel/pci-dma.c
··· 329 329 return 1;
330 330 }
331 331
332 - static struct dma_map_ops tile_default_dma_map_ops = {
332 + static const struct dma_map_ops tile_default_dma_map_ops = {
333 333 .alloc = tile_dma_alloc_coherent,
334 334 .free = tile_dma_free_coherent,
335 335 .map_page = tile_dma_map_page,
··· 344 344 .dma_supported = tile_dma_supported
345 345 };
346 346
347 - struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
347 + const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
348 348 EXPORT_SYMBOL(tile_dma_map_ops);
349 349
350 350 /* Generic PCI DMA mapping functions */
··· 516 516 return 1;
517 517 }
518 518
519 - static struct dma_map_ops tile_pci_default_dma_map_ops = {
519 + static const struct dma_map_ops tile_pci_default_dma_map_ops = {
520 520 .alloc = tile_pci_dma_alloc_coherent,
521 521 .free = tile_pci_dma_free_coherent,
522 522 .map_page = tile_pci_dma_map_page,
··· 531 531 .dma_supported = tile_pci_dma_supported
532 532 };
533 533
534 - struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
534 + const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
535 535 EXPORT_SYMBOL(gx_pci_dma_map_ops);
536 536
537 537 /* PCI DMA mapping functions for legacy PCI devices */
··· 552 552 swiotlb_free_coherent(dev, size, vaddr, dma_addr);
553 553 }
554 554
555 - static struct dma_map_ops pci_swiotlb_dma_ops = {
555 + static const struct dma_map_ops pci_swiotlb_dma_ops = {
556 556 .alloc = tile_swiotlb_alloc_coherent,
557 557 .free = tile_swiotlb_free_coherent,
558 558 .map_page = swiotlb_map_page,
··· 567 567 .mapping_error = swiotlb_dma_mapping_error,
568 568 };
569 569
570 - static struct dma_map_ops pci_hybrid_dma_ops = {
570 + static const struct dma_map_ops pci_hybrid_dma_ops = {
571 571 .alloc = tile_swiotlb_alloc_coherent,
572 572 .free = tile_swiotlb_free_coherent,
573 573 .map_page = tile_pci_dma_map_page,
··· 582 582 .dma_supported = tile_pci_dma_supported
583 583 };
584 584
585 - struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
586 - struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
585 + const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
586 + const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
587 587 #else
588 - struct dma_map_ops *gx_legacy_pci_dma_map_ops;
589 - struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
588 + const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
589 + const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
590 590 #endif
591 591 EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
592 592 EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
593 593
594 594 int dma_set_mask(struct device *dev, u64 mask)
595 595 {
596 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
596 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
597 597
598 598 /*
599 599 * For PCI devices with 64-bit DMA addressing capability, promote
··· 623 623 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
624 624 int dma_set_coherent_mask(struct device *dev, u64 mask)
625 625 {
626 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
626 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
627 627
628 628 /*
629 629 * For PCI devices with 64-bit DMA addressing capability, promote
+2 -2
arch/unicore32/include/asm/dma-mapping.h
··· 21 21 #include <asm/memory.h> 22 22 #include <asm/cacheflush.h> 23 23 24 - extern struct dma_map_ops swiotlb_dma_map_ops; 24 + extern const struct dma_map_ops swiotlb_dma_map_ops; 25 25 26 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 26 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 27 27 { 28 28 return &swiotlb_dma_map_ops; 29 29 }
+1 -1
arch/unicore32/mm/dma-swiotlb.c
··· 31 31 swiotlb_free_coherent(dev, size, vaddr, dma_addr); 32 32 } 33 33 34 - struct dma_map_ops swiotlb_dma_map_ops = { 34 + const struct dma_map_ops swiotlb_dma_map_ops = { 35 35 .alloc = unicore_swiotlb_alloc_coherent, 36 36 .free = unicore_swiotlb_free_coherent, 37 37 .map_sg = swiotlb_map_sg_attrs,
+2 -2
arch/x86/include/asm/device.h
··· 3 3 4 4 struct dev_archdata { 5 5 #ifdef CONFIG_X86_DEV_DMA_OPS 6 - struct dma_map_ops *dma_ops; 6 + const struct dma_map_ops *dma_ops; 7 7 #endif 8 8 #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) 9 9 void *iommu; /* hook for IOMMU specific extension */ ··· 13 13 #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS) 14 14 struct dma_domain { 15 15 struct list_head node; 16 - struct dma_map_ops *dma_ops; 16 + const struct dma_map_ops *dma_ops; 17 17 int domain_nr; 18 18 }; 19 19 void add_dma_domain(struct dma_domain *domain);
+2 -2
arch/x86/include/asm/dma-mapping.h
··· 25 25 extern struct device x86_dma_fallback_dev; 26 26 extern int panic_on_overflow; 27 27 28 - extern struct dma_map_ops *dma_ops; 28 + extern const struct dma_map_ops *dma_ops; 29 29 30 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 30 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 31 31 { 32 32 #ifndef CONFIG_X86_DEV_DMA_OPS 33 33 return dma_ops;
+1 -1
arch/x86/include/asm/iommu.h
··· 1 1 #ifndef _ASM_X86_IOMMU_H 2 2 #define _ASM_X86_IOMMU_H 3 3 4 - extern struct dma_map_ops nommu_dma_ops; 4 + extern const struct dma_map_ops nommu_dma_ops; 5 5 extern int force_iommu, no_iommu; 6 6 extern int iommu_detected; 7 7 extern int iommu_pass_through;
+1 -1
arch/x86/kernel/amd_gart_64.c
··· 695 695 return -1; 696 696 } 697 697 698 - static struct dma_map_ops gart_dma_ops = { 698 + static const struct dma_map_ops gart_dma_ops = { 699 699 .map_sg = gart_map_sg, 700 700 .unmap_sg = gart_unmap_sg, 701 701 .map_page = gart_map_page,
+1 -1
arch/x86/kernel/pci-calgary_64.c
··· 478 478 free_pages((unsigned long)vaddr, get_order(size)); 479 479 } 480 480 481 - static struct dma_map_ops calgary_dma_ops = { 481 + static const struct dma_map_ops calgary_dma_ops = { 482 482 .alloc = calgary_alloc_coherent, 483 483 .free = calgary_free_coherent, 484 484 .map_sg = calgary_map_sg,
+2 -2
arch/x86/kernel/pci-dma.c
··· 17 17 18 18 static int forbid_dac __read_mostly; 19 19 20 - struct dma_map_ops *dma_ops = &nommu_dma_ops; 20 + const struct dma_map_ops *dma_ops = &nommu_dma_ops; 21 21 EXPORT_SYMBOL(dma_ops); 22 22 23 23 static int iommu_sac_force __read_mostly; ··· 214 214 215 215 int dma_supported(struct device *dev, u64 mask) 216 216 { 217 - struct dma_map_ops *ops = get_dma_ops(dev); 217 + const struct dma_map_ops *ops = get_dma_ops(dev); 218 218 219 219 #ifdef CONFIG_PCI 220 220 if (mask > 0xffffffff && forbid_dac > 0) {
+1 -1
arch/x86/kernel/pci-nommu.c
··· 88 88 flush_write_buffers(); 89 89 } 90 90 91 - struct dma_map_ops nommu_dma_ops = { 91 + const struct dma_map_ops nommu_dma_ops = { 92 92 .alloc = dma_generic_alloc_coherent, 93 93 .free = dma_generic_free_coherent, 94 94 .map_sg = nommu_map_sg,
+1 -1
arch/x86/kernel/pci-swiotlb.c
··· 45 45 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); 46 46 } 47 47 48 - static struct dma_map_ops swiotlb_dma_ops = { 48 + static const struct dma_map_ops swiotlb_dma_ops = { 49 49 .mapping_error = swiotlb_dma_mapping_error, 50 50 .alloc = x86_swiotlb_alloc_coherent, 51 51 .free = x86_swiotlb_free_coherent,
+1 -1
arch/x86/pci/sta2x11-fixup.c
··· 179 179 } 180 180 181 181 /* We have our own dma_ops: the same as swiotlb but from alloc (above) */ 182 - static struct dma_map_ops sta2x11_dma_ops = { 182 + static const struct dma_map_ops sta2x11_dma_ops = { 183 183 .alloc = sta2x11_swiotlb_alloc_coherent, 184 184 .free = x86_swiotlb_free_coherent, 185 185 .map_page = swiotlb_map_page,
+1 -1
arch/x86/xen/pci-swiotlb-xen.c
··· 18 18 19 19 int xen_swiotlb __read_mostly; 20 20 21 - static struct dma_map_ops xen_swiotlb_dma_ops = { 21 + static const struct dma_map_ops xen_swiotlb_dma_ops = { 22 22 .alloc = xen_swiotlb_alloc_coherent, 23 23 .free = xen_swiotlb_free_coherent, 24 24 .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+1 -1
arch/xtensa/include/asm/device.h
··· 10 10 11 11 struct dev_archdata { 12 12 /* DMA operations on that device */ 13 - struct dma_map_ops *dma_ops; 13 + const struct dma_map_ops *dma_ops; 14 14 }; 15 15 16 16 struct pdev_archdata {
+2 -2
arch/xtensa/include/asm/dma-mapping.h
··· 18 18 19 19 #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 20 20 21 - extern struct dma_map_ops xtensa_dma_map_ops; 21 + extern const struct dma_map_ops xtensa_dma_map_ops; 22 22 23 - static inline struct dma_map_ops *get_dma_ops(struct device *dev) 23 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev) 24 24 { 25 25 if (dev && dev->archdata.dma_ops) 26 26 return dev->archdata.dma_ops;
+1 -1
arch/xtensa/kernel/pci-dma.c
··· 249 249 return 0; 250 250 } 251 251 252 - struct dma_map_ops xtensa_dma_map_ops = { 252 + const struct dma_map_ops xtensa_dma_map_ops = { 253 253 .alloc = xtensa_dma_alloc, 254 254 .free = xtensa_dma_free, 255 255 .map_page = xtensa_map_page,
+2 -2
drivers/iommu/amd_iommu.c
··· 117 117 static ATOMIC_NOTIFIER_HEAD(ppr_notifier); 118 118 int amd_iommu_max_glx_val = -1; 119 119 120 - static struct dma_map_ops amd_iommu_dma_ops; 120 + static const struct dma_map_ops amd_iommu_dma_ops; 121 121 122 122 /* 123 123 * This struct contains device specific data for the IOMMU ··· 2728 2728 return check_device(dev); 2729 2729 } 2730 2730 2731 - static struct dma_map_ops amd_iommu_dma_ops = { 2731 + static const struct dma_map_ops amd_iommu_dma_ops = { 2732 2732 .alloc = alloc_coherent, 2733 2733 .free = free_coherent, 2734 2734 .map_page = map_page,
+1 -1
drivers/misc/mic/bus/mic_bus.c
··· 143 143 } 144 144 145 145 struct mbus_device * 146 - mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 146 + mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, 147 147 struct mbus_hw_ops *hw_ops, int index, 148 148 void __iomem *mmio_va) 149 149 {
+1 -1
drivers/misc/mic/bus/scif_bus.c
··· 138 138 } 139 139 140 140 struct scif_hw_dev * 141 - scif_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 141 + scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, 142 142 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, 143 143 struct mic_mw *mmio, struct mic_mw *aper, void *dp, 144 144 void __iomem *rdp, struct dma_chan **chan, int num_chan,
+1 -1
drivers/misc/mic/bus/scif_bus.h
··· 113 113 void scif_unregister_driver(struct scif_driver *driver); 114 114 struct scif_hw_dev * 115 115 scif_register_device(struct device *pdev, int id, 116 - struct dma_map_ops *dma_ops, 116 + const struct dma_map_ops *dma_ops, 117 117 struct scif_hw_ops *hw_ops, u8 dnode, u8 snode, 118 118 struct mic_mw *mmio, struct mic_mw *aper, 119 119 void *dp, void __iomem *rdp,
+1 -1
drivers/misc/mic/bus/vop_bus.c
··· 154 154 vdev->dev.parent = pdev; 155 155 vdev->id.device = id; 156 156 vdev->id.vendor = VOP_DEV_ANY_ID; 157 - vdev->dev.archdata.dma_ops = (struct dma_map_ops *)dma_ops; 157 + vdev->dev.archdata.dma_ops = dma_ops; 158 158 vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask; 159 159 dma_set_mask(&vdev->dev, DMA_BIT_MASK(64)); 160 160 vdev->dev.release = vop_release_dev;
+2 -2
drivers/misc/mic/host/mic_boot.c
··· 245 245 dma_unmap_sg(&mdev->pdev->dev, sg, nents, dir); 246 246 } 247 247 248 - static struct dma_map_ops __mic_dma_ops = { 248 + static const struct dma_map_ops __mic_dma_ops = { 249 249 .alloc = __mic_dma_alloc, 250 250 .free = __mic_dma_free, 251 251 .map_page = __mic_dma_map_page, ··· 344 344 mic_unmap_single(mdev, dma_addr, size); 345 345 } 346 346 347 - static struct dma_map_ops mic_dma_ops = { 347 + static const struct dma_map_ops mic_dma_ops = { 348 348 .map_page = mic_dma_map_page, 349 349 .unmap_page = mic_dma_unmap_page, 350 350 };
+1 -1
drivers/parisc/ccio-dma.c
··· 1011 1011 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); 1012 1012 } 1013 1013 1014 - static struct dma_map_ops ccio_ops = { 1014 + static const struct dma_map_ops ccio_ops = { 1015 1015 .dma_supported = ccio_dma_supported, 1016 1016 .alloc = ccio_alloc, 1017 1017 .free = ccio_free,
+1 -1
drivers/parisc/sba_iommu.c
··· 1069 1069 1070 1070 } 1071 1071 1072 - static struct dma_map_ops sba_ops = { 1072 + static const struct dma_map_ops sba_ops = { 1073 1073 .dma_supported = sba_dma_supported, 1074 1074 .alloc = sba_alloc, 1075 1075 .free = sba_free,
+1 -1
drivers/pci/host/vmd.c
··· 282 282 return &vmd->dev->dev; 283 283 } 284 284 285 - static struct dma_map_ops *vmd_dma_ops(struct device *dev) 285 + static const struct dma_map_ops *vmd_dma_ops(struct device *dev) 286 286 { 287 287 return get_dma_ops(to_vmd_dev(dev)); 288 288 }
+21 -21
include/linux/dma-mapping.h
··· 127 127 int is_phys;
128 128 };
129 129
130 - extern struct dma_map_ops dma_noop_ops;
130 + extern const struct dma_map_ops dma_noop_ops;
131 131
132 132 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
133 133
··· 170 170 * dma dependent code. Code that depends on the dma-mapping
171 171 * API needs to set 'depends on HAS_DMA' in its Kconfig
172 172 */
173 - extern struct dma_map_ops bad_dma_ops;
174 - static inline struct dma_map_ops *get_dma_ops(struct device *dev)
173 + extern const struct dma_map_ops bad_dma_ops;
174 + static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
175 175 {
176 176 return &bad_dma_ops;
177 177 }
··· 182 182 enum dma_data_direction dir,
183 183 unsigned long attrs)
184 184 {
185 - struct dma_map_ops *ops = get_dma_ops(dev);
185 + const struct dma_map_ops *ops = get_dma_ops(dev);
186 186 dma_addr_t addr;
187 187
188 188 kmemcheck_mark_initialized(ptr, size);
··· 201 201 enum dma_data_direction dir,
202 202 unsigned long attrs)
203 203 {
204 - struct dma_map_ops *ops = get_dma_ops(dev);
204 + const struct dma_map_ops *ops = get_dma_ops(dev);
205 205
206 206 BUG_ON(!valid_dma_direction(dir));
207 207 if (ops->unmap_page)
··· 217 217 int nents, enum dma_data_direction dir,
218 218 unsigned long attrs)
219 219 {
220 - struct dma_map_ops *ops = get_dma_ops(dev);
220 + const struct dma_map_ops *ops = get_dma_ops(dev);
221 221 int i, ents;
222 222 struct scatterlist *s;
223 223
··· 235 235 int nents, enum dma_data_direction dir,
236 236 unsigned long attrs)
237 237 {
238 - struct dma_map_ops *ops = get_dma_ops(dev);
238 + const struct dma_map_ops *ops = get_dma_ops(dev);
239 239
240 240 BUG_ON(!valid_dma_direction(dir));
241 241 debug_dma_unmap_sg(dev, sg, nents, dir);
··· 249 249 enum dma_data_direction dir,
250 250 unsigned long attrs)
251 251 {
252 - struct dma_map_ops *ops = get_dma_ops(dev);
252 + const struct dma_map_ops *ops = get_dma_ops(dev);
253 253 dma_addr_t addr;
254 254
255 255 kmemcheck_mark_initialized(page_address(page) + offset, size);
··· 265 265 enum dma_data_direction dir,
266 266 unsigned long attrs)
267 267 {
268 - struct dma_map_ops *ops = get_dma_ops(dev);
268 + const struct dma_map_ops *ops = get_dma_ops(dev);
269 269
270 270 BUG_ON(!valid_dma_direction(dir));
271 271 if (ops->unmap_page)
··· 279 279 enum dma_data_direction dir,
280 280 unsigned long attrs)
281 281 {
282 - struct dma_map_ops *ops = get_dma_ops(dev);
282 + const struct dma_map_ops *ops = get_dma_ops(dev);
283 283 dma_addr_t addr;
284 284
285 285 BUG_ON(!valid_dma_direction(dir));
··· 300 300 size_t size, enum dma_data_direction dir,
301 301 unsigned long attrs)
302 302 {
303 - struct dma_map_ops *ops = get_dma_ops(dev);
303 + const struct dma_map_ops *ops = get_dma_ops(dev);
304 304
305 305 BUG_ON(!valid_dma_direction(dir));
306 306 if (ops->unmap_resource)
··· 312 312 size_t size,
313 313 enum dma_data_direction dir)
314 314 {
315 - struct dma_map_ops *ops = get_dma_ops(dev);
315 + const struct dma_map_ops *ops = get_dma_ops(dev);
316 316
317 317 BUG_ON(!valid_dma_direction(dir));
318 318 if (ops->sync_single_for_cpu)
··· 324 324 dma_addr_t addr, size_t size,
325 325 enum dma_data_direction dir)
326 326 {
327 - struct dma_map_ops *ops = get_dma_ops(dev);
327 + const struct dma_map_ops *ops = get_dma_ops(dev);
328 328
329 329 BUG_ON(!valid_dma_direction(dir));
330 330 if (ops->sync_single_for_device)
··· 364 364 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
365 365 int nelems, enum dma_data_direction dir)
366 366 {
367 - struct dma_map_ops *ops = get_dma_ops(dev);
367 + const struct dma_map_ops *ops = get_dma_ops(dev);
368 368
369 369 BUG_ON(!valid_dma_direction(dir));
370 370 if (ops->sync_sg_for_cpu)
··· 376 376 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
377 377 int nelems, enum dma_data_direction dir)
378 378 {
379 - struct dma_map_ops *ops = get_dma_ops(dev);
379 + const struct dma_map_ops *ops = get_dma_ops(dev);
380 380
381 381 BUG_ON(!valid_dma_direction(dir));
382 382 if (ops->sync_sg_for_device)
··· 421 421 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
422 422 dma_addr_t dma_addr, size_t size, unsigned long attrs)
423 423 {
424 - struct dma_map_ops *ops = get_dma_ops(dev);
424 + const struct dma_map_ops *ops = get_dma_ops(dev);
425 425 BUG_ON(!ops);
426 426 if (ops->mmap)
427 427 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
··· 439 439 dma_addr_t dma_addr, size_t size,
440 440 unsigned long attrs)
441 441 {
442 - struct dma_map_ops *ops = get_dma_ops(dev);
442 + const struct dma_map_ops *ops = get_dma_ops(dev);
443 443 BUG_ON(!ops);
444 444 if (ops->get_sgtable)
445 445 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
··· 457 457 dma_addr_t *dma_handle, gfp_t flag,
458 458 unsigned long attrs)
459 459 {
460 - struct dma_map_ops *ops = get_dma_ops(dev);
460 + const struct dma_map_ops *ops = get_dma_ops(dev);
461 461 void *cpu_addr;
462 462
463 463 BUG_ON(!ops);
··· 479 479 void *cpu_addr, dma_addr_t dma_handle,
480 480 unsigned long attrs)
481 481 {
482 - struct dma_map_ops *ops = get_dma_ops(dev);
482 + const struct dma_map_ops *ops = get_dma_ops(dev);
483 483
484 484 BUG_ON(!ops);
485 485 WARN_ON(irqs_disabled());
··· 537 537 #ifndef HAVE_ARCH_DMA_SUPPORTED
538 538 static inline int dma_supported(struct device *dev, u64 mask)
539 539 {
540 - struct dma_map_ops *ops = get_dma_ops(dev);
540 + const struct dma_map_ops *ops = get_dma_ops(dev);
541 541
542 542 if (!ops)
543 543 return 0;
··· 550 550 #ifndef HAVE_ARCH_DMA_SET_MASK
551 551 static inline int dma_set_mask(struct device *dev, u64 mask)
552 552 {
553 - struct dma_map_ops *ops = get_dma_ops(dev);
553 + const struct dma_map_ops *ops = get_dma_ops(dev);
554 554
555 555 if (ops->set_dma_mask)
556 556 return ops->set_dma_mask(dev, mask);
+1 -1
include/linux/mic_bus.h
··· 90 90 }; 91 91 92 92 struct mbus_device * 93 - mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, 93 + mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_ops, 94 94 struct mbus_hw_ops *hw_ops, int index, 95 95 void __iomem *mmio_va); 96 96 void mbus_unregister_device(struct mbus_device *mbdev);
+1 -1
include/xen/arm/hypervisor.h
··· 18 18 return PARAVIRT_LAZY_NONE; 19 19 } 20 20 21 - extern struct dma_map_ops *xen_dma_ops; 21 + extern const struct dma_map_ops *xen_dma_ops; 22 22 23 23 #ifdef CONFIG_XEN 24 24 void __init xen_early_init(void);
+1 -1
lib/dma-noop.c
··· 64 64 return 1; 65 65 } 66 66 67 - struct dma_map_ops dma_noop_ops = { 67 + const struct dma_map_ops dma_noop_ops = { 68 68 .alloc = dma_noop_alloc, 69 69 .free = dma_noop_free, 70 70 .map_page = dma_noop_map_page,