Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

add dma_get_ops to struct ia64_machine_vector

This adds a dma_get_ops hook to struct ia64_machine_vector. We use
dma_get_ops() in arch/ia64/kernel/dma-mapping.c, which simply returns
the global dma_ops. This is a preparation for removing hwsw_dma_ops.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by: FUJITA Tomonori
Committed by: Ingo Molnar
c190ab0b cdc28d59

+39 -18
+24 -17
arch/ia64/include/asm/dma-mapping.h
··· 68 68 static inline void *dma_alloc_coherent(struct device *dev, size_t size, 69 69 dma_addr_t *daddr, gfp_t gfp) 70 70 { 71 - return dma_ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA); 71 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 72 + return ops->alloc_coherent(dev, size, daddr, gfp | GFP_DMA); 72 73 } 73 74 74 75 static inline void dma_free_coherent(struct device *dev, size_t size, 75 76 void *caddr, dma_addr_t daddr) 76 77 { 77 - dma_ops->free_coherent(dev, size, caddr, daddr); 78 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 79 + ops->free_coherent(dev, size, caddr, daddr); 78 80 } 79 81 80 82 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) ··· 87 85 enum dma_data_direction dir, 88 86 struct dma_attrs *attrs) 89 87 { 90 - return dma_ops->map_single_attrs(dev, caddr, size, dir, attrs); 88 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 89 + return ops->map_single_attrs(dev, caddr, size, dir, attrs); 91 90 } 92 91 93 92 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t daddr, ··· 96 93 enum dma_data_direction dir, 97 94 struct dma_attrs *attrs) 98 95 { 99 - dma_ops->unmap_single_attrs(dev, daddr, size, dir, attrs); 96 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 97 + ops->unmap_single_attrs(dev, daddr, size, dir, attrs); 100 98 } 101 99 102 100 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) ··· 107 103 int nents, enum dma_data_direction dir, 108 104 struct dma_attrs *attrs) 109 105 { 110 - return dma_ops->map_sg_attrs(dev, sgl, nents, dir, attrs); 106 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 107 + return ops->map_sg_attrs(dev, sgl, nents, dir, attrs); 111 108 } 112 109 113 110 static inline void dma_unmap_sg_attrs(struct device *dev, ··· 116 111 enum dma_data_direction dir, 117 112 struct dma_attrs *attrs) 118 113 { 119 - dma_ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs); 114 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev);
115 + ops->unmap_sg_attrs(dev, sgl, nents, dir, attrs); 120 116 } 121 117 122 118 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) ··· 127 121 size_t size, 128 122 enum dma_data_direction dir) 129 123 { 130 - dma_ops->sync_single_for_cpu(dev, daddr, size, dir); 124 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 125 + ops->sync_single_for_cpu(dev, daddr, size, dir); 131 126 } 132 127 133 128 static inline void dma_sync_sg_for_cpu(struct device *dev, 134 129 struct scatterlist *sgl, 135 130 int nents, enum dma_data_direction dir) 136 131 { 137 - dma_ops->sync_sg_for_cpu(dev, sgl, nents, dir); 132 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 133 + ops->sync_sg_for_cpu(dev, sgl, nents, dir); 138 134 } 139 135 140 136 static inline void dma_sync_single_for_device(struct device *dev, ··· 144 136 size_t size, 145 137 enum dma_data_direction dir) 146 138 { 147 - dma_ops->sync_single_for_device(dev, daddr, size, dir); 139 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 140 + ops->sync_single_for_device(dev, daddr, size, dir); 148 141 } 149 142 150 143 static inline void dma_sync_sg_for_device(struct device *dev, ··· 153 144 int nents, 154 145 enum dma_data_direction dir) 155 146 { 156 - dma_ops->sync_sg_for_device(dev, sgl, nents, dir); 147 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 148 + ops->sync_sg_for_device(dev, sgl, nents, dir); 157 149 } 158 150 159 151 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) 160 152 { 161 - return dma_ops->mapping_error(dev, daddr); 153 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 154 + return ops->mapping_error(dev, daddr); 162 155 } 163 156 164 157 #define dma_map_page(dev, pg, off, size, dir) \ ··· 180 169 181 170 static inline int dma_supported(struct device *dev, u64 mask) 182 171 { 183 - return dma_ops->dma_supported_op(dev, mask); 172 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 173 + return ops->dma_supported_op(dev, mask);
184 174 } 185 175 186 176 static inline int ··· 207 195 } 208 196 209 197 #define dma_is_consistent(d, h) (1) /* all we do is coherent memory... */ 210 - 211 - static inline struct dma_mapping_ops *get_dma_ops(struct device *dev) 212 - { 213 - return dma_ops; 214 - } 215 198 216 199 #endif /* _ASM_IA64_DMA_MAPPING_H */
+8
arch/ia64/include/asm/machvec.h
··· 45 45 46 46 /* DMA-mapping interface: */ 47 47 typedef void ia64_mv_dma_init (void); 48 + typedef struct dma_mapping_ops *ia64_mv_dma_get_ops(struct device *); 48 49 49 50 /* 50 51 * WARNING: The legacy I/O space is _architected_. Platforms are ··· 131 130 # define platform_global_tlb_purge ia64_mv.global_tlb_purge 132 131 # define platform_tlb_migrate_finish ia64_mv.tlb_migrate_finish 133 132 # define platform_dma_init ia64_mv.dma_init 133 + # define platform_dma_get_ops ia64_mv.dma_get_ops 134 134 # define platform_irq_to_vector ia64_mv.irq_to_vector 135 135 # define platform_local_vector_to_irq ia64_mv.local_vector_to_irq 136 136 # define platform_pci_get_legacy_mem ia64_mv.pci_get_legacy_mem ··· 174 172 ia64_mv_global_tlb_purge_t *global_tlb_purge; 175 173 ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish; 176 174 ia64_mv_dma_init *dma_init; 175 + ia64_mv_dma_get_ops *dma_get_ops; 177 176 ia64_mv_irq_to_vector *irq_to_vector; 178 177 ia64_mv_local_vector_to_irq *local_vector_to_irq; 179 178 ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem; ··· 213 210 platform_global_tlb_purge, \ 214 211 platform_tlb_migrate_finish, \ 215 212 platform_dma_init, \ 213 + platform_dma_get_ops, \ 216 214 platform_irq_to_vector, \ 217 215 platform_local_vector_to_irq, \ 218 216 platform_pci_get_legacy_mem, \ ··· 250 246 # endif /* CONFIG_IA64_GENERIC */ 251 247 252 248 extern void swiotlb_dma_init(void); 249 + extern struct dma_mapping_ops *dma_get_ops(struct device *); 253 250 254 251 /* 255 252 * Define default versions so we can extend machvec for new platforms without having ··· 283 278 #endif 284 279 #ifndef platform_dma_init 285 280 # define platform_dma_init swiotlb_dma_init 281 + #endif 282 + #ifndef platform_dma_get_ops 283 + # define platform_dma_get_ops dma_get_ops 286 284 #endif 287 285 #ifndef platform_irq_to_vector 288 286 # define platform_irq_to_vector __ia64_irq_to_vector
+6
arch/ia64/kernel/dma-mapping.c
··· 2 2 3 3 struct dma_mapping_ops *dma_ops; 4 4 EXPORT_SYMBOL(dma_ops); 5 + 6 + struct dma_mapping_ops *dma_get_ops(struct device *dev) 7 + { 8 + return dma_ops; 9 + } 10 + EXPORT_SYMBOL(dma_get_ops);
+1 -1
arch/ia64/kernel/pci-dma.c
··· 81 81 82 82 int iommu_dma_supported(struct device *dev, u64 mask) 83 83 { 84 - struct dma_mapping_ops *ops = get_dma_ops(dev); 84 + struct dma_mapping_ops *ops = platform_dma_get_ops(dev); 85 85 86 86 if (ops->dma_supported_op) 87 87 return ops->dma_supported_op(dev, mask);