Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

remove hwsw_dma_ops

This removes hwsw_dma_ops (and hwsw_*
functions). hwsw_dma_get_ops can select swiotlb_dma_ops and
sba_dma_ops appropriately.

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by

FUJITA Tomonori and committed by
Ingo Molnar
c7b3aee8 c190ab0b

+14 -171
+12 -171
arch/ia64/hp/common/hwsw_iommu.c
··· 17 17 #include <linux/swiotlb.h> 18 18 #include <asm/machvec.h> 19 19 20 + extern struct dma_mapping_ops sba_dma_ops, swiotlb_dma_ops; 21 + 20 22 /* swiotlb declarations & definitions: */ 21 23 extern int swiotlb_late_init_with_default_size (size_t size); 22 - 23 - /* hwiommu declarations & definitions: */ 24 - 25 - extern void *sba_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); 26 - extern void sba_free_coherent (struct device *, size_t, void *, dma_addr_t); 27 - extern dma_addr_t sba_map_single_attrs(struct device *, void *, size_t, int, 28 - struct dma_attrs *); 29 - extern void sba_unmap_single_attrs(struct device *, dma_addr_t, size_t, int, 30 - struct dma_attrs *); 31 - extern int sba_map_sg_attrs(struct device *, struct scatterlist *, int, int, 32 - struct dma_attrs *); 33 - extern void sba_unmap_sg_attrs(struct device *, struct scatterlist *, int, int, 34 - struct dma_attrs *); 35 - extern int sba_dma_supported (struct device *, u64); 36 - extern int sba_dma_mapping_error(struct device *, dma_addr_t); 37 - 38 - #define hwiommu_alloc_coherent sba_alloc_coherent 39 - #define hwiommu_free_coherent sba_free_coherent 40 - #define hwiommu_map_single_attrs sba_map_single_attrs 41 - #define hwiommu_unmap_single_attrs sba_unmap_single_attrs 42 - #define hwiommu_map_sg_attrs sba_map_sg_attrs 43 - #define hwiommu_unmap_sg_attrs sba_unmap_sg_attrs 44 - #define hwiommu_dma_supported sba_dma_supported 45 - #define hwiommu_dma_mapping_error sba_dma_mapping_error 46 - #define hwiommu_sync_single_for_cpu machvec_dma_sync_single 47 - #define hwiommu_sync_sg_for_cpu machvec_dma_sync_sg 48 - #define hwiommu_sync_single_for_device machvec_dma_sync_single 49 - #define hwiommu_sync_sg_for_device machvec_dma_sync_sg 50 - 51 24 52 25 /* 53 26 * Note: we need to make the determination of whether or not to use 54 27 * the sw I/O TLB based purely on the device structure. Anything else 55 28 * would be unreliable or would be too intrusive. 
56 29 */ 57 - static inline int 58 - use_swiotlb (struct device *dev) 30 + static inline int use_swiotlb(struct device *dev) 59 31 { 60 - return dev && dev->dma_mask && !hwiommu_dma_supported(dev, *dev->dma_mask); 32 + return dev && dev->dma_mask && 33 + !sba_dma_ops.dma_supported_op(dev, *dev->dma_mask); 61 34 } 62 35 63 - struct dma_mapping_ops hwsw_dma_ops; 36 + struct dma_mapping_ops *hwsw_dma_get_ops(struct device *dev) 37 + { 38 + if (use_swiotlb(dev)) 39 + return &swiotlb_dma_ops; 40 + return &sba_dma_ops; 41 + } 42 + EXPORT_SYMBOL(hwsw_dma_get_ops); 64 43 65 44 void __init 66 45 hwsw_init (void) 67 46 { 68 - dma_ops = &hwsw_dma_ops; 69 47 /* default to a smallish 2MB sw I/O TLB */ 70 48 if (swiotlb_late_init_with_default_size (2 * (1<<20)) != 0) { 71 49 #ifdef CONFIG_IA64_GENERIC ··· 56 78 #endif 57 79 } 58 80 } 59 - 60 - void * 61 - hwsw_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) 62 - { 63 - if (use_swiotlb(dev)) 64 - return swiotlb_alloc_coherent(dev, size, dma_handle, flags); 65 - else 66 - return hwiommu_alloc_coherent(dev, size, dma_handle, flags); 67 - } 68 - 69 - void 70 - hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) 71 - { 72 - if (use_swiotlb(dev)) 73 - swiotlb_free_coherent(dev, size, vaddr, dma_handle); 74 - else 75 - hwiommu_free_coherent(dev, size, vaddr, dma_handle); 76 - } 77 - 78 - dma_addr_t 79 - hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, 80 - struct dma_attrs *attrs) 81 - { 82 - if (use_swiotlb(dev)) 83 - return swiotlb_map_single_attrs(dev, addr, size, dir, attrs); 84 - else 85 - return hwiommu_map_single_attrs(dev, addr, size, dir, attrs); 86 - } 87 - EXPORT_SYMBOL(hwsw_map_single_attrs); 88 - 89 - void 90 - hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, 91 - int dir, struct dma_attrs *attrs) 92 - { 93 - if (use_swiotlb(dev)) 94 - return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs); 
95 - else 96 - return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs); 97 - } 98 - EXPORT_SYMBOL(hwsw_unmap_single_attrs); 99 - 100 - int 101 - hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, 102 - int dir, struct dma_attrs *attrs) 103 - { 104 - if (use_swiotlb(dev)) 105 - return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs); 106 - else 107 - return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs); 108 - } 109 - EXPORT_SYMBOL(hwsw_map_sg_attrs); 110 - 111 - void 112 - hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, 113 - int dir, struct dma_attrs *attrs) 114 - { 115 - if (use_swiotlb(dev)) 116 - return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs); 117 - else 118 - return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs); 119 - } 120 - EXPORT_SYMBOL(hwsw_unmap_sg_attrs); 121 - 122 - void 123 - hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir) 124 - { 125 - if (use_swiotlb(dev)) 126 - swiotlb_sync_single_for_cpu(dev, addr, size, dir); 127 - else 128 - hwiommu_sync_single_for_cpu(dev, addr, size, dir); 129 - } 130 - 131 - void 132 - hwsw_sync_sg_for_cpu (struct device *dev, struct scatterlist *sg, int nelems, int dir) 133 - { 134 - if (use_swiotlb(dev)) 135 - swiotlb_sync_sg_for_cpu(dev, sg, nelems, dir); 136 - else 137 - hwiommu_sync_sg_for_cpu(dev, sg, nelems, dir); 138 - } 139 - 140 - void 141 - hwsw_sync_single_for_device (struct device *dev, dma_addr_t addr, size_t size, int dir) 142 - { 143 - if (use_swiotlb(dev)) 144 - swiotlb_sync_single_for_device(dev, addr, size, dir); 145 - else 146 - hwiommu_sync_single_for_device(dev, addr, size, dir); 147 - } 148 - 149 - void 150 - hwsw_sync_sg_for_device (struct device *dev, struct scatterlist *sg, int nelems, int dir) 151 - { 152 - if (use_swiotlb(dev)) 153 - swiotlb_sync_sg_for_device(dev, sg, nelems, dir); 154 - else 155 - hwiommu_sync_sg_for_device(dev, sg, nelems, dir); 156 - } 157 - 158 
- int 159 - hwsw_dma_supported (struct device *dev, u64 mask) 160 - { 161 - if (hwiommu_dma_supported(dev, mask)) 162 - return 1; 163 - return swiotlb_dma_supported(dev, mask); 164 - } 165 - 166 - int 167 - hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 168 - { 169 - return hwiommu_dma_mapping_error(dev, dma_addr) || 170 - swiotlb_dma_mapping_error(dev, dma_addr); 171 - } 172 - 173 - EXPORT_SYMBOL(hwsw_dma_mapping_error); 174 - EXPORT_SYMBOL(hwsw_dma_supported); 175 - EXPORT_SYMBOL(hwsw_alloc_coherent); 176 - EXPORT_SYMBOL(hwsw_free_coherent); 177 - EXPORT_SYMBOL(hwsw_sync_single_for_cpu); 178 - EXPORT_SYMBOL(hwsw_sync_single_for_device); 179 - EXPORT_SYMBOL(hwsw_sync_sg_for_cpu); 180 - EXPORT_SYMBOL(hwsw_sync_sg_for_device); 181 - 182 - struct dma_mapping_ops hwsw_dma_ops = { 183 - .alloc_coherent = hwsw_alloc_coherent, 184 - .free_coherent = hwsw_free_coherent, 185 - .map_single_attrs = hwsw_map_single_attrs, 186 - .unmap_single_attrs = hwsw_unmap_single_attrs, 187 - .map_sg_attrs = hwsw_map_sg_attrs, 188 - .unmap_sg_attrs = hwsw_unmap_sg_attrs, 189 - .sync_single_for_cpu = hwsw_sync_single_for_cpu, 190 - .sync_sg_for_cpu = hwsw_sync_sg_for_cpu, 191 - .sync_single_for_device = hwsw_sync_single_for_device, 192 - .sync_sg_for_device = hwsw_sync_sg_for_device, 193 - .dma_supported_op = hwsw_dma_supported, 194 - .mapping_error = hwsw_dma_mapping_error, 195 - };
+2
arch/ia64/include/asm/machvec_hpzx1_swiotlb.h
··· 2 2 #define _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h 3 3 4 4 extern ia64_mv_setup_t dig_setup; 5 + extern ia64_mv_dma_get_ops hwsw_dma_get_ops; 5 6 6 7 /* 7 8 * This stuff has dual use! ··· 14 13 #define platform_name "hpzx1_swiotlb" 15 14 #define platform_setup dig_setup 16 15 #define platform_dma_init machvec_noop 16 + #define platform_dma_get_ops hwsw_dma_get_ops 17 17 18 18 #endif /* _ASM_IA64_MACHVEC_HPZX1_SWIOTLB_h */