Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

X86 & IA64: adapt for dma_map_ops changes

Adapt core x86 and IA64 architecture code for dma_map_ops changes: replace
alloc/free_coherent with generic alloc/free methods.

Signed-off-by: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
[removed swiotlb related changes and replaced it with wrappers,
merged with IA64 patch to avoid inter-patch dependences in intel-iommu code]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Tony Luck <tony.luck@intel.com>

Authored by Andrzej Pietrasiewicz and committed by Marek Szyprowski.
baa676fc 613c4578

+99 -59
+6 -5
arch/ia64/hp/common/sba_iommu.c
··· 1130 1130 * See Documentation/DMA-API-HOWTO.txt 1131 1131 */ 1132 1132 static void * 1133 - sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) 1133 + sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 1134 + gfp_t flags, struct dma_attrs *attrs) 1134 1135 { 1135 1136 struct ioc *ioc; 1136 1137 void *addr; ··· 1193 1192 * 1194 1193 * See Documentation/DMA-API-HOWTO.txt 1195 1194 */ 1196 - static void sba_free_coherent (struct device *dev, size_t size, void *vaddr, 1197 - dma_addr_t dma_handle) 1195 + static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, 1196 + dma_addr_t dma_handle, struct dma_attrs *attrs) 1198 1197 { 1199 1198 sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); 1200 1199 free_pages((unsigned long) vaddr, get_order(size)); ··· 2214 2213 __setup("sbapagesize=",sba_page_override); 2215 2214 2216 2215 struct dma_map_ops sba_dma_ops = { 2217 - .alloc_coherent = sba_alloc_coherent, 2218 - .free_coherent = sba_free_coherent, 2216 + .alloc = sba_alloc_coherent, 2217 + .free = sba_free_coherent, 2219 2218 .map_page = sba_map_page, 2220 2219 .unmap_page = sba_unmap_page, 2221 2220 .map_sg = sba_map_sg_attrs,
+12 -6
arch/ia64/include/asm/dma-mapping.h
··· 23 23 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, 24 24 enum dma_data_direction); 25 25 26 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 27 - dma_addr_t *daddr, gfp_t gfp) 26 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 27 + 28 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 29 + dma_addr_t *daddr, gfp_t gfp, 30 + struct dma_attrs *attrs) 28 31 { 29 32 struct dma_map_ops *ops = platform_dma_get_ops(dev); 30 33 void *caddr; 31 34 32 - caddr = ops->alloc_coherent(dev, size, daddr, gfp); 35 + caddr = ops->alloc(dev, size, daddr, gfp, attrs); 33 36 debug_dma_alloc_coherent(dev, size, *daddr, caddr); 34 37 return caddr; 35 38 } 36 39 37 - static inline void dma_free_coherent(struct device *dev, size_t size, 38 - void *caddr, dma_addr_t daddr) 40 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 41 + 42 + static inline void dma_free_attrs(struct device *dev, size_t size, 43 + void *caddr, dma_addr_t daddr, 44 + struct dma_attrs *attrs) 39 45 { 40 46 struct dma_map_ops *ops = platform_dma_get_ops(dev); 41 47 debug_dma_free_coherent(dev, size, caddr, daddr); 42 - ops->free_coherent(dev, size, caddr, daddr); 48 + ops->free(dev, size, caddr, daddr, attrs); 43 49 } 44 50 45 51 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+11 -3
arch/ia64/kernel/pci-swiotlb.c
··· 15 15 EXPORT_SYMBOL(swiotlb); 16 16 17 17 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, 18 - dma_addr_t *dma_handle, gfp_t gfp) 18 + dma_addr_t *dma_handle, gfp_t gfp, 19 + struct dma_attrs *attrs) 19 20 { 20 21 if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) 21 22 gfp |= GFP_DMA; 22 23 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); 23 24 } 24 25 26 + static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, 27 + void *vaddr, dma_addr_t dma_addr, 28 + struct dma_attrs *attrs) 29 + { 30 + swiotlb_free_coherent(dev, size, vaddr, dma_addr); 31 + } 32 + 25 33 struct dma_map_ops swiotlb_dma_ops = { 26 - .alloc_coherent = ia64_swiotlb_alloc_coherent, 27 - .free_coherent = swiotlb_free_coherent, 34 + .alloc = ia64_swiotlb_alloc_coherent, 35 + .free = ia64_swiotlb_free_coherent, 28 36 .map_page = swiotlb_map_page, 29 37 .unmap_page = swiotlb_unmap_page, 30 38 .map_sg = swiotlb_map_sg_attrs,
+5 -4
arch/ia64/sn/pci/pci_dma.c
··· 76 76 * more information. 77 77 */ 78 78 static void *sn_dma_alloc_coherent(struct device *dev, size_t size, 79 - dma_addr_t * dma_handle, gfp_t flags) 79 + dma_addr_t * dma_handle, gfp_t flags, 80 + struct dma_attrs *attrs) 80 81 { 81 82 void *cpuaddr; 82 83 unsigned long phys_addr; ··· 138 137 * any associated IOMMU mappings. 139 138 */ 140 139 static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 141 - dma_addr_t dma_handle) 140 + dma_addr_t dma_handle, struct dma_attrs *attrs) 142 141 { 143 142 struct pci_dev *pdev = to_pci_dev(dev); 144 143 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); ··· 467 466 } 468 467 469 468 static struct dma_map_ops sn_dma_ops = { 470 - .alloc_coherent = sn_dma_alloc_coherent, 471 - .free_coherent = sn_dma_free_coherent, 469 + .alloc = sn_dma_alloc_coherent, 470 + .free = sn_dma_free_coherent, 472 471 .map_page = sn_dma_map_page, 473 472 .unmap_page = sn_dma_unmap_page, 474 473 .map_sg = sn_dma_map_sg,
+16 -10
arch/x86/include/asm/dma-mapping.h
··· 59 59 extern int dma_set_mask(struct device *dev, u64 mask); 60 60 61 61 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 62 - dma_addr_t *dma_addr, gfp_t flag); 62 + dma_addr_t *dma_addr, gfp_t flag, 63 + struct dma_attrs *attrs); 63 64 64 65 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) 65 66 { ··· 112 111 return gfp; 113 112 } 114 113 114 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 115 + 115 116 static inline void * 116 - dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 117 - gfp_t gfp) 117 + dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, 118 + gfp_t gfp, struct dma_attrs *attrs) 118 119 { 119 120 struct dma_map_ops *ops = get_dma_ops(dev); 120 121 void *memory; ··· 132 129 if (!is_device_dma_capable(dev)) 133 130 return NULL; 134 131 135 - if (!ops->alloc_coherent) 132 + if (!ops->alloc) 136 133 return NULL; 137 134 138 - memory = ops->alloc_coherent(dev, size, dma_handle, 139 - dma_alloc_coherent_gfp_flags(dev, gfp)); 135 + memory = ops->alloc(dev, size, dma_handle, 136 + dma_alloc_coherent_gfp_flags(dev, gfp), attrs); 140 137 debug_dma_alloc_coherent(dev, size, *dma_handle, memory); 141 138 142 139 return memory; 143 140 } 144 141 145 - static inline void dma_free_coherent(struct device *dev, size_t size, 146 - void *vaddr, dma_addr_t bus) 142 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 143 + 144 + static inline void dma_free_attrs(struct device *dev, size_t size, 145 + void *vaddr, dma_addr_t bus, 146 + struct dma_attrs *attrs) 147 147 { 148 148 struct dma_map_ops *ops = get_dma_ops(dev); 149 149 ··· 156 150 return; 157 151 158 152 debug_dma_free_coherent(dev, size, vaddr, bus); 159 - if (ops->free_coherent) 160 - ops->free_coherent(dev, size, vaddr, bus); 153 + if (ops->free) 154 + ops->free(dev, size, vaddr, bus, attrs); 161 155 } 162 156 163 157 #endif
+6 -5
arch/x86/kernel/amd_gart_64.c
··· 477 477 /* allocate and map a coherent mapping */ 478 478 static void * 479 479 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, 480 - gfp_t flag) 480 + gfp_t flag, struct dma_attrs *attrs) 481 481 { 482 482 dma_addr_t paddr; 483 483 unsigned long align_mask; ··· 500 500 } 501 501 __free_pages(page, get_order(size)); 502 502 } else 503 - return dma_generic_alloc_coherent(dev, size, dma_addr, flag); 503 + return dma_generic_alloc_coherent(dev, size, dma_addr, flag, 504 + attrs); 504 505 505 506 return NULL; 506 507 } ··· 509 508 /* free a coherent mapping */ 510 509 static void 511 510 gart_free_coherent(struct device *dev, size_t size, void *vaddr, 512 - dma_addr_t dma_addr) 511 + dma_addr_t dma_addr, struct dma_attrs *attrs) 513 512 { 514 513 gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); 515 514 free_pages((unsigned long)vaddr, get_order(size)); ··· 701 700 .unmap_sg = gart_unmap_sg, 702 701 .map_page = gart_map_page, 703 702 .unmap_page = gart_unmap_page, 704 - .alloc_coherent = gart_alloc_coherent, 705 - .free_coherent = gart_free_coherent, 703 + .alloc = gart_alloc_coherent, 704 + .free = gart_free_coherent, 706 705 .mapping_error = gart_mapping_error, 707 706 }; 708 707
+5 -4
arch/x86/kernel/pci-calgary_64.c
··· 431 431 } 432 432 433 433 static void* calgary_alloc_coherent(struct device *dev, size_t size, 434 - dma_addr_t *dma_handle, gfp_t flag) 434 + dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) 435 435 { 436 436 void *ret = NULL; 437 437 dma_addr_t mapping; ··· 464 464 } 465 465 466 466 static void calgary_free_coherent(struct device *dev, size_t size, 467 - void *vaddr, dma_addr_t dma_handle) 467 + void *vaddr, dma_addr_t dma_handle, 468 + struct dma_attrs *attrs) 468 469 { 469 470 unsigned int npages; 470 471 struct iommu_table *tbl = find_iommu_table(dev); ··· 478 477 } 479 478 480 479 static struct dma_map_ops calgary_dma_ops = { 481 - .alloc_coherent = calgary_alloc_coherent, 482 - .free_coherent = calgary_free_coherent, 480 + .alloc = calgary_alloc_coherent, 481 + .free = calgary_free_coherent, 483 482 .map_sg = calgary_map_sg, 484 483 .unmap_sg = calgary_unmap_sg, 485 484 .map_page = calgary_map_page,
+2 -1
arch/x86/kernel/pci-dma.c
··· 96 96 } 97 97 } 98 98 void *dma_generic_alloc_coherent(struct device *dev, size_t size, 99 - dma_addr_t *dma_addr, gfp_t flag) 99 + dma_addr_t *dma_addr, gfp_t flag, 100 + struct dma_attrs *attrs) 100 101 { 101 102 unsigned long dma_mask; 102 103 struct page *page;
+3 -3
arch/x86/kernel/pci-nommu.c
··· 75 75 } 76 76 77 77 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, 78 - dma_addr_t dma_addr) 78 + dma_addr_t dma_addr, struct dma_attrs *attrs) 79 79 { 80 80 free_pages((unsigned long)vaddr, get_order(size)); 81 81 } ··· 96 96 } 97 97 98 98 struct dma_map_ops nommu_dma_ops = { 99 - .alloc_coherent = dma_generic_alloc_coherent, 100 - .free_coherent = nommu_free_coherent, 99 + .alloc = dma_generic_alloc_coherent, 100 + .free = nommu_free_coherent, 101 101 .map_sg = nommu_map_sg, 102 102 .map_page = nommu_map_page, 103 103 .sync_single_for_device = nommu_sync_single_for_device,
+13 -4
arch/x86/kernel/pci-swiotlb.c
··· 15 15 int swiotlb __read_mostly; 16 16 17 17 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 18 - dma_addr_t *dma_handle, gfp_t flags) 18 + dma_addr_t *dma_handle, gfp_t flags, 19 + struct dma_attrs *attrs) 19 20 { 20 21 void *vaddr; 21 22 22 - vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags); 23 + vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags, 24 + attrs); 23 25 if (vaddr) 24 26 return vaddr; 25 27 26 28 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); 27 29 } 28 30 31 + static void x86_swiotlb_free_coherent(struct device *dev, size_t size, 32 + void *vaddr, dma_addr_t dma_addr, 33 + struct dma_attrs *attrs) 34 + { 35 + swiotlb_free_coherent(dev, size, vaddr, dma_addr); 36 + } 37 + 29 38 static struct dma_map_ops swiotlb_dma_ops = { 30 39 .mapping_error = swiotlb_dma_mapping_error, 31 - .alloc_coherent = x86_swiotlb_alloc_coherent, 32 - .free_coherent = swiotlb_free_coherent, 40 + .alloc = x86_swiotlb_alloc_coherent, 41 + .free = x86_swiotlb_free_coherent, 33 42 .sync_single_for_cpu = swiotlb_sync_single_for_cpu, 34 43 .sync_single_for_device = swiotlb_sync_single_for_device, 35 44 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+2 -2
arch/x86/xen/pci-swiotlb-xen.c
··· 12 12 13 13 static struct dma_map_ops xen_swiotlb_dma_ops = { 14 14 .mapping_error = xen_swiotlb_dma_mapping_error, 15 - .alloc_coherent = xen_swiotlb_alloc_coherent, 16 - .free_coherent = xen_swiotlb_free_coherent, 15 + .alloc = xen_swiotlb_alloc_coherent, 16 + .free = xen_swiotlb_free_coherent, 17 17 .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, 18 18 .sync_single_for_device = xen_swiotlb_sync_single_for_device, 19 19 .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+6 -4
drivers/iommu/amd_iommu.c
··· 2707 2707 * The exported alloc_coherent function for dma_ops. 2708 2708 */ 2709 2709 static void *alloc_coherent(struct device *dev, size_t size, 2710 - dma_addr_t *dma_addr, gfp_t flag) 2710 + dma_addr_t *dma_addr, gfp_t flag, 2711 + struct dma_attrs *attrs) 2711 2712 { 2712 2713 unsigned long flags; 2713 2714 void *virt_addr; ··· 2766 2765 * The exported free_coherent function for dma_ops. 2767 2766 */ 2768 2767 static void free_coherent(struct device *dev, size_t size, 2769 - void *virt_addr, dma_addr_t dma_addr) 2768 + void *virt_addr, dma_addr_t dma_addr, 2769 + struct dma_attrs *attrs) 2770 2770 { 2771 2771 unsigned long flags; 2772 2772 struct protection_domain *domain; ··· 2848 2846 } 2849 2847 2850 2848 static struct dma_map_ops amd_iommu_dma_ops = { 2851 - .alloc_coherent = alloc_coherent, 2852 - .free_coherent = free_coherent, 2849 + .alloc = alloc_coherent, 2850 + .free = free_coherent, 2853 2851 .map_page = map_page, 2854 2852 .unmap_page = unmap_page, 2855 2853 .map_sg = map_sg,
+5 -4
drivers/iommu/intel-iommu.c
··· 2938 2938 } 2939 2939 2940 2940 static void *intel_alloc_coherent(struct device *hwdev, size_t size, 2941 - dma_addr_t *dma_handle, gfp_t flags) 2941 + dma_addr_t *dma_handle, gfp_t flags, 2942 + struct dma_attrs *attrs) 2942 2943 { 2943 2944 void *vaddr; 2944 2945 int order; ··· 2971 2970 } 2972 2971 2973 2972 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, 2974 - dma_addr_t dma_handle) 2973 + dma_addr_t dma_handle, struct dma_attrs *attrs) 2975 2974 { 2976 2975 int order; 2977 2976 ··· 3116 3115 } 3117 3116 3118 3117 struct dma_map_ops intel_dma_ops = { 3119 - .alloc_coherent = intel_alloc_coherent, 3120 - .free_coherent = intel_free_coherent, 3118 + .alloc = intel_alloc_coherent, 3119 + .free = intel_free_coherent, 3121 3120 .map_sg = intel_map_sg, 3122 3121 .unmap_sg = intel_unmap_sg, 3123 3122 .map_page = intel_map_page,
+3 -2
drivers/xen/swiotlb-xen.c
··· 204 204 205 205 void * 206 206 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 207 - dma_addr_t *dma_handle, gfp_t flags) 207 + dma_addr_t *dma_handle, gfp_t flags, 208 + struct dma_attrs *attrs) 208 209 { 209 210 void *ret; 210 211 int order = get_order(size); ··· 254 253 255 254 void 256 255 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 257 - dma_addr_t dev_addr) 256 + dma_addr_t dev_addr, struct dma_attrs *attrs) 258 257 { 259 258 int order = get_order(size); 260 259 phys_addr_t phys;
+4 -2
include/xen/swiotlb-xen.h
··· 7 7 8 8 extern void 9 9 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 10 - dma_addr_t *dma_handle, gfp_t flags); 10 + dma_addr_t *dma_handle, gfp_t flags, 11 + struct dma_attrs *attrs); 11 12 12 13 extern void 13 14 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, 14 - void *vaddr, dma_addr_t dma_handle); 15 + void *vaddr, dma_addr_t dma_handle, 16 + struct dma_attrs *attrs); 15 17 16 18 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, 17 19 unsigned long offset, size_t size,