Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: use unsigned long for dma_attrs

The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer. Thus the pointer can point to const data.
However the attributes do not have to be a bitfield. Instead unsigned
long will do fine:

1. This is just simpler. Both in terms of reading the code and setting
attributes. Instead of initializing local attributes on the stack
and passing a pointer to them to dma_set_attr(), just set the bits.

2. It improves safety and enforces const correctness, because the
attributes are passed by value rather than by pointer.

Semantic patches for this change (at least most of them):

virtual patch
virtual context

@r@
identifier f, attrs;

@@
f(...,
- struct dma_attrs *attrs
+ unsigned long attrs
, ...)
{
...
}

@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)

and

// Options: --all-includes
virtual patch
virtual context

@r@
identifier f, attrs;
type t;

@@
t f(..., struct dma_attrs *attrs);

@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)

Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Acked-by: Mark Salter <msalter@redhat.com> [c6x]
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com> [cris]
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> [drm]
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Fabien Dessenne <fabien.dessenne@st.com> [bdisp]
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com> [vb2-core]
Acked-by: David Vrabel <david.vrabel@citrix.com> [xen]
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> [xen swiotlb]
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Richard Kuo <rkuo@codeaurora.org> [hexagon]
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no> [avr32]
Acked-by: Vineet Gupta <vgupta@synopsys.com> [arc]
Acked-by: Robin Murphy <robin.murphy@arm.com> [arm64 and dma-iommu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Krzysztof Kozlowski, committed by Linus Torvalds.

Commit: 00085f1e (parent: 1605d271)

+705 -804
+14 -19
Documentation/DMA-API.txt
··· 369 369 dma_addr_t 370 370 dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size, 371 371 enum dma_data_direction dir, 372 - struct dma_attrs *attrs) 372 + unsigned long attrs) 373 373 374 374 void 375 375 dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, 376 376 size_t size, enum dma_data_direction dir, 377 - struct dma_attrs *attrs) 377 + unsigned long attrs) 378 378 379 379 int 380 380 dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, 381 381 int nents, enum dma_data_direction dir, 382 - struct dma_attrs *attrs) 382 + unsigned long attrs) 383 383 384 384 void 385 385 dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 386 386 int nents, enum dma_data_direction dir, 387 - struct dma_attrs *attrs) 387 + unsigned long attrs) 388 388 389 389 The four functions above are just like the counterpart functions 390 390 without the _attrs suffixes, except that they pass an optional 391 - struct dma_attrs*. 392 - 393 - struct dma_attrs encapsulates a set of "DMA attributes". For the 394 - definition of struct dma_attrs see linux/dma-attrs.h. 391 + dma_attrs. 395 392 396 393 The interpretation of DMA attributes is architecture-specific, and 397 394 each attribute should be documented in Documentation/DMA-attributes.txt. 398 395 399 - If struct dma_attrs* is NULL, the semantics of each of these 400 - functions is identical to those of the corresponding function 396 + If dma_attrs are 0, the semantics of each of these functions 397 + is identical to those of the corresponding function 401 398 without the _attrs suffix. As a result dma_map_single_attrs() 402 399 can generally replace dma_map_single(), etc. 
403 400 ··· 402 405 you could pass an attribute DMA_ATTR_FOO when mapping memory 403 406 for DMA: 404 407 405 - #include <linux/dma-attrs.h> 406 - /* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and 408 + #include <linux/dma-mapping.h> 409 + /* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and 407 410 * documented in Documentation/DMA-attributes.txt */ 408 411 ... 409 412 410 - DEFINE_DMA_ATTRS(attrs); 411 - dma_set_attr(DMA_ATTR_FOO, &attrs); 413 + unsigned long attr; 414 + attr |= DMA_ATTR_FOO; 412 415 .... 413 - n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attr); 416 + n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attr); 414 417 .... 415 418 416 419 Architectures that care about DMA_ATTR_FOO would check for its ··· 419 422 420 423 void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr, 421 424 size_t size, enum dma_data_direction dir, 422 - struct dma_attrs *attrs) 425 + unsigned long attrs) 423 426 { 424 427 .... 425 - int foo = dma_get_attr(DMA_ATTR_FOO, attrs); 426 - .... 427 - if (foo) 428 + if (attrs & DMA_ATTR_FOO) 428 429 /* twizzle the frobnozzle */ 429 430 .... 430 431
+1 -1
Documentation/DMA-attributes.txt
··· 2 2 ============== 3 3 4 4 This document describes the semantics of the DMA attributes that are 5 - defined in linux/dma-attrs.h. 5 + defined in linux/dma-mapping.h. 6 6 7 7 DMA_ATTR_WRITE_BARRIER 8 8 ----------------------
-2
arch/alpha/include/asm/dma-mapping.h
··· 1 1 #ifndef _ALPHA_DMA_MAPPING_H 2 2 #define _ALPHA_DMA_MAPPING_H 3 3 4 - #include <linux/dma-attrs.h> 5 - 6 4 extern struct dma_map_ops *dma_ops; 7 5 8 6 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+1 -1
arch/alpha/kernel/pci-noop.c
··· 109 109 110 110 static void *alpha_noop_alloc_coherent(struct device *dev, size_t size, 111 111 dma_addr_t *dma_handle, gfp_t gfp, 112 - struct dma_attrs *attrs) 112 + unsigned long attrs) 113 113 { 114 114 void *ret; 115 115
+6 -6
arch/alpha/kernel/pci_iommu.c
··· 349 349 static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page, 350 350 unsigned long offset, size_t size, 351 351 enum dma_data_direction dir, 352 - struct dma_attrs *attrs) 352 + unsigned long attrs) 353 353 { 354 354 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 355 355 int dac_allowed; ··· 369 369 370 370 static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr, 371 371 size_t size, enum dma_data_direction dir, 372 - struct dma_attrs *attrs) 372 + unsigned long attrs) 373 373 { 374 374 unsigned long flags; 375 375 struct pci_dev *pdev = alpha_gendev_to_pci(dev); ··· 433 433 434 434 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size, 435 435 dma_addr_t *dma_addrp, gfp_t gfp, 436 - struct dma_attrs *attrs) 436 + unsigned long attrs) 437 437 { 438 438 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 439 439 void *cpu_addr; ··· 478 478 479 479 static void alpha_pci_free_coherent(struct device *dev, size_t size, 480 480 void *cpu_addr, dma_addr_t dma_addr, 481 - struct dma_attrs *attrs) 481 + unsigned long attrs) 482 482 { 483 483 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 484 484 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); ··· 651 651 652 652 static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg, 653 653 int nents, enum dma_data_direction dir, 654 - struct dma_attrs *attrs) 654 + unsigned long attrs) 655 655 { 656 656 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 657 657 struct scatterlist *start, *end, *out; ··· 729 729 730 730 static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg, 731 731 int nents, enum dma_data_direction dir, 732 - struct dma_attrs *attrs) 732 + unsigned long attrs) 733 733 { 734 734 struct pci_dev *pdev = alpha_gendev_to_pci(dev); 735 735 unsigned long flags;
+6 -6
arch/arc/mm/dma.c
··· 22 22 23 23 24 24 static void *arc_dma_alloc(struct device *dev, size_t size, 25 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 25 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 26 26 { 27 27 unsigned long order = get_order(size); 28 28 struct page *page; ··· 46 46 * (vs. always going to memory - thus are faster) 47 47 */ 48 48 if ((is_isa_arcv2() && ioc_exists) || 49 - dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) 49 + (attrs & DMA_ATTR_NON_CONSISTENT)) 50 50 need_coh = 0; 51 51 52 52 /* ··· 90 90 } 91 91 92 92 static void arc_dma_free(struct device *dev, size_t size, void *vaddr, 93 - dma_addr_t dma_handle, struct dma_attrs *attrs) 93 + dma_addr_t dma_handle, unsigned long attrs) 94 94 { 95 95 phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle); 96 96 struct page *page = virt_to_page(paddr); 97 97 int is_non_coh = 1; 98 98 99 - is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) || 99 + is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) || 100 100 (is_isa_arcv2() && ioc_exists); 101 101 102 102 if (PageHighMem(page) || !is_non_coh) ··· 130 130 131 131 static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page, 132 132 unsigned long offset, size_t size, enum dma_data_direction dir, 133 - struct dma_attrs *attrs) 133 + unsigned long attrs) 134 134 { 135 135 phys_addr_t paddr = page_to_phys(page) + offset; 136 136 _dma_cache_sync(paddr, size, dir); ··· 138 138 } 139 139 140 140 static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg, 141 - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 141 + int nents, enum dma_data_direction dir, unsigned long attrs) 142 142 { 143 143 struct scatterlist *s; 144 144 int i;
+2 -2
arch/arm/common/dmabounce.c
··· 310 310 */ 311 311 static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, 312 312 unsigned long offset, size_t size, enum dma_data_direction dir, 313 - struct dma_attrs *attrs) 313 + unsigned long attrs) 314 314 { 315 315 dma_addr_t dma_addr; 316 316 int ret; ··· 344 344 * should be) 345 345 */ 346 346 static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, 347 - enum dma_data_direction dir, struct dma_attrs *attrs) 347 + enum dma_data_direction dir, unsigned long attrs) 348 348 { 349 349 struct safe_buffer *buf; 350 350
+6 -7
arch/arm/include/asm/dma-mapping.h
··· 5 5 6 6 #include <linux/mm_types.h> 7 7 #include <linux/scatterlist.h> 8 - #include <linux/dma-attrs.h> 9 8 #include <linux/dma-debug.h> 10 9 11 10 #include <asm/memory.h> ··· 173 174 * to be the device-viewed address. 174 175 */ 175 176 extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 176 - gfp_t gfp, struct dma_attrs *attrs); 177 + gfp_t gfp, unsigned long attrs); 177 178 178 179 /** 179 180 * arm_dma_free - free memory allocated by arm_dma_alloc ··· 190 191 * during and after this call executing are illegal. 191 192 */ 192 193 extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 193 - dma_addr_t handle, struct dma_attrs *attrs); 194 + dma_addr_t handle, unsigned long attrs); 194 195 195 196 /** 196 197 * arm_dma_mmap - map a coherent DMA allocation into user space ··· 207 208 */ 208 209 extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 209 210 void *cpu_addr, dma_addr_t dma_addr, size_t size, 210 - struct dma_attrs *attrs); 211 + unsigned long attrs); 211 212 212 213 /* 213 214 * This can be called during early boot to increase the size of the atomic ··· 261 262 * The scatter list versions of the above methods. 
262 263 */ 263 264 extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, 264 - enum dma_data_direction, struct dma_attrs *attrs); 265 + enum dma_data_direction, unsigned long attrs); 265 266 extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, 266 - enum dma_data_direction, struct dma_attrs *attrs); 267 + enum dma_data_direction, unsigned long attrs); 267 268 extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, 268 269 enum dma_data_direction); 269 270 extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, 270 271 enum dma_data_direction); 271 272 extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, 272 273 void *cpu_addr, dma_addr_t dma_addr, size_t size, 273 - struct dma_attrs *attrs); 274 + unsigned long attrs); 274 275 275 276 #endif /* __KERNEL__ */ 276 277 #endif
+6 -10
arch/arm/include/asm/xen/page-coherent.h
··· 2 2 #define _ASM_ARM_XEN_PAGE_COHERENT_H 3 3 4 4 #include <asm/page.h> 5 - #include <linux/dma-attrs.h> 6 5 #include <linux/dma-mapping.h> 7 6 8 7 void __xen_dma_map_page(struct device *hwdev, struct page *page, 9 8 dma_addr_t dev_addr, unsigned long offset, size_t size, 10 - enum dma_data_direction dir, struct dma_attrs *attrs); 9 + enum dma_data_direction dir, unsigned long attrs); 11 10 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 12 11 size_t size, enum dma_data_direction dir, 13 - struct dma_attrs *attrs); 12 + unsigned long attrs); 14 13 void __xen_dma_sync_single_for_cpu(struct device *hwdev, 15 14 dma_addr_t handle, size_t size, enum dma_data_direction dir); 16 15 ··· 17 18 dma_addr_t handle, size_t size, enum dma_data_direction dir); 18 19 19 20 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 20 - dma_addr_t *dma_handle, gfp_t flags, 21 - struct dma_attrs *attrs) 21 + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) 22 22 { 23 23 return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); 24 24 } 25 25 26 26 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 27 - void *cpu_addr, dma_addr_t dma_handle, 28 - struct dma_attrs *attrs) 27 + void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) 29 28 { 30 29 __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); 31 30 } 32 31 33 32 static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 34 33 dma_addr_t dev_addr, unsigned long offset, size_t size, 35 - enum dma_data_direction dir, struct dma_attrs *attrs) 34 + enum dma_data_direction dir, unsigned long attrs) 36 35 { 37 36 unsigned long page_pfn = page_to_xen_pfn(page); 38 37 unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); ··· 55 58 } 56 59 57 60 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 58 - size_t size, enum dma_data_direction dir, 59 - struct dma_attrs *attrs) 61 + 
size_t size, enum dma_data_direction dir, unsigned long attrs) 60 62 { 61 63 unsigned long pfn = PFN_DOWN(handle); 62 64 /*
+63 -66
arch/arm/mm/dma-mapping.c
··· 128 128 */ 129 129 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, 130 130 unsigned long offset, size_t size, enum dma_data_direction dir, 131 - struct dma_attrs *attrs) 131 + unsigned long attrs) 132 132 { 133 - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 133 + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 134 134 __dma_page_cpu_to_dev(page, offset, size, dir); 135 135 return pfn_to_dma(dev, page_to_pfn(page)) + offset; 136 136 } 137 137 138 138 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, 139 139 unsigned long offset, size_t size, enum dma_data_direction dir, 140 - struct dma_attrs *attrs) 140 + unsigned long attrs) 141 141 { 142 142 return pfn_to_dma(dev, page_to_pfn(page)) + offset; 143 143 } ··· 157 157 * whatever the device wrote there. 158 158 */ 159 159 static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, 160 - size_t size, enum dma_data_direction dir, 161 - struct dma_attrs *attrs) 160 + size_t size, enum dma_data_direction dir, unsigned long attrs) 162 161 { 163 - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 162 + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 164 163 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), 165 164 handle & ~PAGE_MASK, size, dir); 166 165 } ··· 197 198 EXPORT_SYMBOL(arm_dma_ops); 198 199 199 200 static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 200 - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); 201 + dma_addr_t *handle, gfp_t gfp, unsigned long attrs); 201 202 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 202 - dma_addr_t handle, struct dma_attrs *attrs); 203 + dma_addr_t handle, unsigned long attrs); 203 204 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, 204 205 void *cpu_addr, dma_addr_t dma_addr, size_t size, 205 - struct dma_attrs *attrs); 206 + unsigned long attrs); 206 207 207 208 struct dma_map_ops arm_coherent_dma_ops = { 
208 209 .alloc = arm_coherent_dma_alloc, ··· 638 639 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); 639 640 } 640 641 641 - static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) 642 + static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot) 642 643 { 643 - prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ? 644 - pgprot_writecombine(prot) : 645 - pgprot_dmacoherent(prot); 644 + prot = (attrs & DMA_ATTR_WRITE_COMBINE) ? 645 + pgprot_writecombine(prot) : 646 + pgprot_dmacoherent(prot); 646 647 return prot; 647 648 } 648 649 ··· 750 751 751 752 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 752 753 gfp_t gfp, pgprot_t prot, bool is_coherent, 753 - struct dma_attrs *attrs, const void *caller) 754 + unsigned long attrs, const void *caller) 754 755 { 755 756 u64 mask = get_coherent_dma_mask(dev); 756 757 struct page *page = NULL; ··· 763 764 .gfp = gfp, 764 765 .prot = prot, 765 766 .caller = caller, 766 - .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), 767 + .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), 767 768 .coherent_flag = is_coherent ? COHERENT : NORMAL, 768 769 }; 769 770 ··· 833 834 * virtual and bus address for that space. 
834 835 */ 835 836 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 836 - gfp_t gfp, struct dma_attrs *attrs) 837 + gfp_t gfp, unsigned long attrs) 837 838 { 838 839 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 839 840 ··· 842 843 } 843 844 844 845 static void *arm_coherent_dma_alloc(struct device *dev, size_t size, 845 - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 846 + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 846 847 { 847 848 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, 848 849 attrs, __builtin_return_address(0)); ··· 850 851 851 852 static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 852 853 void *cpu_addr, dma_addr_t dma_addr, size_t size, 853 - struct dma_attrs *attrs) 854 + unsigned long attrs) 854 855 { 855 856 int ret = -ENXIO; 856 857 #ifdef CONFIG_MMU ··· 878 879 */ 879 880 static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, 880 881 void *cpu_addr, dma_addr_t dma_addr, size_t size, 881 - struct dma_attrs *attrs) 882 + unsigned long attrs) 882 883 { 883 884 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 884 885 } 885 886 886 887 int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, 887 888 void *cpu_addr, dma_addr_t dma_addr, size_t size, 888 - struct dma_attrs *attrs) 889 + unsigned long attrs) 889 890 { 890 891 #ifdef CONFIG_MMU 891 892 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); ··· 897 898 * Free a buffer as defined by the above mapping. 
898 899 */ 899 900 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 900 - dma_addr_t handle, struct dma_attrs *attrs, 901 + dma_addr_t handle, unsigned long attrs, 901 902 bool is_coherent) 902 903 { 903 904 struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); ··· 907 908 .size = PAGE_ALIGN(size), 908 909 .cpu_addr = cpu_addr, 909 910 .page = page, 910 - .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs), 911 + .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0), 911 912 }; 912 913 913 914 buf = arm_dma_buffer_find(cpu_addr); ··· 919 920 } 920 921 921 922 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, 922 - dma_addr_t handle, struct dma_attrs *attrs) 923 + dma_addr_t handle, unsigned long attrs) 923 924 { 924 925 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); 925 926 } 926 927 927 928 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, 928 - dma_addr_t handle, struct dma_attrs *attrs) 929 + dma_addr_t handle, unsigned long attrs) 929 930 { 930 931 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); 931 932 } 932 933 933 934 int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, 934 935 void *cpu_addr, dma_addr_t handle, size_t size, 935 - struct dma_attrs *attrs) 936 + unsigned long attrs) 936 937 { 937 938 struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); 938 939 int ret; ··· 1065 1066 * here. 1066 1067 */ 1067 1068 int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 1068 - enum dma_data_direction dir, struct dma_attrs *attrs) 1069 + enum dma_data_direction dir, unsigned long attrs) 1069 1070 { 1070 1071 struct dma_map_ops *ops = get_dma_ops(dev); 1071 1072 struct scatterlist *s; ··· 1099 1100 * rules concerning calls here are the same as for dma_unmap_single(). 
1100 1101 */ 1101 1102 void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 1102 - enum dma_data_direction dir, struct dma_attrs *attrs) 1103 + enum dma_data_direction dir, unsigned long attrs) 1103 1104 { 1104 1105 struct dma_map_ops *ops = get_dma_ops(dev); 1105 1106 struct scatterlist *s; ··· 1272 1273 static const int iommu_order_array[] = { 9, 8, 4, 0 }; 1273 1274 1274 1275 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, 1275 - gfp_t gfp, struct dma_attrs *attrs, 1276 + gfp_t gfp, unsigned long attrs, 1276 1277 int coherent_flag) 1277 1278 { 1278 1279 struct page **pages; ··· 1288 1289 if (!pages) 1289 1290 return NULL; 1290 1291 1291 - if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) 1292 + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) 1292 1293 { 1293 1294 unsigned long order = get_order(size); 1294 1295 struct page *page; ··· 1306 1307 } 1307 1308 1308 1309 /* Go straight to 4K chunks if caller says it's OK. */ 1309 - if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) 1310 + if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) 1310 1311 order_idx = ARRAY_SIZE(iommu_order_array) - 1; 1311 1312 1312 1313 /* ··· 1362 1363 } 1363 1364 1364 1365 static int __iommu_free_buffer(struct device *dev, struct page **pages, 1365 - size_t size, struct dma_attrs *attrs) 1366 + size_t size, unsigned long attrs) 1366 1367 { 1367 1368 int count = size >> PAGE_SHIFT; 1368 1369 int i; 1369 1370 1370 - if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { 1371 + if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { 1371 1372 dma_release_from_contiguous(dev, pages[0], count); 1372 1373 } else { 1373 1374 for (i = 0; i < count; i++) ··· 1459 1460 return (struct page **)page; 1460 1461 } 1461 1462 1462 - static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) 1463 + static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) 1463 1464 { 1464 1465 struct vm_struct *area; 1465 1466 1466 1467 if 
(__in_atomic_pool(cpu_addr, PAGE_SIZE)) 1467 1468 return __atomic_get_pages(cpu_addr); 1468 1469 1469 - if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1470 + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 1470 1471 return cpu_addr; 1471 1472 1472 1473 area = find_vm_area(cpu_addr); ··· 1510 1511 } 1511 1512 1512 1513 static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, 1513 - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs, 1514 + dma_addr_t *handle, gfp_t gfp, unsigned long attrs, 1514 1515 int coherent_flag) 1515 1516 { 1516 1517 pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); ··· 1541 1542 if (*handle == DMA_ERROR_CODE) 1542 1543 goto err_buffer; 1543 1544 1544 - if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) 1545 + if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 1545 1546 return pages; 1546 1547 1547 1548 addr = __iommu_alloc_remap(pages, size, gfp, prot, ··· 1559 1560 } 1560 1561 1561 1562 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 1562 - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 1563 + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 1563 1564 { 1564 1565 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); 1565 1566 } 1566 1567 1567 1568 static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, 1568 - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 1569 + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 1569 1570 { 1570 1571 return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); 1571 1572 } 1572 1573 1573 1574 static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 1574 1575 void *cpu_addr, dma_addr_t dma_addr, size_t size, 1575 - struct dma_attrs *attrs) 1576 + unsigned long attrs) 1576 1577 { 1577 1578 unsigned long uaddr = vma->vm_start; 1578 1579 unsigned long usize = vma->vm_end - vma->vm_start; ··· 1602 1603 } 1603 1604 static int arm_iommu_mmap_attrs(struct device *dev, 1604 1605 struct 
vm_area_struct *vma, void *cpu_addr, 1605 - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) 1606 + dma_addr_t dma_addr, size_t size, unsigned long attrs) 1606 1607 { 1607 1608 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 1608 1609 ··· 1611 1612 1612 1613 static int arm_coherent_iommu_mmap_attrs(struct device *dev, 1613 1614 struct vm_area_struct *vma, void *cpu_addr, 1614 - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) 1615 + dma_addr_t dma_addr, size_t size, unsigned long attrs) 1615 1616 { 1616 1617 return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 1617 1618 } ··· 1621 1622 * Must not be called with IRQs disabled. 1622 1623 */ 1623 1624 void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 1624 - dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag) 1625 + dma_addr_t handle, unsigned long attrs, int coherent_flag) 1625 1626 { 1626 1627 struct page **pages; 1627 1628 size = PAGE_ALIGN(size); ··· 1637 1638 return; 1638 1639 } 1639 1640 1640 - if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { 1641 + if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) { 1641 1642 dma_common_free_remap(cpu_addr, size, 1642 1643 VM_ARM_DMA_CONSISTENT | VM_USERMAP); 1643 1644 } ··· 1647 1648 } 1648 1649 1649 1650 void arm_iommu_free_attrs(struct device *dev, size_t size, 1650 - void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) 1651 + void *cpu_addr, dma_addr_t handle, unsigned long attrs) 1651 1652 { 1652 1653 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); 1653 1654 } 1654 1655 1655 1656 void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, 1656 - void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) 1657 + void *cpu_addr, dma_addr_t handle, unsigned long attrs) 1657 1658 { 1658 1659 __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); 1659 1660 } 1660 1661 1661 1662 static int arm_iommu_get_sgtable(struct device *dev, 
struct sg_table *sgt, 1662 1663 void *cpu_addr, dma_addr_t dma_addr, 1663 - size_t size, struct dma_attrs *attrs) 1664 + size_t size, unsigned long attrs) 1664 1665 { 1665 1666 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 1666 1667 struct page **pages = __iommu_get_pages(cpu_addr, attrs); ··· 1698 1699 */ 1699 1700 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 1700 1701 size_t size, dma_addr_t *handle, 1701 - enum dma_data_direction dir, struct dma_attrs *attrs, 1702 + enum dma_data_direction dir, unsigned long attrs, 1702 1703 bool is_coherent) 1703 1704 { 1704 1705 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); ··· 1719 1720 phys_addr_t phys = page_to_phys(sg_page(s)); 1720 1721 unsigned int len = PAGE_ALIGN(s->offset + s->length); 1721 1722 1722 - if (!is_coherent && 1723 - !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 1723 + if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 1724 1724 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 1725 1725 1726 1726 prot = __dma_direction_to_prot(dir); ··· 1740 1742 } 1741 1743 1742 1744 static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 1743 - enum dma_data_direction dir, struct dma_attrs *attrs, 1745 + enum dma_data_direction dir, unsigned long attrs, 1744 1746 bool is_coherent) 1745 1747 { 1746 1748 struct scatterlist *s = sg, *dma = sg, *start = sg; ··· 1798 1800 * obtained via sg_dma_{address,length}. 1799 1801 */ 1800 1802 int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 1801 - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 1803 + int nents, enum dma_data_direction dir, unsigned long attrs) 1802 1804 { 1803 1805 return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 1804 1806 } ··· 1816 1818 * sg_dma_{address,length}. 
1817 1819 */ 1818 1820 int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 1819 - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 1821 + int nents, enum dma_data_direction dir, unsigned long attrs) 1820 1822 { 1821 1823 return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 1822 1824 } 1823 1825 1824 1826 static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 1825 - int nents, enum dma_data_direction dir, struct dma_attrs *attrs, 1826 - bool is_coherent) 1827 + int nents, enum dma_data_direction dir, 1828 + unsigned long attrs, bool is_coherent) 1827 1829 { 1828 1830 struct scatterlist *s; 1829 1831 int i; ··· 1832 1834 if (sg_dma_len(s)) 1833 1835 __iommu_remove_mapping(dev, sg_dma_address(s), 1834 1836 sg_dma_len(s)); 1835 - if (!is_coherent && 1836 - !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 1837 + if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 1837 1838 __dma_page_dev_to_cpu(sg_page(s), s->offset, 1838 1839 s->length, dir); 1839 1840 } ··· 1849 1852 * rules concerning calls here are the same as for dma_unmap_single(). 1850 1853 */ 1851 1854 void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 1852 - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 1855 + int nents, enum dma_data_direction dir, 1856 + unsigned long attrs) 1853 1857 { 1854 1858 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 1855 1859 } ··· 1866 1868 * rules concerning calls here are the same as for dma_unmap_single(). 
1867 1869 */ 1868 1870 void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 1869 - enum dma_data_direction dir, struct dma_attrs *attrs) 1871 + enum dma_data_direction dir, 1872 + unsigned long attrs) 1870 1873 { 1871 1874 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 1872 1875 } ··· 1920 1921 */ 1921 1922 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 1922 1923 unsigned long offset, size_t size, enum dma_data_direction dir, 1923 - struct dma_attrs *attrs) 1924 + unsigned long attrs) 1924 1925 { 1925 1926 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 1926 1927 dma_addr_t dma_addr; ··· 1954 1955 */ 1955 1956 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 1956 1957 unsigned long offset, size_t size, enum dma_data_direction dir, 1957 - struct dma_attrs *attrs) 1958 + unsigned long attrs) 1958 1959 { 1959 - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 1960 + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 1960 1961 __dma_page_cpu_to_dev(page, offset, size, dir); 1961 1962 1962 1963 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); ··· 1972 1973 * Coherent IOMMU aware version of arm_dma_unmap_page() 1973 1974 */ 1974 1975 static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 1975 - size_t size, enum dma_data_direction dir, 1976 - struct dma_attrs *attrs) 1976 + size_t size, enum dma_data_direction dir, unsigned long attrs) 1977 1977 { 1978 1978 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 1979 1979 dma_addr_t iova = handle & PAGE_MASK; ··· 1996 1998 * IOMMU aware version of arm_dma_unmap_page() 1997 1999 */ 1998 2000 static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 1999 - size_t size, enum dma_data_direction dir, 2000 - struct dma_attrs *attrs) 2001 + size_t size, enum dma_data_direction dir, unsigned long attrs) 2001 2002 { 2002 2003 struct dma_iommu_mapping 
*mapping = to_dma_iommu_mapping(dev); 2003 2004 dma_addr_t iova = handle & PAGE_MASK; ··· 2007 2010 if (!iova) 2008 2011 return; 2009 2012 2010 - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 2013 + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 2011 2014 __dma_page_dev_to_cpu(page, offset, size, dir); 2012 2015 2013 2016 iommu_unmap(mapping->domain, iova, len);
+4 -4
arch/arm/xen/mm.c
··· 98 98 99 99 void __xen_dma_map_page(struct device *hwdev, struct page *page, 100 100 dma_addr_t dev_addr, unsigned long offset, size_t size, 101 - enum dma_data_direction dir, struct dma_attrs *attrs) 101 + enum dma_data_direction dir, unsigned long attrs) 102 102 { 103 103 if (is_device_dma_coherent(hwdev)) 104 104 return; 105 - if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 105 + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) 106 106 return; 107 107 108 108 __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir); ··· 110 110 111 111 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 112 112 size_t size, enum dma_data_direction dir, 113 - struct dma_attrs *attrs) 113 + unsigned long attrs) 114 114 115 115 { 116 116 if (is_device_dma_coherent(hwdev)) 117 117 return; 118 - if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 118 + if (attrs & DMA_ATTR_SKIP_CPU_SYNC) 119 119 return; 120 120 121 121 __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
+33 -33
arch/arm64/mm/dma-mapping.c
··· 32 32 33 33 static int swiotlb __read_mostly; 34 34 35 - static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot, 35 + static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot, 36 36 bool coherent) 37 37 { 38 - if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) 38 + if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE)) 39 39 return pgprot_writecombine(prot); 40 40 return prot; 41 41 } ··· 91 91 92 92 static void *__dma_alloc_coherent(struct device *dev, size_t size, 93 93 dma_addr_t *dma_handle, gfp_t flags, 94 - struct dma_attrs *attrs) 94 + unsigned long attrs) 95 95 { 96 96 if (dev == NULL) { 97 97 WARN_ONCE(1, "Use an actual device structure for DMA allocation\n"); ··· 121 121 122 122 static void __dma_free_coherent(struct device *dev, size_t size, 123 123 void *vaddr, dma_addr_t dma_handle, 124 - struct dma_attrs *attrs) 124 + unsigned long attrs) 125 125 { 126 126 bool freed; 127 127 phys_addr_t paddr = dma_to_phys(dev, dma_handle); ··· 140 140 141 141 static void *__dma_alloc(struct device *dev, size_t size, 142 142 dma_addr_t *dma_handle, gfp_t flags, 143 - struct dma_attrs *attrs) 143 + unsigned long attrs) 144 144 { 145 145 struct page *page; 146 146 void *ptr, *coherent_ptr; ··· 188 188 189 189 static void __dma_free(struct device *dev, size_t size, 190 190 void *vaddr, dma_addr_t dma_handle, 191 - struct dma_attrs *attrs) 191 + unsigned long attrs) 192 192 { 193 193 void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); 194 194 ··· 205 205 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page, 206 206 unsigned long offset, size_t size, 207 207 enum dma_data_direction dir, 208 - struct dma_attrs *attrs) 208 + unsigned long attrs) 209 209 { 210 210 dma_addr_t dev_addr; 211 211 ··· 219 219 220 220 static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr, 221 221 size_t size, enum dma_data_direction dir, 222 - struct dma_attrs *attrs) 222 + unsigned long attrs) 223 223 { 
224 224 if (!is_device_dma_coherent(dev)) 225 225 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); ··· 228 228 229 229 static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, 230 230 int nelems, enum dma_data_direction dir, 231 - struct dma_attrs *attrs) 231 + unsigned long attrs) 232 232 { 233 233 struct scatterlist *sg; 234 234 int i, ret; ··· 245 245 static void __swiotlb_unmap_sg_attrs(struct device *dev, 246 246 struct scatterlist *sgl, int nelems, 247 247 enum dma_data_direction dir, 248 - struct dma_attrs *attrs) 248 + unsigned long attrs) 249 249 { 250 250 struct scatterlist *sg; 251 251 int i; ··· 306 306 static int __swiotlb_mmap(struct device *dev, 307 307 struct vm_area_struct *vma, 308 308 void *cpu_addr, dma_addr_t dma_addr, size_t size, 309 - struct dma_attrs *attrs) 309 + unsigned long attrs) 310 310 { 311 311 int ret = -ENXIO; 312 312 unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> ··· 333 333 334 334 static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt, 335 335 void *cpu_addr, dma_addr_t handle, size_t size, 336 - struct dma_attrs *attrs) 336 + unsigned long attrs) 337 337 { 338 338 int ret = sg_alloc_table(sgt, 1, GFP_KERNEL); 339 339 ··· 435 435 436 436 static void *__dummy_alloc(struct device *dev, size_t size, 437 437 dma_addr_t *dma_handle, gfp_t flags, 438 - struct dma_attrs *attrs) 438 + unsigned long attrs) 439 439 { 440 440 return NULL; 441 441 } 442 442 443 443 static void __dummy_free(struct device *dev, size_t size, 444 444 void *vaddr, dma_addr_t dma_handle, 445 - struct dma_attrs *attrs) 445 + unsigned long attrs) 446 446 { 447 447 } 448 448 449 449 static int __dummy_mmap(struct device *dev, 450 450 struct vm_area_struct *vma, 451 451 void *cpu_addr, dma_addr_t dma_addr, size_t size, 452 - struct dma_attrs *attrs) 452 + unsigned long attrs) 453 453 { 454 454 return -ENXIO; 455 455 } ··· 457 457 static dma_addr_t __dummy_map_page(struct device *dev, 
struct page *page, 458 458 unsigned long offset, size_t size, 459 459 enum dma_data_direction dir, 460 - struct dma_attrs *attrs) 460 + unsigned long attrs) 461 461 { 462 462 return DMA_ERROR_CODE; 463 463 } 464 464 465 465 static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr, 466 466 size_t size, enum dma_data_direction dir, 467 - struct dma_attrs *attrs) 467 + unsigned long attrs) 468 468 { 469 469 } 470 470 471 471 static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl, 472 472 int nelems, enum dma_data_direction dir, 473 - struct dma_attrs *attrs) 473 + unsigned long attrs) 474 474 { 475 475 return 0; 476 476 } ··· 478 478 static void __dummy_unmap_sg(struct device *dev, 479 479 struct scatterlist *sgl, int nelems, 480 480 enum dma_data_direction dir, 481 - struct dma_attrs *attrs) 481 + unsigned long attrs) 482 482 { 483 483 } 484 484 ··· 553 553 554 554 static void *__iommu_alloc_attrs(struct device *dev, size_t size, 555 555 dma_addr_t *handle, gfp_t gfp, 556 - struct dma_attrs *attrs) 556 + unsigned long attrs) 557 557 { 558 558 bool coherent = is_device_dma_coherent(dev); 559 559 int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent); ··· 613 613 } 614 614 615 615 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 616 - dma_addr_t handle, struct dma_attrs *attrs) 616 + dma_addr_t handle, unsigned long attrs) 617 617 { 618 618 size_t iosize = size; 619 619 ··· 629 629 * Hence how dodgy the below logic looks... 
630 630 */ 631 631 if (__in_atomic_pool(cpu_addr, size)) { 632 - iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); 632 + iommu_dma_unmap_page(dev, handle, iosize, 0, 0); 633 633 __free_from_pool(cpu_addr, size); 634 634 } else if (is_vmalloc_addr(cpu_addr)){ 635 635 struct vm_struct *area = find_vm_area(cpu_addr); ··· 639 639 iommu_dma_free(dev, area->pages, iosize, &handle); 640 640 dma_common_free_remap(cpu_addr, size, VM_USERMAP); 641 641 } else { 642 - iommu_dma_unmap_page(dev, handle, iosize, 0, NULL); 642 + iommu_dma_unmap_page(dev, handle, iosize, 0, 0); 643 643 __free_pages(virt_to_page(cpu_addr), get_order(size)); 644 644 } 645 645 } 646 646 647 647 static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 648 648 void *cpu_addr, dma_addr_t dma_addr, size_t size, 649 - struct dma_attrs *attrs) 649 + unsigned long attrs) 650 650 { 651 651 struct vm_struct *area; 652 652 int ret; ··· 666 666 667 667 static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 668 668 void *cpu_addr, dma_addr_t dma_addr, 669 - size_t size, struct dma_attrs *attrs) 669 + size_t size, unsigned long attrs) 670 670 { 671 671 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 672 672 struct vm_struct *area = find_vm_area(cpu_addr); ··· 707 707 static dma_addr_t __iommu_map_page(struct device *dev, struct page *page, 708 708 unsigned long offset, size_t size, 709 709 enum dma_data_direction dir, 710 - struct dma_attrs *attrs) 710 + unsigned long attrs) 711 711 { 712 712 bool coherent = is_device_dma_coherent(dev); 713 713 int prot = dma_direction_to_prot(dir, coherent); 714 714 dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); 715 715 716 716 if (!iommu_dma_mapping_error(dev, dev_addr) && 717 - !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 717 + (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 718 718 __iommu_sync_single_for_device(dev, dev_addr, size, dir); 719 719 720 720 return dev_addr; ··· 722 722 723 723 static void 
__iommu_unmap_page(struct device *dev, dma_addr_t dev_addr, 724 724 size_t size, enum dma_data_direction dir, 725 - struct dma_attrs *attrs) 725 + unsigned long attrs) 726 726 { 727 - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 727 + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 728 728 __iommu_sync_single_for_cpu(dev, dev_addr, size, dir); 729 729 730 730 iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs); ··· 760 760 761 761 static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl, 762 762 int nelems, enum dma_data_direction dir, 763 - struct dma_attrs *attrs) 763 + unsigned long attrs) 764 764 { 765 765 bool coherent = is_device_dma_coherent(dev); 766 766 767 - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 767 + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 768 768 __iommu_sync_sg_for_device(dev, sgl, nelems, dir); 769 769 770 770 return iommu_dma_map_sg(dev, sgl, nelems, ··· 774 774 static void __iommu_unmap_sg_attrs(struct device *dev, 775 775 struct scatterlist *sgl, int nelems, 776 776 enum dma_data_direction dir, 777 - struct dma_attrs *attrs) 777 + unsigned long attrs) 778 778 { 779 - if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) 779 + if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 780 780 __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir); 781 781 782 782 iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
+6 -6
arch/avr32/mm/dma-coherent.c
··· 99 99 } 100 100 101 101 static void *avr32_dma_alloc(struct device *dev, size_t size, 102 - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 102 + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 103 103 { 104 104 struct page *page; 105 105 dma_addr_t phys; ··· 109 109 return NULL; 110 110 phys = page_to_phys(page); 111 111 112 - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { 112 + if (attrs & DMA_ATTR_WRITE_COMBINE) { 113 113 /* Now, map the page into P3 with write-combining turned on */ 114 114 *handle = phys; 115 115 return __ioremap(phys, size, _PAGE_BUFFER); ··· 119 119 } 120 120 121 121 static void avr32_dma_free(struct device *dev, size_t size, 122 - void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs) 122 + void *cpu_addr, dma_addr_t handle, unsigned long attrs) 123 123 { 124 124 struct page *page; 125 125 126 - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) { 126 + if (attrs & DMA_ATTR_WRITE_COMBINE) { 127 127 iounmap(cpu_addr); 128 128 129 129 page = phys_to_page(handle); ··· 142 142 143 143 static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page, 144 144 unsigned long offset, size_t size, 145 - enum dma_data_direction direction, struct dma_attrs *attrs) 145 + enum dma_data_direction direction, unsigned long attrs) 146 146 { 147 147 void *cpu_addr = page_address(page) + offset; 148 148 ··· 152 152 153 153 static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist, 154 154 int nents, enum dma_data_direction direction, 155 - struct dma_attrs *attrs) 155 + unsigned long attrs) 156 156 { 157 157 int i; 158 158 struct scatterlist *sg;
+4 -4
arch/blackfin/kernel/dma-mapping.c
··· 79 79 } 80 80 81 81 static void *bfin_dma_alloc(struct device *dev, size_t size, 82 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 82 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 83 83 { 84 84 void *ret; 85 85 ··· 94 94 } 95 95 96 96 static void bfin_dma_free(struct device *dev, size_t size, void *vaddr, 97 - dma_addr_t dma_handle, struct dma_attrs *attrs) 97 + dma_addr_t dma_handle, unsigned long attrs) 98 98 { 99 99 __free_dma_pages((unsigned long)vaddr, get_pages(size)); 100 100 } ··· 111 111 112 112 static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list, 113 113 int nents, enum dma_data_direction direction, 114 - struct dma_attrs *attrs) 114 + unsigned long attrs) 115 115 { 116 116 struct scatterlist *sg; 117 117 int i; ··· 139 139 140 140 static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page, 141 141 unsigned long offset, size_t size, enum dma_data_direction dir, 142 - struct dma_attrs *attrs) 142 + unsigned long attrs) 143 143 { 144 144 dma_addr_t handle = (dma_addr_t)(page_address(page) + offset); 145 145
+2 -2
arch/c6x/include/asm/dma-mapping.h
··· 26 26 27 27 extern void coherent_mem_init(u32 start, u32 size); 28 28 void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 29 - gfp_t gfp, struct dma_attrs *attrs); 29 + gfp_t gfp, unsigned long attrs); 30 30 void c6x_dma_free(struct device *dev, size_t size, void *vaddr, 31 - dma_addr_t dma_handle, struct dma_attrs *attrs); 31 + dma_addr_t dma_handle, unsigned long attrs); 32 32 33 33 #endif /* _ASM_C6X_DMA_MAPPING_H */
+4 -5
arch/c6x/kernel/dma.c
··· 38 38 39 39 static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page, 40 40 unsigned long offset, size_t size, enum dma_data_direction dir, 41 - struct dma_attrs *attrs) 41 + unsigned long attrs) 42 42 { 43 43 dma_addr_t handle = virt_to_phys(page_address(page) + offset); 44 44 ··· 47 47 } 48 48 49 49 static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle, 50 - size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) 50 + size_t size, enum dma_data_direction dir, unsigned long attrs) 51 51 { 52 52 c6x_dma_sync(handle, size, dir); 53 53 } 54 54 55 55 static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist, 56 - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 56 + int nents, enum dma_data_direction dir, unsigned long attrs) 57 57 { 58 58 struct scatterlist *sg; 59 59 int i; ··· 67 67 } 68 68 69 69 static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 70 - int nents, enum dma_data_direction dir, 71 - struct dma_attrs *attrs) 70 + int nents, enum dma_data_direction dir, unsigned long attrs) 72 71 { 73 72 struct scatterlist *sg; 74 73 int i;
+2 -2
arch/c6x/mm/dma-coherent.c
··· 74 74 * virtual and DMA address for that space. 75 75 */ 76 76 void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 77 - gfp_t gfp, struct dma_attrs *attrs) 77 + gfp_t gfp, unsigned long attrs) 78 78 { 79 79 u32 paddr; 80 80 int order; ··· 99 99 * Free DMA coherent memory as defined by the above mapping. 100 100 */ 101 101 void c6x_dma_free(struct device *dev, size_t size, void *vaddr, 102 - dma_addr_t dma_handle, struct dma_attrs *attrs) 102 + dma_addr_t dma_handle, unsigned long attrs) 103 103 { 104 104 int order; 105 105
+4 -5
arch/cris/arch-v32/drivers/pci/dma.c
··· 17 17 #include <asm/io.h> 18 18 19 19 static void *v32_dma_alloc(struct device *dev, size_t size, 20 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 20 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 21 21 { 22 22 void *ret; 23 23 ··· 37 37 } 38 38 39 39 static void v32_dma_free(struct device *dev, size_t size, void *vaddr, 40 - dma_addr_t dma_handle, struct dma_attrs *attrs) 40 + dma_addr_t dma_handle, unsigned long attrs) 41 41 { 42 42 free_pages((unsigned long)vaddr, get_order(size)); 43 43 } 44 44 45 45 static inline dma_addr_t v32_dma_map_page(struct device *dev, 46 46 struct page *page, unsigned long offset, size_t size, 47 - enum dma_data_direction direction, 48 - struct dma_attrs *attrs) 47 + enum dma_data_direction direction, unsigned long attrs) 49 48 { 50 49 return page_to_phys(page) + offset; 51 50 } 52 51 53 52 static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, 54 53 int nents, enum dma_data_direction direction, 55 - struct dma_attrs *attrs) 54 + unsigned long attrs) 56 55 { 57 56 printk("Map sg\n"); 58 57 return nents;
+4 -4
arch/frv/mb93090-mb00/pci-dma-nommu.c
··· 35 35 static LIST_HEAD(dma_alloc_list); 36 36 37 37 static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, 38 - gfp_t gfp, struct dma_attrs *attrs) 38 + gfp_t gfp, unsigned long attrs) 39 39 { 40 40 struct dma_alloc_record *new; 41 41 struct list_head *this = &dma_alloc_list; ··· 86 86 } 87 87 88 88 static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, 89 - dma_addr_t dma_handle, struct dma_attrs *attrs) 89 + dma_addr_t dma_handle, unsigned long attrs) 90 90 { 91 91 struct dma_alloc_record *rec; 92 92 unsigned long flags; ··· 107 107 108 108 static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, 109 109 int nents, enum dma_data_direction direction, 110 - struct dma_attrs *attrs) 110 + unsigned long attrs) 111 111 { 112 112 int i; 113 113 struct scatterlist *sg; ··· 124 124 125 125 static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, 126 126 unsigned long offset, size_t size, 127 - enum dma_data_direction direction, struct dma_attrs *attrs) 127 + enum dma_data_direction direction, unsigned long attrs) 128 128 { 129 129 BUG_ON(direction == DMA_NONE); 130 130 flush_dcache_page(page);
+4 -5
arch/frv/mb93090-mb00/pci-dma.c
··· 19 19 #include <asm/io.h> 20 20 21 21 static void *frv_dma_alloc(struct device *hwdev, size_t size, 22 - dma_addr_t *dma_handle, gfp_t gfp, 23 - struct dma_attrs *attrs) 22 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 24 23 { 25 24 void *ret; 26 25 ··· 31 32 } 32 33 33 34 static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr, 34 - dma_addr_t dma_handle, struct dma_attrs *attrs) 35 + dma_addr_t dma_handle, unsigned long attrs) 35 36 { 36 37 consistent_free(vaddr); 37 38 } 38 39 39 40 static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist, 40 41 int nents, enum dma_data_direction direction, 41 - struct dma_attrs *attrs) 42 + unsigned long attrs) 42 43 { 43 44 unsigned long dampr2; 44 45 void *vaddr; ··· 68 69 69 70 static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page, 70 71 unsigned long offset, size_t size, 71 - enum dma_data_direction direction, struct dma_attrs *attrs) 72 + enum dma_data_direction direction, unsigned long attrs) 72 73 { 73 74 flush_dcache_page(page); 74 75 return (dma_addr_t) page_to_phys(page) + offset;
+4 -4
arch/h8300/kernel/dma.c
··· 12 12 13 13 static void *dma_alloc(struct device *dev, size_t size, 14 14 dma_addr_t *dma_handle, gfp_t gfp, 15 - struct dma_attrs *attrs) 15 + unsigned long attrs) 16 16 { 17 17 void *ret; 18 18 ··· 32 32 33 33 static void dma_free(struct device *dev, size_t size, 34 34 void *vaddr, dma_addr_t dma_handle, 35 - struct dma_attrs *attrs) 35 + unsigned long attrs) 36 36 37 37 { 38 38 free_pages((unsigned long)vaddr, get_order(size)); ··· 41 41 static dma_addr_t map_page(struct device *dev, struct page *page, 42 42 unsigned long offset, size_t size, 43 43 enum dma_data_direction direction, 44 - struct dma_attrs *attrs) 44 + unsigned long attrs) 45 45 { 46 46 return page_to_phys(page) + offset; 47 47 } 48 48 49 49 static int map_sg(struct device *dev, struct scatterlist *sgl, 50 50 int nents, enum dma_data_direction direction, 51 - struct dma_attrs *attrs) 51 + unsigned long attrs) 52 52 { 53 53 struct scatterlist *sg; 54 54 int i;
-1
arch/hexagon/include/asm/dma-mapping.h
··· 26 26 #include <linux/mm.h> 27 27 #include <linux/scatterlist.h> 28 28 #include <linux/dma-debug.h> 29 - #include <linux/dma-attrs.h> 30 29 #include <asm/io.h> 31 30 32 31 struct device;
+4 -4
arch/hexagon/kernel/dma.c
··· 51 51 52 52 static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size, 53 53 dma_addr_t *dma_addr, gfp_t flag, 54 - struct dma_attrs *attrs) 54 + unsigned long attrs) 55 55 { 56 56 void *ret; 57 57 ··· 84 84 } 85 85 86 86 static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr, 87 - dma_addr_t dma_addr, struct dma_attrs *attrs) 87 + dma_addr_t dma_addr, unsigned long attrs) 88 88 { 89 89 gen_pool_free(coherent_pool, (unsigned long) vaddr, size); 90 90 } ··· 105 105 106 106 static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg, 107 107 int nents, enum dma_data_direction dir, 108 - struct dma_attrs *attrs) 108 + unsigned long attrs) 109 109 { 110 110 struct scatterlist *s; 111 111 int i; ··· 172 172 static dma_addr_t hexagon_map_page(struct device *dev, struct page *page, 173 173 unsigned long offset, size_t size, 174 174 enum dma_data_direction dir, 175 - struct dma_attrs *attrs) 175 + unsigned long attrs) 176 176 { 177 177 dma_addr_t bus = page_to_phys(page) + offset; 178 178 WARN_ON(size == 0);
+11 -11
arch/ia64/hp/common/sba_iommu.c
··· 919 919 static dma_addr_t sba_map_page(struct device *dev, struct page *page, 920 920 unsigned long poff, size_t size, 921 921 enum dma_data_direction dir, 922 - struct dma_attrs *attrs) 922 + unsigned long attrs) 923 923 { 924 924 struct ioc *ioc; 925 925 void *addr = page_address(page) + poff; ··· 1005 1005 1006 1006 static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr, 1007 1007 size_t size, enum dma_data_direction dir, 1008 - struct dma_attrs *attrs) 1008 + unsigned long attrs) 1009 1009 { 1010 1010 return sba_map_page(dev, virt_to_page(addr), 1011 1011 (unsigned long)addr & ~PAGE_MASK, size, dir, attrs); ··· 1046 1046 * See Documentation/DMA-API-HOWTO.txt 1047 1047 */ 1048 1048 static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, 1049 - enum dma_data_direction dir, struct dma_attrs *attrs) 1049 + enum dma_data_direction dir, unsigned long attrs) 1050 1050 { 1051 1051 struct ioc *ioc; 1052 1052 #if DELAYED_RESOURCE_CNT > 0 ··· 1115 1115 } 1116 1116 1117 1117 void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, 1118 - enum dma_data_direction dir, struct dma_attrs *attrs) 1118 + enum dma_data_direction dir, unsigned long attrs) 1119 1119 { 1120 1120 sba_unmap_page(dev, iova, size, dir, attrs); 1121 1121 } ··· 1130 1130 */ 1131 1131 static void * 1132 1132 sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 1133 - gfp_t flags, struct dma_attrs *attrs) 1133 + gfp_t flags, unsigned long attrs) 1134 1134 { 1135 1135 struct ioc *ioc; 1136 1136 void *addr; ··· 1175 1175 * device to map single to get an iova mapping. 
1176 1176 */ 1177 1177 *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, 1178 - size, 0, NULL); 1178 + size, 0, 0); 1179 1179 1180 1180 return addr; 1181 1181 } ··· 1191 1191 * See Documentation/DMA-API-HOWTO.txt 1192 1192 */ 1193 1193 static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, 1194 - dma_addr_t dma_handle, struct dma_attrs *attrs) 1194 + dma_addr_t dma_handle, unsigned long attrs) 1195 1195 { 1196 - sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); 1196 + sba_unmap_single_attrs(dev, dma_handle, size, 0, 0); 1197 1197 free_pages((unsigned long) vaddr, get_order(size)); 1198 1198 } 1199 1199 ··· 1442 1442 1443 1443 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, 1444 1444 int nents, enum dma_data_direction dir, 1445 - struct dma_attrs *attrs); 1445 + unsigned long attrs); 1446 1446 /** 1447 1447 * sba_map_sg - map Scatter/Gather list 1448 1448 * @dev: instance of PCI owned by the driver that's asking. ··· 1455 1455 */ 1456 1456 static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, 1457 1457 int nents, enum dma_data_direction dir, 1458 - struct dma_attrs *attrs) 1458 + unsigned long attrs) 1459 1459 { 1460 1460 struct ioc *ioc; 1461 1461 int coalesced, filled = 0; ··· 1551 1551 */ 1552 1552 static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, 1553 1553 int nents, enum dma_data_direction dir, 1554 - struct dma_attrs *attrs) 1554 + unsigned long attrs) 1555 1555 { 1556 1556 #ifdef ASSERT_PDIR_SANITY 1557 1557 struct ioc *ioc;
-1
arch/ia64/include/asm/machvec.h
··· 22 22 struct task_struct; 23 23 struct pci_dev; 24 24 struct msi_desc; 25 - struct dma_attrs; 26 25 27 26 typedef void ia64_mv_setup_t (char **); 28 27 typedef void ia64_mv_cpu_init_t (void);
+2 -2
arch/ia64/kernel/pci-swiotlb.c
··· 16 16 17 17 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, 18 18 dma_addr_t *dma_handle, gfp_t gfp, 19 - struct dma_attrs *attrs) 19 + unsigned long attrs) 20 20 { 21 21 if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) 22 22 gfp |= GFP_DMA; ··· 25 25 26 26 static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, 27 27 void *vaddr, dma_addr_t dma_addr, 28 - struct dma_attrs *attrs) 28 + unsigned long attrs) 29 29 { 30 30 swiotlb_free_coherent(dev, size, vaddr, dma_addr); 31 31 }
+8 -14
arch/ia64/sn/pci/pci_dma.c
··· 77 77 */ 78 78 static void *sn_dma_alloc_coherent(struct device *dev, size_t size, 79 79 dma_addr_t * dma_handle, gfp_t flags, 80 - struct dma_attrs *attrs) 80 + unsigned long attrs) 81 81 { 82 82 void *cpuaddr; 83 83 unsigned long phys_addr; ··· 138 138 * any associated IOMMU mappings. 139 139 */ 140 140 static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 141 - dma_addr_t dma_handle, struct dma_attrs *attrs) 141 + dma_addr_t dma_handle, unsigned long attrs) 142 142 { 143 143 struct pci_dev *pdev = to_pci_dev(dev); 144 144 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); ··· 176 176 static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page, 177 177 unsigned long offset, size_t size, 178 178 enum dma_data_direction dir, 179 - struct dma_attrs *attrs) 179 + unsigned long attrs) 180 180 { 181 181 void *cpu_addr = page_address(page) + offset; 182 182 dma_addr_t dma_addr; 183 183 unsigned long phys_addr; 184 184 struct pci_dev *pdev = to_pci_dev(dev); 185 185 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 186 - int dmabarr; 187 - 188 - dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); 189 186 190 187 BUG_ON(!dev_is_pci(dev)); 191 188 192 189 phys_addr = __pa(cpu_addr); 193 - if (dmabarr) 190 + if (attrs & DMA_ATTR_WRITE_BARRIER) 194 191 dma_addr = provider->dma_map_consistent(pdev, phys_addr, 195 192 size, SN_DMA_ADDR_PHYS); 196 193 else ··· 215 218 */ 216 219 static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, 217 220 size_t size, enum dma_data_direction dir, 218 - struct dma_attrs *attrs) 221 + unsigned long attrs) 219 222 { 220 223 struct pci_dev *pdev = to_pci_dev(dev); 221 224 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); ··· 237 240 */ 238 241 static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, 239 242 int nhwentries, enum dma_data_direction dir, 240 - struct dma_attrs *attrs) 243 + unsigned long attrs) 241 244 { 
242 245 int i; 243 246 struct pci_dev *pdev = to_pci_dev(dev); ··· 270 273 */ 271 274 static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, 272 275 int nhwentries, enum dma_data_direction dir, 273 - struct dma_attrs *attrs) 276 + unsigned long attrs) 274 277 { 275 278 unsigned long phys_addr; 276 279 struct scatterlist *saved_sg = sgl, *sg; 277 280 struct pci_dev *pdev = to_pci_dev(dev); 278 281 struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); 279 282 int i; 280 - int dmabarr; 281 - 282 - dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); 283 283 284 284 BUG_ON(!dev_is_pci(dev)); 285 285 ··· 286 292 for_each_sg(sgl, sg, nhwentries, i) { 287 293 dma_addr_t dma_addr; 288 294 phys_addr = SG_ENT_PHYS_ADDRESS(sg); 289 - if (dmabarr) 295 + if (attrs & DMA_ATTR_WRITE_BARRIER) 290 296 dma_addr = provider->dma_map_consistent(pdev, 291 297 phys_addr, 292 298 sg->length,
+6 -6
arch/m68k/kernel/dma.c
··· 19 19 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE) 20 20 21 21 static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, 22 - gfp_t flag, struct dma_attrs *attrs) 22 + gfp_t flag, unsigned long attrs) 23 23 { 24 24 struct page *page, **map; 25 25 pgprot_t pgprot; ··· 62 62 } 63 63 64 64 static void m68k_dma_free(struct device *dev, size_t size, void *addr, 65 - dma_addr_t handle, struct dma_attrs *attrs) 65 + dma_addr_t handle, unsigned long attrs) 66 66 { 67 67 pr_debug("dma_free_coherent: %p, %x\n", addr, handle); 68 68 vfree(addr); ··· 73 73 #include <asm/cacheflush.h> 74 74 75 75 static void *m68k_dma_alloc(struct device *dev, size_t size, 76 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 76 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 77 77 { 78 78 void *ret; 79 79 /* ignore region specifiers */ ··· 91 91 } 92 92 93 93 static void m68k_dma_free(struct device *dev, size_t size, void *vaddr, 94 - dma_addr_t dma_handle, struct dma_attrs *attrs) 94 + dma_addr_t dma_handle, unsigned long attrs) 95 95 { 96 96 free_pages((unsigned long)vaddr, get_order(size)); 97 97 } ··· 130 130 131 131 static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page, 132 132 unsigned long offset, size_t size, enum dma_data_direction dir, 133 - struct dma_attrs *attrs) 133 + unsigned long attrs) 134 134 { 135 135 dma_addr_t handle = page_to_phys(page) + offset; 136 136 ··· 139 139 } 140 140 141 141 static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist, 142 - int nents, enum dma_data_direction dir, struct dma_attrs *attrs) 142 + int nents, enum dma_data_direction dir, unsigned long attrs) 143 143 { 144 144 int i; 145 145 struct scatterlist *sg;
+8 -8
arch/metag/kernel/dma.c
··· 172 172 * virtual and bus address for that space. 173 173 */ 174 174 static void *metag_dma_alloc(struct device *dev, size_t size, 175 - dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) 175 + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 176 176 { 177 177 struct page *page; 178 178 struct metag_vm_region *c; ··· 268 268 * free a page as defined by the above mapping. 269 269 */ 270 270 static void metag_dma_free(struct device *dev, size_t size, void *vaddr, 271 - dma_addr_t dma_handle, struct dma_attrs *attrs) 271 + dma_addr_t dma_handle, unsigned long attrs) 272 272 { 273 273 struct metag_vm_region *c; 274 274 unsigned long flags, addr; ··· 331 331 332 332 static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma, 333 333 void *cpu_addr, dma_addr_t dma_addr, size_t size, 334 - struct dma_attrs *attrs) 334 + unsigned long attrs) 335 335 { 336 336 unsigned long flags, user_size, kern_size; 337 337 struct metag_vm_region *c; 338 338 int ret = -ENXIO; 339 339 340 - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) 340 + if (attrs & DMA_ATTR_WRITE_COMBINE) 341 341 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 342 342 else 343 343 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ··· 482 482 483 483 static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page, 484 484 unsigned long offset, size_t size, 485 - enum dma_data_direction direction, struct dma_attrs *attrs) 485 + enum dma_data_direction direction, unsigned long attrs) 486 486 { 487 487 dma_sync_for_device((void *)(page_to_phys(page) + offset), size, 488 488 direction); ··· 491 491 492 492 static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address, 493 493 size_t size, enum dma_data_direction direction, 494 - struct dma_attrs *attrs) 494 + unsigned long attrs) 495 495 { 496 496 dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); 497 497 } 498 498 499 499 static int metag_dma_map_sg(struct device *dev, struct scatterlist 
*sglist, 500 500 int nents, enum dma_data_direction direction, 501 - struct dma_attrs *attrs) 501 + unsigned long attrs) 502 502 { 503 503 struct scatterlist *sg; 504 504 int i; ··· 516 516 517 517 static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 518 518 int nhwentries, enum dma_data_direction direction, 519 - struct dma_attrs *attrs) 519 + unsigned long attrs) 520 520 { 521 521 struct scatterlist *sg; 522 522 int i;
-1
arch/microblaze/include/asm/dma-mapping.h
··· 25 25 #include <linux/mm.h> 26 26 #include <linux/scatterlist.h> 27 27 #include <linux/dma-debug.h> 28 - #include <linux/dma-attrs.h> 29 28 #include <asm/io.h> 30 29 #include <asm/cacheflush.h> 31 30
+6 -6
arch/microblaze/kernel/dma.c
··· 17 17 18 18 static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 19 19 dma_addr_t *dma_handle, gfp_t flag, 20 - struct dma_attrs *attrs) 20 + unsigned long attrs) 21 21 { 22 22 #ifdef NOT_COHERENT_CACHE 23 23 return consistent_alloc(flag, size, dma_handle); ··· 42 42 43 43 static void dma_direct_free_coherent(struct device *dev, size_t size, 44 44 void *vaddr, dma_addr_t dma_handle, 45 - struct dma_attrs *attrs) 45 + unsigned long attrs) 46 46 { 47 47 #ifdef NOT_COHERENT_CACHE 48 48 consistent_free(size, vaddr); ··· 53 53 54 54 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 55 55 int nents, enum dma_data_direction direction, 56 - struct dma_attrs *attrs) 56 + unsigned long attrs) 57 57 { 58 58 struct scatterlist *sg; 59 59 int i; ··· 78 78 unsigned long offset, 79 79 size_t size, 80 80 enum dma_data_direction direction, 81 - struct dma_attrs *attrs) 81 + unsigned long attrs) 82 82 { 83 83 __dma_sync(page_to_phys(page) + offset, size, direction); 84 84 return page_to_phys(page) + offset; ··· 88 88 dma_addr_t dma_address, 89 89 size_t size, 90 90 enum dma_data_direction direction, 91 - struct dma_attrs *attrs) 91 + unsigned long attrs) 92 92 { 93 93 /* There is not necessary to do cache cleanup 94 94 * ··· 157 157 static 158 158 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 159 159 void *cpu_addr, dma_addr_t handle, size_t size, 160 - struct dma_attrs *attrs) 160 + unsigned long attrs) 161 161 { 162 162 #ifdef CONFIG_MMU 163 163 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+4 -4
arch/mips/cavium-octeon/dma-octeon.c
··· 125 125 126 126 static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page, 127 127 unsigned long offset, size_t size, enum dma_data_direction direction, 128 - struct dma_attrs *attrs) 128 + unsigned long attrs) 129 129 { 130 130 dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, 131 131 direction, attrs); ··· 135 135 } 136 136 137 137 static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg, 138 - int nents, enum dma_data_direction direction, struct dma_attrs *attrs) 138 + int nents, enum dma_data_direction direction, unsigned long attrs) 139 139 { 140 140 int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs); 141 141 mb(); ··· 157 157 } 158 158 159 159 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size, 160 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 160 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 161 161 { 162 162 void *ret; 163 163 ··· 189 189 } 190 190 191 191 static void octeon_dma_free_coherent(struct device *dev, size_t size, 192 - void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) 192 + void *vaddr, dma_addr_t dma_handle, unsigned long attrs) 193 193 { 194 194 swiotlb_free_coherent(dev, size, vaddr, dma_handle); 195 195 }
+5 -5
arch/mips/loongson64/common/dma-swiotlb.c
··· 10 10 #include <dma-coherence.h> 11 11 12 12 static void *loongson_dma_alloc_coherent(struct device *dev, size_t size, 13 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 13 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 14 14 { 15 15 void *ret; 16 16 ··· 41 41 } 42 42 43 43 static void loongson_dma_free_coherent(struct device *dev, size_t size, 44 - void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) 44 + void *vaddr, dma_addr_t dma_handle, unsigned long attrs) 45 45 { 46 46 swiotlb_free_coherent(dev, size, vaddr, dma_handle); 47 47 } ··· 49 49 static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page, 50 50 unsigned long offset, size_t size, 51 51 enum dma_data_direction dir, 52 - struct dma_attrs *attrs) 52 + unsigned long attrs) 53 53 { 54 54 dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size, 55 55 dir, attrs); ··· 59 59 60 60 static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg, 61 61 int nents, enum dma_data_direction dir, 62 - struct dma_attrs *attrs) 62 + unsigned long attrs) 63 63 { 64 - int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL); 64 + int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0); 65 65 mb(); 66 66 67 67 return r;
+10 -10
arch/mips/mm/dma-default.c
··· 131 131 } 132 132 133 133 static void *mips_dma_alloc_coherent(struct device *dev, size_t size, 134 - dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs) 134 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 135 135 { 136 136 void *ret; 137 137 struct page *page = NULL; ··· 141 141 * XXX: seems like the coherent and non-coherent implementations could 142 142 * be consolidated. 143 143 */ 144 - if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) 144 + if (attrs & DMA_ATTR_NON_CONSISTENT) 145 145 return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp); 146 146 147 147 gfp = massage_gfp_flags(dev, gfp); ··· 176 176 } 177 177 178 178 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, 179 - dma_addr_t dma_handle, struct dma_attrs *attrs) 179 + dma_addr_t dma_handle, unsigned long attrs) 180 180 { 181 181 unsigned long addr = (unsigned long) vaddr; 182 182 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 183 183 struct page *page = NULL; 184 184 185 - if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { 185 + if (attrs & DMA_ATTR_NON_CONSISTENT) { 186 186 mips_dma_free_noncoherent(dev, size, vaddr, dma_handle); 187 187 return; 188 188 } ··· 200 200 201 201 static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma, 202 202 void *cpu_addr, dma_addr_t dma_addr, size_t size, 203 - struct dma_attrs *attrs) 203 + unsigned long attrs) 204 204 { 205 205 unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 206 206 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT; ··· 214 214 215 215 pfn = page_to_pfn(virt_to_page((void *)addr)); 216 216 217 - if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) 217 + if (attrs & DMA_ATTR_WRITE_COMBINE) 218 218 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); 219 219 else 220 220 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); ··· 291 291 } 292 292 293 293 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, 294 - 
size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) 294 + size_t size, enum dma_data_direction direction, unsigned long attrs) 295 295 { 296 296 if (cpu_needs_post_dma_flush(dev)) 297 297 __dma_sync(dma_addr_to_page(dev, dma_addr), ··· 301 301 } 302 302 303 303 static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist, 304 - int nents, enum dma_data_direction direction, struct dma_attrs *attrs) 304 + int nents, enum dma_data_direction direction, unsigned long attrs) 305 305 { 306 306 int i; 307 307 struct scatterlist *sg; ··· 322 322 323 323 static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page, 324 324 unsigned long offset, size_t size, enum dma_data_direction direction, 325 - struct dma_attrs *attrs) 325 + unsigned long attrs) 326 326 { 327 327 if (!plat_device_is_coherent(dev)) 328 328 __dma_sync(page, offset, size, direction); ··· 332 332 333 333 static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 334 334 int nhwentries, enum dma_data_direction direction, 335 - struct dma_attrs *attrs) 335 + unsigned long attrs) 336 336 { 337 337 int i; 338 338 struct scatterlist *sg;
+2 -2
arch/mips/netlogic/common/nlm-dma.c
··· 45 45 static char *nlm_swiotlb; 46 46 47 47 static void *nlm_dma_alloc_coherent(struct device *dev, size_t size, 48 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 48 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 49 49 { 50 50 /* ignore region specifiers */ 51 51 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM); ··· 62 62 } 63 63 64 64 static void nlm_dma_free_coherent(struct device *dev, size_t size, 65 - void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs) 65 + void *vaddr, dma_addr_t dma_handle, unsigned long attrs) 66 66 { 67 67 swiotlb_free_coherent(dev, size, vaddr, dma_handle); 68 68 }
+4 -4
arch/mn10300/mm/dma-alloc.c
··· 21 21 static unsigned long pci_sram_allocated = 0xbc000000; 22 22 23 23 static void *mn10300_dma_alloc(struct device *dev, size_t size, 24 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 24 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 25 25 { 26 26 unsigned long addr; 27 27 void *ret; ··· 63 63 } 64 64 65 65 static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr, 66 - dma_addr_t dma_handle, struct dma_attrs *attrs) 66 + dma_addr_t dma_handle, unsigned long attrs) 67 67 { 68 68 unsigned long addr = (unsigned long) vaddr & ~0x20000000; 69 69 ··· 75 75 76 76 static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist, 77 77 int nents, enum dma_data_direction direction, 78 - struct dma_attrs *attrs) 78 + unsigned long attrs) 79 79 { 80 80 struct scatterlist *sg; 81 81 int i; ··· 92 92 93 93 static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page, 94 94 unsigned long offset, size_t size, 95 - enum dma_data_direction direction, struct dma_attrs *attrs) 95 + enum dma_data_direction direction, unsigned long attrs) 96 96 { 97 97 return page_to_bus(page) + offset; 98 98 }
+6 -6
arch/nios2/mm/dma-mapping.c
··· 59 59 } 60 60 61 61 static void *nios2_dma_alloc(struct device *dev, size_t size, 62 - dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 62 + dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 63 63 { 64 64 void *ret; 65 65 ··· 84 84 } 85 85 86 86 static void nios2_dma_free(struct device *dev, size_t size, void *vaddr, 87 - dma_addr_t dma_handle, struct dma_attrs *attrs) 87 + dma_addr_t dma_handle, unsigned long attrs) 88 88 { 89 89 unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr); 90 90 ··· 93 93 94 94 static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg, 95 95 int nents, enum dma_data_direction direction, 96 - struct dma_attrs *attrs) 96 + unsigned long attrs) 97 97 { 98 98 int i; 99 99 ··· 113 113 static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page, 114 114 unsigned long offset, size_t size, 115 115 enum dma_data_direction direction, 116 - struct dma_attrs *attrs) 116 + unsigned long attrs) 117 117 { 118 118 void *addr = page_address(page) + offset; 119 119 ··· 123 123 124 124 static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address, 125 125 size_t size, enum dma_data_direction direction, 126 - struct dma_attrs *attrs) 126 + unsigned long attrs) 127 127 { 128 128 __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction); 129 129 } 130 130 131 131 static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg, 132 132 int nhwentries, enum dma_data_direction direction, 133 - struct dma_attrs *attrs) 133 + unsigned long attrs) 134 134 { 135 135 void *addr; 136 136 int i;
+10 -11
arch/openrisc/kernel/dma.c
··· 22 22 #include <linux/dma-mapping.h> 23 23 #include <linux/dma-debug.h> 24 24 #include <linux/export.h> 25 - #include <linux/dma-attrs.h> 26 25 27 26 #include <asm/cpuinfo.h> 28 27 #include <asm/spr_defs.h> ··· 82 83 static void * 83 84 or1k_dma_alloc(struct device *dev, size_t size, 84 85 dma_addr_t *dma_handle, gfp_t gfp, 85 - struct dma_attrs *attrs) 86 + unsigned long attrs) 86 87 { 87 88 unsigned long va; 88 89 void *page; ··· 100 101 101 102 va = (unsigned long)page; 102 103 103 - if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { 104 + if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { 104 105 /* 105 106 * We need to iterate through the pages, clearing the dcache for 106 107 * them and setting the cache-inhibit bit. ··· 116 117 117 118 static void 118 119 or1k_dma_free(struct device *dev, size_t size, void *vaddr, 119 - dma_addr_t dma_handle, struct dma_attrs *attrs) 120 + dma_addr_t dma_handle, unsigned long attrs) 120 121 { 121 122 unsigned long va = (unsigned long)vaddr; 122 123 struct mm_walk walk = { ··· 124 125 .mm = &init_mm 125 126 }; 126 127 127 - if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) { 128 + if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) { 128 129 /* walk_page_range shouldn't be able to fail here */ 129 130 WARN_ON(walk_page_range(va, va + size, &walk)); 130 131 } ··· 136 137 or1k_map_page(struct device *dev, struct page *page, 137 138 unsigned long offset, size_t size, 138 139 enum dma_data_direction dir, 139 - struct dma_attrs *attrs) 140 + unsigned long attrs) 140 141 { 141 142 unsigned long cl; 142 143 dma_addr_t addr = page_to_phys(page) + offset; ··· 169 170 static void 170 171 or1k_unmap_page(struct device *dev, dma_addr_t dma_handle, 171 172 size_t size, enum dma_data_direction dir, 172 - struct dma_attrs *attrs) 173 + unsigned long attrs) 173 174 { 174 175 /* Nothing special to do here... 
*/ 175 176 } ··· 177 178 static int 178 179 or1k_map_sg(struct device *dev, struct scatterlist *sg, 179 180 int nents, enum dma_data_direction dir, 180 - struct dma_attrs *attrs) 181 + unsigned long attrs) 181 182 { 182 183 struct scatterlist *s; 183 184 int i; 184 185 185 186 for_each_sg(sg, s, nents, i) { 186 187 s->dma_address = or1k_map_page(dev, sg_page(s), s->offset, 187 - s->length, dir, NULL); 188 + s->length, dir, 0); 188 189 } 189 190 190 191 return nents; ··· 193 194 static void 194 195 or1k_unmap_sg(struct device *dev, struct scatterlist *sg, 195 196 int nents, enum dma_data_direction dir, 196 - struct dma_attrs *attrs) 197 + unsigned long attrs) 197 198 { 198 199 struct scatterlist *s; 199 200 int i; 200 201 201 202 for_each_sg(sg, s, nents, i) { 202 - or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL); 203 + or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0); 203 204 } 204 205 } 205 206
+9 -9
arch/parisc/kernel/pci-dma.c
··· 414 414 __initcall(pcxl_dma_init); 415 415 416 416 static void *pa11_dma_alloc(struct device *dev, size_t size, 417 - dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) 417 + dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) 418 418 { 419 419 unsigned long vaddr; 420 420 unsigned long paddr; ··· 441 441 } 442 442 443 443 static void pa11_dma_free(struct device *dev, size_t size, void *vaddr, 444 - dma_addr_t dma_handle, struct dma_attrs *attrs) 444 + dma_addr_t dma_handle, unsigned long attrs) 445 445 { 446 446 int order; 447 447 ··· 454 454 455 455 static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page, 456 456 unsigned long offset, size_t size, 457 - enum dma_data_direction direction, struct dma_attrs *attrs) 457 + enum dma_data_direction direction, unsigned long attrs) 458 458 { 459 459 void *addr = page_address(page) + offset; 460 460 BUG_ON(direction == DMA_NONE); ··· 465 465 466 466 static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, 467 467 size_t size, enum dma_data_direction direction, 468 - struct dma_attrs *attrs) 468 + unsigned long attrs) 469 469 { 470 470 BUG_ON(direction == DMA_NONE); 471 471 ··· 484 484 485 485 static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, 486 486 int nents, enum dma_data_direction direction, 487 - struct dma_attrs *attrs) 487 + unsigned long attrs) 488 488 { 489 489 int i; 490 490 struct scatterlist *sg; ··· 503 503 504 504 static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 505 505 int nents, enum dma_data_direction direction, 506 - struct dma_attrs *attrs) 506 + unsigned long attrs) 507 507 { 508 508 int i; 509 509 struct scatterlist *sg; ··· 577 577 }; 578 578 579 579 static void *pcx_dma_alloc(struct device *dev, size_t size, 580 - dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) 580 + dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) 581 581 { 582 582 void *addr; 583 583 584 - if 
(!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) 584 + if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) 585 585 return NULL; 586 586 587 587 addr = (void *)__get_free_pages(flag, get_order(size)); ··· 592 592 } 593 593 594 594 static void pcx_dma_free(struct device *dev, size_t size, void *vaddr, 595 - dma_addr_t iova, struct dma_attrs *attrs) 595 + dma_addr_t iova, unsigned long attrs) 596 596 { 597 597 free_pages((unsigned long)vaddr, get_order(size)); 598 598 return;
+3 -4
arch/powerpc/include/asm/dma-mapping.h
··· 13 13 /* need struct page definitions */ 14 14 #include <linux/mm.h> 15 15 #include <linux/scatterlist.h> 16 - #include <linux/dma-attrs.h> 17 16 #include <linux/dma-debug.h> 18 17 #include <asm/io.h> 19 18 #include <asm/swiotlb.h> ··· 24 25 /* Some dma direct funcs must be visible for use in other dma_ops */ 25 26 extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size, 26 27 dma_addr_t *dma_handle, gfp_t flag, 27 - struct dma_attrs *attrs); 28 + unsigned long attrs); 28 29 extern void __dma_direct_free_coherent(struct device *dev, size_t size, 29 30 void *vaddr, dma_addr_t dma_handle, 30 - struct dma_attrs *attrs); 31 + unsigned long attrs); 31 32 extern int dma_direct_mmap_coherent(struct device *dev, 32 33 struct vm_area_struct *vma, 33 34 void *cpu_addr, dma_addr_t handle, 34 - size_t size, struct dma_attrs *attrs); 35 + size_t size, unsigned long attrs); 35 36 36 37 #ifdef CONFIG_NOT_COHERENT_CACHE 37 38 /*
+5 -5
arch/powerpc/include/asm/iommu.h
··· 53 53 long index, long npages, 54 54 unsigned long uaddr, 55 55 enum dma_data_direction direction, 56 - struct dma_attrs *attrs); 56 + unsigned long attrs); 57 57 #ifdef CONFIG_IOMMU_API 58 58 /* 59 59 * Exchanges existing TCE with new TCE plus direction bits; ··· 248 248 struct scatterlist *sglist, int nelems, 249 249 unsigned long mask, 250 250 enum dma_data_direction direction, 251 - struct dma_attrs *attrs); 251 + unsigned long attrs); 252 252 extern void ppc_iommu_unmap_sg(struct iommu_table *tbl, 253 253 struct scatterlist *sglist, 254 254 int nelems, 255 255 enum dma_data_direction direction, 256 - struct dma_attrs *attrs); 256 + unsigned long attrs); 257 257 258 258 extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, 259 259 size_t size, dma_addr_t *dma_handle, ··· 264 264 struct page *page, unsigned long offset, 265 265 size_t size, unsigned long mask, 266 266 enum dma_data_direction direction, 267 - struct dma_attrs *attrs); 267 + unsigned long attrs); 268 268 extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, 269 269 size_t size, enum dma_data_direction direction, 270 - struct dma_attrs *attrs); 270 + unsigned long attrs); 271 271 272 272 extern void iommu_init_early_pSeries(void); 273 273 extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
+6 -6
arch/powerpc/kernel/dma-iommu.c
··· 18 18 */ 19 19 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, 20 20 dma_addr_t *dma_handle, gfp_t flag, 21 - struct dma_attrs *attrs) 21 + unsigned long attrs) 22 22 { 23 23 return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, 24 24 dma_handle, dev->coherent_dma_mask, flag, ··· 27 27 28 28 static void dma_iommu_free_coherent(struct device *dev, size_t size, 29 29 void *vaddr, dma_addr_t dma_handle, 30 - struct dma_attrs *attrs) 30 + unsigned long attrs) 31 31 { 32 32 iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); 33 33 } ··· 40 40 static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, 41 41 unsigned long offset, size_t size, 42 42 enum dma_data_direction direction, 43 - struct dma_attrs *attrs) 43 + unsigned long attrs) 44 44 { 45 45 return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, 46 46 size, device_to_mask(dev), direction, attrs); ··· 49 49 50 50 static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, 51 51 size_t size, enum dma_data_direction direction, 52 - struct dma_attrs *attrs) 52 + unsigned long attrs) 53 53 { 54 54 iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, 55 55 attrs); ··· 58 58 59 59 static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 60 60 int nelems, enum dma_data_direction direction, 61 - struct dma_attrs *attrs) 61 + unsigned long attrs) 62 62 { 63 63 return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems, 64 64 device_to_mask(dev), direction, attrs); ··· 66 66 67 67 static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist, 68 68 int nelems, enum dma_data_direction direction, 69 - struct dma_attrs *attrs) 69 + unsigned long attrs) 70 70 { 71 71 ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems, 72 72 direction, attrs);
+9 -9
arch/powerpc/kernel/dma.c
··· 64 64 65 65 void *__dma_direct_alloc_coherent(struct device *dev, size_t size, 66 66 dma_addr_t *dma_handle, gfp_t flag, 67 - struct dma_attrs *attrs) 67 + unsigned long attrs) 68 68 { 69 69 void *ret; 70 70 #ifdef CONFIG_NOT_COHERENT_CACHE ··· 121 121 122 122 void __dma_direct_free_coherent(struct device *dev, size_t size, 123 123 void *vaddr, dma_addr_t dma_handle, 124 - struct dma_attrs *attrs) 124 + unsigned long attrs) 125 125 { 126 126 #ifdef CONFIG_NOT_COHERENT_CACHE 127 127 __dma_free_coherent(size, vaddr); ··· 132 132 133 133 static void *dma_direct_alloc_coherent(struct device *dev, size_t size, 134 134 dma_addr_t *dma_handle, gfp_t flag, 135 - struct dma_attrs *attrs) 135 + unsigned long attrs) 136 136 { 137 137 struct iommu_table *iommu; 138 138 ··· 156 156 157 157 static void dma_direct_free_coherent(struct device *dev, size_t size, 158 158 void *vaddr, dma_addr_t dma_handle, 159 - struct dma_attrs *attrs) 159 + unsigned long attrs) 160 160 { 161 161 struct iommu_table *iommu; 162 162 ··· 177 177 178 178 int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma, 179 179 void *cpu_addr, dma_addr_t handle, size_t size, 180 - struct dma_attrs *attrs) 180 + unsigned long attrs) 181 181 { 182 182 unsigned long pfn; 183 183 ··· 195 195 196 196 static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, 197 197 int nents, enum dma_data_direction direction, 198 - struct dma_attrs *attrs) 198 + unsigned long attrs) 199 199 { 200 200 struct scatterlist *sg; 201 201 int i; ··· 211 211 212 212 static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, 213 213 int nents, enum dma_data_direction direction, 214 - struct dma_attrs *attrs) 214 + unsigned long attrs) 215 215 { 216 216 } 217 217 ··· 232 232 unsigned long offset, 233 233 size_t size, 234 234 enum dma_data_direction dir, 235 - struct dma_attrs *attrs) 235 + unsigned long attrs) 236 236 { 237 237 BUG_ON(dir == DMA_NONE); 238 238 __dma_sync_page(page, 
offset, size, dir); ··· 243 243 dma_addr_t dma_address, 244 244 size_t size, 245 245 enum dma_data_direction direction, 246 - struct dma_attrs *attrs) 246 + unsigned long attrs) 247 247 { 248 248 } 249 249
+6 -6
arch/powerpc/kernel/ibmebus.c
··· 65 65 size_t size, 66 66 dma_addr_t *dma_handle, 67 67 gfp_t flag, 68 - struct dma_attrs *attrs) 68 + unsigned long attrs) 69 69 { 70 70 void *mem; 71 71 ··· 78 78 static void ibmebus_free_coherent(struct device *dev, 79 79 size_t size, void *vaddr, 80 80 dma_addr_t dma_handle, 81 - struct dma_attrs *attrs) 81 + unsigned long attrs) 82 82 { 83 83 kfree(vaddr); 84 84 } ··· 88 88 unsigned long offset, 89 89 size_t size, 90 90 enum dma_data_direction direction, 91 - struct dma_attrs *attrs) 91 + unsigned long attrs) 92 92 { 93 93 return (dma_addr_t)(page_address(page) + offset); 94 94 } ··· 97 97 dma_addr_t dma_addr, 98 98 size_t size, 99 99 enum dma_data_direction direction, 100 - struct dma_attrs *attrs) 100 + unsigned long attrs) 101 101 { 102 102 return; 103 103 } ··· 105 105 static int ibmebus_map_sg(struct device *dev, 106 106 struct scatterlist *sgl, 107 107 int nents, enum dma_data_direction direction, 108 - struct dma_attrs *attrs) 108 + unsigned long attrs) 109 109 { 110 110 struct scatterlist *sg; 111 111 int i; ··· 121 121 static void ibmebus_unmap_sg(struct device *dev, 122 122 struct scatterlist *sg, 123 123 int nents, enum dma_data_direction direction, 124 - struct dma_attrs *attrs) 124 + unsigned long attrs) 125 125 { 126 126 return; 127 127 }
+6 -6
arch/powerpc/kernel/iommu.c
··· 307 307 void *page, unsigned int npages, 308 308 enum dma_data_direction direction, 309 309 unsigned long mask, unsigned int align_order, 310 - struct dma_attrs *attrs) 310 + unsigned long attrs) 311 311 { 312 312 unsigned long entry; 313 313 dma_addr_t ret = DMA_ERROR_CODE; ··· 431 431 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, 432 432 struct scatterlist *sglist, int nelems, 433 433 unsigned long mask, enum dma_data_direction direction, 434 - struct dma_attrs *attrs) 434 + unsigned long attrs) 435 435 { 436 436 dma_addr_t dma_next = 0, dma_addr; 437 437 struct scatterlist *s, *outs, *segstart; ··· 574 574 575 575 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, 576 576 int nelems, enum dma_data_direction direction, 577 - struct dma_attrs *attrs) 577 + unsigned long attrs) 578 578 { 579 579 struct scatterlist *sg; 580 580 ··· 753 753 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, 754 754 struct page *page, unsigned long offset, size_t size, 755 755 unsigned long mask, enum dma_data_direction direction, 756 - struct dma_attrs *attrs) 756 + unsigned long attrs) 757 757 { 758 758 dma_addr_t dma_handle = DMA_ERROR_CODE; 759 759 void *vaddr; ··· 790 790 791 791 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, 792 792 size_t size, enum dma_data_direction direction, 793 - struct dma_attrs *attrs) 793 + unsigned long attrs) 794 794 { 795 795 unsigned int npages; 796 796 ··· 845 845 nio_pages = size >> tbl->it_page_shift; 846 846 io_order = get_iommu_order(size, tbl); 847 847 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, 848 - mask >> tbl->it_page_shift, io_order, NULL); 848 + mask >> tbl->it_page_shift, io_order, 0); 849 849 if (mapping == DMA_ERROR_CODE) { 850 850 free_pages((unsigned long)ret, order); 851 851 return NULL;
+6 -6
arch/powerpc/kernel/vio.c
··· 482 482 483 483 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, 484 484 dma_addr_t *dma_handle, gfp_t flag, 485 - struct dma_attrs *attrs) 485 + unsigned long attrs) 486 486 { 487 487 struct vio_dev *viodev = to_vio_dev(dev); 488 488 void *ret; ··· 503 503 504 504 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, 505 505 void *vaddr, dma_addr_t dma_handle, 506 - struct dma_attrs *attrs) 506 + unsigned long attrs) 507 507 { 508 508 struct vio_dev *viodev = to_vio_dev(dev); 509 509 ··· 515 515 static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, 516 516 unsigned long offset, size_t size, 517 517 enum dma_data_direction direction, 518 - struct dma_attrs *attrs) 518 + unsigned long attrs) 519 519 { 520 520 struct vio_dev *viodev = to_vio_dev(dev); 521 521 struct iommu_table *tbl; ··· 539 539 static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, 540 540 size_t size, 541 541 enum dma_data_direction direction, 542 - struct dma_attrs *attrs) 542 + unsigned long attrs) 543 543 { 544 544 struct vio_dev *viodev = to_vio_dev(dev); 545 545 struct iommu_table *tbl; ··· 552 552 553 553 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 554 554 int nelems, enum dma_data_direction direction, 555 - struct dma_attrs *attrs) 555 + unsigned long attrs) 556 556 { 557 557 struct vio_dev *viodev = to_vio_dev(dev); 558 558 struct iommu_table *tbl; ··· 588 588 static void vio_dma_iommu_unmap_sg(struct device *dev, 589 589 struct scatterlist *sglist, int nelems, 590 590 enum dma_data_direction direction, 591 - struct dma_attrs *attrs) 591 + unsigned long attrs) 592 592 { 593 593 struct vio_dev *viodev = to_vio_dev(dev); 594 594 struct iommu_table *tbl;
+14 -14
arch/powerpc/platforms/cell/iommu.c
··· 166 166 167 167 static int tce_build_cell(struct iommu_table *tbl, long index, long npages, 168 168 unsigned long uaddr, enum dma_data_direction direction, 169 - struct dma_attrs *attrs) 169 + unsigned long attrs) 170 170 { 171 171 int i; 172 172 unsigned long *io_pte, base_pte; ··· 193 193 base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M | 194 194 CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask); 195 195 #endif 196 - if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))) 196 + if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING)) 197 197 base_pte &= ~CBE_IOPTE_SO_RW; 198 198 199 199 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset); ··· 526 526 527 527 __set_bit(0, window->table.it_map); 528 528 tce_build_cell(&window->table, window->table.it_offset, 1, 529 - (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL); 529 + (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0); 530 530 531 531 return window; 532 532 } ··· 572 572 573 573 static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, 574 574 dma_addr_t *dma_handle, gfp_t flag, 575 - struct dma_attrs *attrs) 575 + unsigned long attrs) 576 576 { 577 577 if (iommu_fixed_is_weak) 578 578 return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), ··· 586 586 587 587 static void dma_fixed_free_coherent(struct device *dev, size_t size, 588 588 void *vaddr, dma_addr_t dma_handle, 589 - struct dma_attrs *attrs) 589 + unsigned long attrs) 590 590 { 591 591 if (iommu_fixed_is_weak) 592 592 iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, ··· 598 598 static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, 599 599 unsigned long offset, size_t size, 600 600 enum dma_data_direction direction, 601 - struct dma_attrs *attrs) 601 + unsigned long attrs) 602 602 { 603 - if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) 603 + if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) 604 604 return dma_direct_ops.map_page(dev, page, 
offset, size, 605 605 direction, attrs); 606 606 else ··· 611 611 612 612 static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr, 613 613 size_t size, enum dma_data_direction direction, 614 - struct dma_attrs *attrs) 614 + unsigned long attrs) 615 615 { 616 - if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) 616 + if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) 617 617 dma_direct_ops.unmap_page(dev, dma_addr, size, direction, 618 618 attrs); 619 619 else ··· 623 623 624 624 static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg, 625 625 int nents, enum dma_data_direction direction, 626 - struct dma_attrs *attrs) 626 + unsigned long attrs) 627 627 { 628 - if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) 628 + if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) 629 629 return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs); 630 630 else 631 631 return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg, ··· 635 635 636 636 static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg, 637 637 int nents, enum dma_data_direction direction, 638 - struct dma_attrs *attrs) 638 + unsigned long attrs) 639 639 { 640 - if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)) 640 + if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING)) 641 641 dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs); 642 642 else 643 643 ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, ··· 1162 1162 pciep = of_find_node_by_type(NULL, "pcie-endpoint"); 1163 1163 1164 1164 if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0)) 1165 - iommu_fixed_is_weak = 1; 1165 + iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING; 1166 1166 1167 1167 of_node_put(pciep); 1168 1168
+1 -1
arch/powerpc/platforms/pasemi/iommu.c
··· 88 88 static int iobmap_build(struct iommu_table *tbl, long index, 89 89 long npages, unsigned long uaddr, 90 90 enum dma_data_direction direction, 91 - struct dma_attrs *attrs) 91 + unsigned long attrs) 92 92 { 93 93 u32 *ip; 94 94 u32 rpn;
+4 -4
arch/powerpc/platforms/powernv/npu-dma.c
··· 73 73 74 74 static void *dma_npu_alloc(struct device *dev, size_t size, 75 75 dma_addr_t *dma_handle, gfp_t flag, 76 - struct dma_attrs *attrs) 76 + unsigned long attrs) 77 77 { 78 78 NPU_DMA_OP_UNSUPPORTED(); 79 79 return NULL; ··· 81 81 82 82 static void dma_npu_free(struct device *dev, size_t size, 83 83 void *vaddr, dma_addr_t dma_handle, 84 - struct dma_attrs *attrs) 84 + unsigned long attrs) 85 85 { 86 86 NPU_DMA_OP_UNSUPPORTED(); 87 87 } ··· 89 89 static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, 90 90 unsigned long offset, size_t size, 91 91 enum dma_data_direction direction, 92 - struct dma_attrs *attrs) 92 + unsigned long attrs) 93 93 { 94 94 NPU_DMA_OP_UNSUPPORTED(); 95 95 return 0; ··· 97 97 98 98 static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist, 99 99 int nelems, enum dma_data_direction direction, 100 - struct dma_attrs *attrs) 100 + unsigned long attrs) 101 101 { 102 102 NPU_DMA_OP_UNSUPPORTED(); 103 103 return 0;
+2 -2
arch/powerpc/platforms/powernv/pci-ioda.c
··· 1806 1806 static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index, 1807 1807 long npages, unsigned long uaddr, 1808 1808 enum dma_data_direction direction, 1809 - struct dma_attrs *attrs) 1809 + unsigned long attrs) 1810 1810 { 1811 1811 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, 1812 1812 attrs); ··· 1950 1950 static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, 1951 1951 long npages, unsigned long uaddr, 1952 1952 enum dma_data_direction direction, 1953 - struct dma_attrs *attrs) 1953 + unsigned long attrs) 1954 1954 { 1955 1955 int ret = pnv_tce_build(tbl, index, npages, uaddr, direction, 1956 1956 attrs);
+1 -1
arch/powerpc/platforms/powernv/pci.c
··· 704 704 705 705 int pnv_tce_build(struct iommu_table *tbl, long index, long npages, 706 706 unsigned long uaddr, enum dma_data_direction direction, 707 - struct dma_attrs *attrs) 707 + unsigned long attrs) 708 708 { 709 709 u64 proto_tce = iommu_direction_to_tce_perm(direction); 710 710 u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
+1 -1
arch/powerpc/platforms/powernv/pci.h
··· 181 181 extern struct pci_ops pnv_pci_ops; 182 182 extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages, 183 183 unsigned long uaddr, enum dma_data_direction direction, 184 - struct dma_attrs *attrs); 184 + unsigned long attrs); 185 185 extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages); 186 186 extern int pnv_tce_xchg(struct iommu_table *tbl, long index, 187 187 unsigned long *hpa, enum dma_data_direction *direction);
+9 -9
arch/powerpc/platforms/ps3/system-bus.c
··· 516 516 */ 517 517 static void * ps3_alloc_coherent(struct device *_dev, size_t size, 518 518 dma_addr_t *dma_handle, gfp_t flag, 519 - struct dma_attrs *attrs) 519 + unsigned long attrs) 520 520 { 521 521 int result; 522 522 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); ··· 553 553 } 554 554 555 555 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, 556 - dma_addr_t dma_handle, struct dma_attrs *attrs) 556 + dma_addr_t dma_handle, unsigned long attrs) 557 557 { 558 558 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 559 559 ··· 569 569 570 570 static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, 571 571 unsigned long offset, size_t size, enum dma_data_direction direction, 572 - struct dma_attrs *attrs) 572 + unsigned long attrs) 573 573 { 574 574 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 575 575 int result; ··· 592 592 static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, 593 593 unsigned long offset, size_t size, 594 594 enum dma_data_direction direction, 595 - struct dma_attrs *attrs) 595 + unsigned long attrs) 596 596 { 597 597 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 598 598 int result; ··· 626 626 } 627 627 628 628 static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr, 629 - size_t size, enum dma_data_direction direction, struct dma_attrs *attrs) 629 + size_t size, enum dma_data_direction direction, unsigned long attrs) 630 630 { 631 631 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 632 632 int result; ··· 640 640 } 641 641 642 642 static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl, 643 - int nents, enum dma_data_direction direction, struct dma_attrs *attrs) 643 + int nents, enum dma_data_direction direction, unsigned long attrs) 644 644 { 645 645 #if defined(CONFIG_PS3_DYNAMIC_DMA) 646 646 BUG_ON("do"); ··· 670 670 static int 
ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg, 671 671 int nents, 672 672 enum dma_data_direction direction, 673 - struct dma_attrs *attrs) 673 + unsigned long attrs) 674 674 { 675 675 BUG(); 676 676 return 0; 677 677 } 678 678 679 679 static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg, 680 - int nents, enum dma_data_direction direction, struct dma_attrs *attrs) 680 + int nents, enum dma_data_direction direction, unsigned long attrs) 681 681 { 682 682 #if defined(CONFIG_PS3_DYNAMIC_DMA) 683 683 BUG_ON("do"); ··· 686 686 687 687 static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg, 688 688 int nents, enum dma_data_direction direction, 689 - struct dma_attrs *attrs) 689 + unsigned long attrs) 690 690 { 691 691 BUG(); 692 692 }
+3 -3
arch/powerpc/platforms/pseries/iommu.c
··· 123 123 static int tce_build_pSeries(struct iommu_table *tbl, long index, 124 124 long npages, unsigned long uaddr, 125 125 enum dma_data_direction direction, 126 - struct dma_attrs *attrs) 126 + unsigned long attrs) 127 127 { 128 128 u64 proto_tce; 129 129 __be64 *tcep, *tces; ··· 173 173 static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, 174 174 long npages, unsigned long uaddr, 175 175 enum dma_data_direction direction, 176 - struct dma_attrs *attrs) 176 + unsigned long attrs) 177 177 { 178 178 u64 rc = 0; 179 179 u64 proto_tce, tce; ··· 216 216 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, 217 217 long npages, unsigned long uaddr, 218 218 enum dma_data_direction direction, 219 - struct dma_attrs *attrs) 219 + unsigned long attrs) 220 220 { 221 221 u64 rc = 0; 222 222 u64 proto_tce;
+1 -1
arch/powerpc/sysdev/dart_iommu.c
··· 185 185 static int dart_build(struct iommu_table *tbl, long index, 186 186 long npages, unsigned long uaddr, 187 187 enum dma_data_direction direction, 188 - struct dma_attrs *attrs) 188 + unsigned long attrs) 189 189 { 190 190 unsigned int *dp, *orig_dp; 191 191 unsigned int rpn;
-1
arch/s390/include/asm/dma-mapping.h
··· 5 5 #include <linux/types.h> 6 6 #include <linux/mm.h> 7 7 #include <linux/scatterlist.h> 8 - #include <linux/dma-attrs.h> 9 8 #include <linux/dma-debug.h> 10 9 #include <linux/io.h> 11 10
+12 -11
arch/s390/pci/pci_dma.c
··· 286 286 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page, 287 287 unsigned long offset, size_t size, 288 288 enum dma_data_direction direction, 289 - struct dma_attrs *attrs) 289 + unsigned long attrs) 290 290 { 291 291 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 292 292 unsigned long nr_pages, iommu_page_index; ··· 332 332 333 333 static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr, 334 334 size_t size, enum dma_data_direction direction, 335 - struct dma_attrs *attrs) 335 + unsigned long attrs) 336 336 { 337 337 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 338 338 unsigned long iommu_page_index; ··· 355 355 356 356 static void *s390_dma_alloc(struct device *dev, size_t size, 357 357 dma_addr_t *dma_handle, gfp_t flag, 358 - struct dma_attrs *attrs) 358 + unsigned long attrs) 359 359 { 360 360 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 361 361 struct page *page; ··· 370 370 pa = page_to_phys(page); 371 371 memset((void *) pa, 0, size); 372 372 373 - map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL); 373 + map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0); 374 374 if (dma_mapping_error(dev, map)) { 375 375 free_pages(pa, get_order(size)); 376 376 return NULL; ··· 384 384 385 385 static void s390_dma_free(struct device *dev, size_t size, 386 386 void *pa, dma_addr_t dma_handle, 387 - struct dma_attrs *attrs) 387 + unsigned long attrs) 388 388 { 389 389 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); 390 390 391 391 size = PAGE_ALIGN(size); 392 392 atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); 393 - s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); 393 + s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0); 394 394 free_pages((unsigned long) pa, get_order(size)); 395 395 } 396 396 397 397 static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg, 398 398 int nr_elements, enum dma_data_direction dir, 399 - 
struct dma_attrs *attrs) 399 + unsigned long attrs) 400 400 { 401 401 int mapped_elements = 0; 402 402 struct scatterlist *s; ··· 405 405 for_each_sg(sg, s, nr_elements, i) { 406 406 struct page *page = sg_page(s); 407 407 s->dma_address = s390_dma_map_pages(dev, page, s->offset, 408 - s->length, dir, NULL); 408 + s->length, dir, 0); 409 409 if (!dma_mapping_error(dev, s->dma_address)) { 410 410 s->dma_length = s->length; 411 411 mapped_elements++; ··· 419 419 for_each_sg(sg, s, mapped_elements, i) { 420 420 if (s->dma_address) 421 421 s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, 422 - dir, NULL); 422 + dir, 0); 423 423 s->dma_address = 0; 424 424 s->dma_length = 0; 425 425 } ··· 429 429 430 430 static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, 431 431 int nr_elements, enum dma_data_direction dir, 432 - struct dma_attrs *attrs) 432 + unsigned long attrs) 433 433 { 434 434 struct scatterlist *s; 435 435 int i; 436 436 437 437 for_each_sg(sg, s, nr_elements, i) { 438 - s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL); 438 + s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, 439 + 0); 439 440 s->dma_address = 0; 440 441 s->dma_length = 0; 441 442 }
+2 -2
arch/sh/include/asm/dma-mapping.h
··· 17 17 /* arch/sh/mm/consistent.c */ 18 18 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 19 19 dma_addr_t *dma_addr, gfp_t flag, 20 - struct dma_attrs *attrs); 20 + unsigned long attrs); 21 21 extern void dma_generic_free_coherent(struct device *dev, size_t size, 22 22 void *vaddr, dma_addr_t dma_handle, 23 - struct dma_attrs *attrs); 23 + unsigned long attrs); 24 24 25 25 #endif /* __ASM_SH_DMA_MAPPING_H */
+2 -2
arch/sh/kernel/dma-nommu.c
··· 13 13 static dma_addr_t nommu_map_page(struct device *dev, struct page *page, 14 14 unsigned long offset, size_t size, 15 15 enum dma_data_direction dir, 16 - struct dma_attrs *attrs) 16 + unsigned long attrs) 17 17 { 18 18 dma_addr_t addr = page_to_phys(page) + offset; 19 19 ··· 25 25 26 26 static int nommu_map_sg(struct device *dev, struct scatterlist *sg, 27 27 int nents, enum dma_data_direction dir, 28 - struct dma_attrs *attrs) 28 + unsigned long attrs) 29 29 { 30 30 struct scatterlist *s; 31 31 int i;
+2 -2
arch/sh/mm/consistent.c
··· 34 34 35 35 void *dma_generic_alloc_coherent(struct device *dev, size_t size, 36 36 dma_addr_t *dma_handle, gfp_t gfp, 37 - struct dma_attrs *attrs) 37 + unsigned long attrs) 38 38 { 39 39 void *ret, *ret_nocache; 40 40 int order = get_order(size); ··· 66 66 67 67 void dma_generic_free_coherent(struct device *dev, size_t size, 68 68 void *vaddr, dma_addr_t dma_handle, 69 - struct dma_attrs *attrs) 69 + unsigned long attrs) 70 70 { 71 71 int order = get_order(size); 72 72 unsigned long pfn = dma_handle >> PAGE_SHIFT;
+6 -6
arch/sparc/kernel/iommu.c
··· 196 196 197 197 static void *dma_4u_alloc_coherent(struct device *dev, size_t size, 198 198 dma_addr_t *dma_addrp, gfp_t gfp, 199 - struct dma_attrs *attrs) 199 + unsigned long attrs) 200 200 { 201 201 unsigned long order, first_page; 202 202 struct iommu *iommu; ··· 245 245 246 246 static void dma_4u_free_coherent(struct device *dev, size_t size, 247 247 void *cpu, dma_addr_t dvma, 248 - struct dma_attrs *attrs) 248 + unsigned long attrs) 249 249 { 250 250 struct iommu *iommu; 251 251 unsigned long order, npages; ··· 263 263 static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page, 264 264 unsigned long offset, size_t sz, 265 265 enum dma_data_direction direction, 266 - struct dma_attrs *attrs) 266 + unsigned long attrs) 267 267 { 268 268 struct iommu *iommu; 269 269 struct strbuf *strbuf; ··· 385 385 386 386 static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr, 387 387 size_t sz, enum dma_data_direction direction, 388 - struct dma_attrs *attrs) 388 + unsigned long attrs) 389 389 { 390 390 struct iommu *iommu; 391 391 struct strbuf *strbuf; ··· 431 431 432 432 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist, 433 433 int nelems, enum dma_data_direction direction, 434 - struct dma_attrs *attrs) 434 + unsigned long attrs) 435 435 { 436 436 struct scatterlist *s, *outs, *segstart; 437 437 unsigned long flags, handle, prot, ctx; ··· 607 607 608 608 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist, 609 609 int nelems, enum dma_data_direction direction, 610 - struct dma_attrs *attrs) 610 + unsigned long attrs) 611 611 { 612 612 unsigned long flags, ctx; 613 613 struct scatterlist *sg;
+12 -12
arch/sparc/kernel/ioport.c
··· 260 260 */ 261 261 static void *sbus_alloc_coherent(struct device *dev, size_t len, 262 262 dma_addr_t *dma_addrp, gfp_t gfp, 263 - struct dma_attrs *attrs) 263 + unsigned long attrs) 264 264 { 265 265 struct platform_device *op = to_platform_device(dev); 266 266 unsigned long len_total = PAGE_ALIGN(len); ··· 315 315 } 316 316 317 317 static void sbus_free_coherent(struct device *dev, size_t n, void *p, 318 - dma_addr_t ba, struct dma_attrs *attrs) 318 + dma_addr_t ba, unsigned long attrs) 319 319 { 320 320 struct resource *res; 321 321 struct page *pgv; ··· 355 355 static dma_addr_t sbus_map_page(struct device *dev, struct page *page, 356 356 unsigned long offset, size_t len, 357 357 enum dma_data_direction dir, 358 - struct dma_attrs *attrs) 358 + unsigned long attrs) 359 359 { 360 360 void *va = page_address(page) + offset; 361 361 ··· 371 371 } 372 372 373 373 static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n, 374 - enum dma_data_direction dir, struct dma_attrs *attrs) 374 + enum dma_data_direction dir, unsigned long attrs) 375 375 { 376 376 mmu_release_scsi_one(dev, ba, n); 377 377 } 378 378 379 379 static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, 380 - enum dma_data_direction dir, struct dma_attrs *attrs) 380 + enum dma_data_direction dir, unsigned long attrs) 381 381 { 382 382 mmu_get_scsi_sgl(dev, sg, n); 383 383 return n; 384 384 } 385 385 386 386 static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n, 387 - enum dma_data_direction dir, struct dma_attrs *attrs) 387 + enum dma_data_direction dir, unsigned long attrs) 388 388 { 389 389 mmu_release_scsi_sgl(dev, sg, n); 390 390 } ··· 429 429 */ 430 430 static void *pci32_alloc_coherent(struct device *dev, size_t len, 431 431 dma_addr_t *pba, gfp_t gfp, 432 - struct dma_attrs *attrs) 432 + unsigned long attrs) 433 433 { 434 434 unsigned long len_total = PAGE_ALIGN(len); 435 435 void *va; ··· 482 482 * past this call are illegal. 
483 483 */ 484 484 static void pci32_free_coherent(struct device *dev, size_t n, void *p, 485 - dma_addr_t ba, struct dma_attrs *attrs) 485 + dma_addr_t ba, unsigned long attrs) 486 486 { 487 487 struct resource *res; 488 488 ··· 518 518 static dma_addr_t pci32_map_page(struct device *dev, struct page *page, 519 519 unsigned long offset, size_t size, 520 520 enum dma_data_direction dir, 521 - struct dma_attrs *attrs) 521 + unsigned long attrs) 522 522 { 523 523 /* IIep is write-through, not flushing. */ 524 524 return page_to_phys(page) + offset; 525 525 } 526 526 527 527 static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, 528 - enum dma_data_direction dir, struct dma_attrs *attrs) 528 + enum dma_data_direction dir, unsigned long attrs) 529 529 { 530 530 if (dir != PCI_DMA_TODEVICE) 531 531 dma_make_coherent(ba, PAGE_ALIGN(size)); ··· 548 548 */ 549 549 static int pci32_map_sg(struct device *device, struct scatterlist *sgl, 550 550 int nents, enum dma_data_direction dir, 551 - struct dma_attrs *attrs) 551 + unsigned long attrs) 552 552 { 553 553 struct scatterlist *sg; 554 554 int n; ··· 567 567 */ 568 568 static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, 569 569 int nents, enum dma_data_direction dir, 570 - struct dma_attrs *attrs) 570 + unsigned long attrs) 571 571 { 572 572 struct scatterlist *sg; 573 573 int n;
+6 -6
arch/sparc/kernel/pci_sun4v.c
··· 130 130 131 131 static void *dma_4v_alloc_coherent(struct device *dev, size_t size, 132 132 dma_addr_t *dma_addrp, gfp_t gfp, 133 - struct dma_attrs *attrs) 133 + unsigned long attrs) 134 134 { 135 135 unsigned long flags, order, first_page, npages, n; 136 136 struct iommu *iommu; ··· 213 213 } 214 214 215 215 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, 216 - dma_addr_t dvma, struct dma_attrs *attrs) 216 + dma_addr_t dvma, unsigned long attrs) 217 217 { 218 218 struct pci_pbm_info *pbm; 219 219 struct iommu *iommu; ··· 235 235 static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, 236 236 unsigned long offset, size_t sz, 237 237 enum dma_data_direction direction, 238 - struct dma_attrs *attrs) 238 + unsigned long attrs) 239 239 { 240 240 struct iommu *iommu; 241 241 unsigned long flags, npages, oaddr; ··· 294 294 295 295 static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, 296 296 size_t sz, enum dma_data_direction direction, 297 - struct dma_attrs *attrs) 297 + unsigned long attrs) 298 298 { 299 299 struct pci_pbm_info *pbm; 300 300 struct iommu *iommu; ··· 322 322 323 323 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, 324 324 int nelems, enum dma_data_direction direction, 325 - struct dma_attrs *attrs) 325 + unsigned long attrs) 326 326 { 327 327 struct scatterlist *s, *outs, *segstart; 328 328 unsigned long flags, handle, prot; ··· 466 466 467 467 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, 468 468 int nelems, enum dma_data_direction direction, 469 - struct dma_attrs *attrs) 469 + unsigned long attrs) 470 470 { 471 471 struct pci_pbm_info *pbm; 472 472 struct scatterlist *sg;
+14 -14
arch/tile/kernel/pci-dma.c
··· 34 34 35 35 static void *tile_dma_alloc_coherent(struct device *dev, size_t size, 36 36 dma_addr_t *dma_handle, gfp_t gfp, 37 - struct dma_attrs *attrs) 37 + unsigned long attrs) 38 38 { 39 39 u64 dma_mask = (dev && dev->coherent_dma_mask) ? 40 40 dev->coherent_dma_mask : DMA_BIT_MASK(32); ··· 78 78 */ 79 79 static void tile_dma_free_coherent(struct device *dev, size_t size, 80 80 void *vaddr, dma_addr_t dma_handle, 81 - struct dma_attrs *attrs) 81 + unsigned long attrs) 82 82 { 83 83 homecache_free_pages((unsigned long)vaddr, get_order(size)); 84 84 } ··· 202 202 203 203 static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist, 204 204 int nents, enum dma_data_direction direction, 205 - struct dma_attrs *attrs) 205 + unsigned long attrs) 206 206 { 207 207 struct scatterlist *sg; 208 208 int i; ··· 224 224 225 225 static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, 226 226 int nents, enum dma_data_direction direction, 227 - struct dma_attrs *attrs) 227 + unsigned long attrs) 228 228 { 229 229 struct scatterlist *sg; 230 230 int i; ··· 240 240 static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page, 241 241 unsigned long offset, size_t size, 242 242 enum dma_data_direction direction, 243 - struct dma_attrs *attrs) 243 + unsigned long attrs) 244 244 { 245 245 BUG_ON(!valid_dma_direction(direction)); 246 246 ··· 252 252 253 253 static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address, 254 254 size_t size, enum dma_data_direction direction, 255 - struct dma_attrs *attrs) 255 + unsigned long attrs) 256 256 { 257 257 BUG_ON(!valid_dma_direction(direction)); 258 258 ··· 343 343 344 344 static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size, 345 345 dma_addr_t *dma_handle, gfp_t gfp, 346 - struct dma_attrs *attrs) 346 + unsigned long attrs) 347 347 { 348 348 int node = dev_to_node(dev); 349 349 int order = get_order(size); ··· 368 368 */ 369 369 static void 
tile_pci_dma_free_coherent(struct device *dev, size_t size, 370 370 void *vaddr, dma_addr_t dma_handle, 371 - struct dma_attrs *attrs) 371 + unsigned long attrs) 372 372 { 373 373 homecache_free_pages((unsigned long)vaddr, get_order(size)); 374 374 } 375 375 376 376 static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist, 377 377 int nents, enum dma_data_direction direction, 378 - struct dma_attrs *attrs) 378 + unsigned long attrs) 379 379 { 380 380 struct scatterlist *sg; 381 381 int i; ··· 400 400 static void tile_pci_dma_unmap_sg(struct device *dev, 401 401 struct scatterlist *sglist, int nents, 402 402 enum dma_data_direction direction, 403 - struct dma_attrs *attrs) 403 + unsigned long attrs) 404 404 { 405 405 struct scatterlist *sg; 406 406 int i; ··· 416 416 static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page, 417 417 unsigned long offset, size_t size, 418 418 enum dma_data_direction direction, 419 - struct dma_attrs *attrs) 419 + unsigned long attrs) 420 420 { 421 421 BUG_ON(!valid_dma_direction(direction)); 422 422 ··· 429 429 static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, 430 430 size_t size, 431 431 enum dma_data_direction direction, 432 - struct dma_attrs *attrs) 432 + unsigned long attrs) 433 433 { 434 434 BUG_ON(!valid_dma_direction(direction)); 435 435 ··· 531 531 #ifdef CONFIG_SWIOTLB 532 532 static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size, 533 533 dma_addr_t *dma_handle, gfp_t gfp, 534 - struct dma_attrs *attrs) 534 + unsigned long attrs) 535 535 { 536 536 gfp |= GFP_DMA; 537 537 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); ··· 539 539 540 540 static void tile_swiotlb_free_coherent(struct device *dev, size_t size, 541 541 void *vaddr, dma_addr_t dma_addr, 542 - struct dma_attrs *attrs) 542 + unsigned long attrs) 543 543 { 544 544 swiotlb_free_coherent(dev, size, vaddr, dma_addr); 545 545 }
+2 -2
arch/unicore32/mm/dma-swiotlb.c
··· 19 19 20 20 static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size, 21 21 dma_addr_t *dma_handle, gfp_t flags, 22 - struct dma_attrs *attrs) 22 + unsigned long attrs) 23 23 { 24 24 return swiotlb_alloc_coherent(dev, size, dma_handle, flags); 25 25 } 26 26 27 27 static void unicore_swiotlb_free_coherent(struct device *dev, size_t size, 28 28 void *vaddr, dma_addr_t dma_addr, 29 - struct dma_attrs *attrs) 29 + unsigned long attrs) 30 30 { 31 31 swiotlb_free_coherent(dev, size, vaddr, dma_addr); 32 32 }
+2 -3
arch/x86/include/asm/dma-mapping.h
··· 9 9 #include <linux/kmemcheck.h> 10 10 #include <linux/scatterlist.h> 11 11 #include <linux/dma-debug.h> 12 - #include <linux/dma-attrs.h> 13 12 #include <asm/io.h> 14 13 #include <asm/swiotlb.h> 15 14 #include <linux/dma-contiguous.h> ··· 47 48 48 49 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, 49 50 dma_addr_t *dma_addr, gfp_t flag, 50 - struct dma_attrs *attrs); 51 + unsigned long attrs); 51 52 52 53 extern void dma_generic_free_coherent(struct device *dev, size_t size, 53 54 void *vaddr, dma_addr_t dma_addr, 54 - struct dma_attrs *attrs); 55 + unsigned long attrs); 55 56 56 57 #ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ 57 58 extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
+2 -2
arch/x86/include/asm/swiotlb.h
··· 31 31 32 32 extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 33 33 dma_addr_t *dma_handle, gfp_t flags, 34 - struct dma_attrs *attrs); 34 + unsigned long attrs); 35 35 extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, 36 36 void *vaddr, dma_addr_t dma_addr, 37 - struct dma_attrs *attrs); 37 + unsigned long attrs); 38 38 39 39 #endif /* _ASM_X86_SWIOTLB_H */
+4 -5
arch/x86/include/asm/xen/page-coherent.h
··· 2 2 #define _ASM_X86_XEN_PAGE_COHERENT_H 3 3 4 4 #include <asm/page.h> 5 - #include <linux/dma-attrs.h> 6 5 #include <linux/dma-mapping.h> 7 6 8 7 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 9 8 dma_addr_t *dma_handle, gfp_t flags, 10 - struct dma_attrs *attrs) 9 + unsigned long attrs) 11 10 { 12 11 void *vstart = (void*)__get_free_pages(flags, get_order(size)); 13 12 *dma_handle = virt_to_phys(vstart); ··· 15 16 16 17 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 17 18 void *cpu_addr, dma_addr_t dma_handle, 18 - struct dma_attrs *attrs) 19 + unsigned long attrs) 19 20 { 20 21 free_pages((unsigned long) cpu_addr, get_order(size)); 21 22 } 22 23 23 24 static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 24 25 dma_addr_t dev_addr, unsigned long offset, size_t size, 25 - enum dma_data_direction dir, struct dma_attrs *attrs) { } 26 + enum dma_data_direction dir, unsigned long attrs) { } 26 27 27 28 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 28 29 size_t size, enum dma_data_direction dir, 29 - struct dma_attrs *attrs) { } 30 + unsigned long attrs) { } 30 31 31 32 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 32 33 dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
+10 -10
arch/x86/kernel/amd_gart_64.c
··· 241 241 static dma_addr_t gart_map_page(struct device *dev, struct page *page, 242 242 unsigned long offset, size_t size, 243 243 enum dma_data_direction dir, 244 - struct dma_attrs *attrs) 244 + unsigned long attrs) 245 245 { 246 246 unsigned long bus; 247 247 phys_addr_t paddr = page_to_phys(page) + offset; ··· 263 263 */ 264 264 static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr, 265 265 size_t size, enum dma_data_direction dir, 266 - struct dma_attrs *attrs) 266 + unsigned long attrs) 267 267 { 268 268 unsigned long iommu_page; 269 269 int npages; ··· 285 285 * Wrapper for pci_unmap_single working with scatterlists. 286 286 */ 287 287 static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 288 - enum dma_data_direction dir, struct dma_attrs *attrs) 288 + enum dma_data_direction dir, unsigned long attrs) 289 289 { 290 290 struct scatterlist *s; 291 291 int i; ··· 293 293 for_each_sg(sg, s, nents, i) { 294 294 if (!s->dma_length || !s->length) 295 295 break; 296 - gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL); 296 + gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0); 297 297 } 298 298 } 299 299 ··· 315 315 addr = dma_map_area(dev, addr, s->length, dir, 0); 316 316 if (addr == bad_dma_addr) { 317 317 if (i > 0) 318 - gart_unmap_sg(dev, sg, i, dir, NULL); 318 + gart_unmap_sg(dev, sg, i, dir, 0); 319 319 nents = 0; 320 320 sg[0].dma_length = 0; 321 321 break; ··· 386 386 * Merge chunks that have page aligned sizes into a continuous mapping. 
387 387 */ 388 388 static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, 389 - enum dma_data_direction dir, struct dma_attrs *attrs) 389 + enum dma_data_direction dir, unsigned long attrs) 390 390 { 391 391 struct scatterlist *s, *ps, *start_sg, *sgmap; 392 392 int need = 0, nextneed, i, out, start; ··· 456 456 457 457 error: 458 458 flush_gart(); 459 - gart_unmap_sg(dev, sg, out, dir, NULL); 459 + gart_unmap_sg(dev, sg, out, dir, 0); 460 460 461 461 /* When it was forced or merged try again in a dumb way */ 462 462 if (force_iommu || iommu_merge) { ··· 476 476 /* allocate and map a coherent mapping */ 477 477 static void * 478 478 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, 479 - gfp_t flag, struct dma_attrs *attrs) 479 + gfp_t flag, unsigned long attrs) 480 480 { 481 481 dma_addr_t paddr; 482 482 unsigned long align_mask; ··· 508 508 /* free a coherent mapping */ 509 509 static void 510 510 gart_free_coherent(struct device *dev, size_t size, void *vaddr, 511 - dma_addr_t dma_addr, struct dma_attrs *attrs) 511 + dma_addr_t dma_addr, unsigned long attrs) 512 512 { 513 - gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); 513 + gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); 514 514 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); 515 515 } 516 516
+7 -7
arch/x86/kernel/pci-calgary_64.c
··· 340 340 341 341 static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist, 342 342 int nelems,enum dma_data_direction dir, 343 - struct dma_attrs *attrs) 343 + unsigned long attrs) 344 344 { 345 345 struct iommu_table *tbl = find_iommu_table(dev); 346 346 struct scatterlist *s; ··· 364 364 365 365 static int calgary_map_sg(struct device *dev, struct scatterlist *sg, 366 366 int nelems, enum dma_data_direction dir, 367 - struct dma_attrs *attrs) 367 + unsigned long attrs) 368 368 { 369 369 struct iommu_table *tbl = find_iommu_table(dev); 370 370 struct scatterlist *s; ··· 396 396 397 397 return nelems; 398 398 error: 399 - calgary_unmap_sg(dev, sg, nelems, dir, NULL); 399 + calgary_unmap_sg(dev, sg, nelems, dir, 0); 400 400 for_each_sg(sg, s, nelems, i) { 401 401 sg->dma_address = DMA_ERROR_CODE; 402 402 sg->dma_length = 0; ··· 407 407 static dma_addr_t calgary_map_page(struct device *dev, struct page *page, 408 408 unsigned long offset, size_t size, 409 409 enum dma_data_direction dir, 410 - struct dma_attrs *attrs) 410 + unsigned long attrs) 411 411 { 412 412 void *vaddr = page_address(page) + offset; 413 413 unsigned long uaddr; ··· 422 422 423 423 static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr, 424 424 size_t size, enum dma_data_direction dir, 425 - struct dma_attrs *attrs) 425 + unsigned long attrs) 426 426 { 427 427 struct iommu_table *tbl = find_iommu_table(dev); 428 428 unsigned int npages; ··· 432 432 } 433 433 434 434 static void* calgary_alloc_coherent(struct device *dev, size_t size, 435 - dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) 435 + dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs) 436 436 { 437 437 void *ret = NULL; 438 438 dma_addr_t mapping; ··· 466 466 467 467 static void calgary_free_coherent(struct device *dev, size_t size, 468 468 void *vaddr, dma_addr_t dma_handle, 469 - struct dma_attrs *attrs) 469 + unsigned long attrs) 470 470 { 471 471 unsigned int npages; 472 472 
struct iommu_table *tbl = find_iommu_table(dev);
+2 -2
arch/x86/kernel/pci-dma.c
··· 77 77 } 78 78 void *dma_generic_alloc_coherent(struct device *dev, size_t size, 79 79 dma_addr_t *dma_addr, gfp_t flag, 80 - struct dma_attrs *attrs) 80 + unsigned long attrs) 81 81 { 82 82 unsigned long dma_mask; 83 83 struct page *page; ··· 120 120 } 121 121 122 122 void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, 123 - dma_addr_t dma_addr, struct dma_attrs *attrs) 123 + dma_addr_t dma_addr, unsigned long attrs) 124 124 { 125 125 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 126 126 struct page *page = virt_to_page(vaddr);
+2 -2
arch/x86/kernel/pci-nommu.c
··· 28 28 static dma_addr_t nommu_map_page(struct device *dev, struct page *page, 29 29 unsigned long offset, size_t size, 30 30 enum dma_data_direction dir, 31 - struct dma_attrs *attrs) 31 + unsigned long attrs) 32 32 { 33 33 dma_addr_t bus = page_to_phys(page) + offset; 34 34 WARN_ON(size == 0); ··· 55 55 */ 56 56 static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, 57 57 int nents, enum dma_data_direction dir, 58 - struct dma_attrs *attrs) 58 + unsigned long attrs) 59 59 { 60 60 struct scatterlist *s; 61 61 int i;
+2 -2
arch/x86/kernel/pci-swiotlb.c
··· 16 16 17 17 void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 18 18 dma_addr_t *dma_handle, gfp_t flags, 19 - struct dma_attrs *attrs) 19 + unsigned long attrs) 20 20 { 21 21 void *vaddr; 22 22 ··· 37 37 38 38 void x86_swiotlb_free_coherent(struct device *dev, size_t size, 39 39 void *vaddr, dma_addr_t dma_addr, 40 - struct dma_attrs *attrs) 40 + unsigned long attrs) 41 41 { 42 42 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) 43 43 swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+1 -1
arch/x86/pci/sta2x11-fixup.c
··· 169 169 size_t size, 170 170 dma_addr_t *dma_handle, 171 171 gfp_t flags, 172 - struct dma_attrs *attrs) 172 + unsigned long attrs) 173 173 { 174 174 void *vaddr; 175 175
+8 -8
arch/x86/pci/vmd.c
··· 274 274 } 275 275 276 276 static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, 277 - gfp_t flag, struct dma_attrs *attrs) 277 + gfp_t flag, unsigned long attrs) 278 278 { 279 279 return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, 280 280 attrs); 281 281 } 282 282 283 283 static void vmd_free(struct device *dev, size_t size, void *vaddr, 284 - dma_addr_t addr, struct dma_attrs *attrs) 284 + dma_addr_t addr, unsigned long attrs) 285 285 { 286 286 return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, 287 287 attrs); ··· 289 289 290 290 static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, 291 291 void *cpu_addr, dma_addr_t addr, size_t size, 292 - struct dma_attrs *attrs) 292 + unsigned long attrs) 293 293 { 294 294 return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, 295 295 size, attrs); ··· 297 297 298 298 static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, 299 299 void *cpu_addr, dma_addr_t addr, size_t size, 300 - struct dma_attrs *attrs) 300 + unsigned long attrs) 301 301 { 302 302 return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, 303 303 addr, size, attrs); ··· 306 306 static dma_addr_t vmd_map_page(struct device *dev, struct page *page, 307 307 unsigned long offset, size_t size, 308 308 enum dma_data_direction dir, 309 - struct dma_attrs *attrs) 309 + unsigned long attrs) 310 310 { 311 311 return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, 312 312 dir, attrs); 313 313 } 314 314 315 315 static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, 316 - enum dma_data_direction dir, struct dma_attrs *attrs) 316 + enum dma_data_direction dir, unsigned long attrs) 317 317 { 318 318 vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); 319 319 } 320 320 321 321 static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, 322 - enum dma_data_direction dir, struct dma_attrs *attrs) 322 
+ enum dma_data_direction dir, unsigned long attrs) 323 323 { 324 324 return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); 325 325 } 326 326 327 327 static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 328 - enum dma_data_direction dir, struct dma_attrs *attrs) 328 + enum dma_data_direction dir, unsigned long attrs) 329 329 { 330 330 vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); 331 331 }
+6 -6
arch/xtensa/kernel/pci-dma.c
··· 142 142 143 143 static void *xtensa_dma_alloc(struct device *dev, size_t size, 144 144 dma_addr_t *handle, gfp_t flag, 145 - struct dma_attrs *attrs) 145 + unsigned long attrs) 146 146 { 147 147 unsigned long ret; 148 148 unsigned long uncached = 0; ··· 171 171 } 172 172 173 173 static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr, 174 - dma_addr_t dma_handle, struct dma_attrs *attrs) 174 + dma_addr_t dma_handle, unsigned long attrs) 175 175 { 176 176 unsigned long addr = (unsigned long)vaddr + 177 177 XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; ··· 185 185 static dma_addr_t xtensa_map_page(struct device *dev, struct page *page, 186 186 unsigned long offset, size_t size, 187 187 enum dma_data_direction dir, 188 - struct dma_attrs *attrs) 188 + unsigned long attrs) 189 189 { 190 190 dma_addr_t dma_handle = page_to_phys(page) + offset; 191 191 ··· 195 195 196 196 static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle, 197 197 size_t size, enum dma_data_direction dir, 198 - struct dma_attrs *attrs) 198 + unsigned long attrs) 199 199 { 200 200 xtensa_sync_single_for_cpu(dev, dma_handle, size, dir); 201 201 } 202 202 203 203 static int xtensa_map_sg(struct device *dev, struct scatterlist *sg, 204 204 int nents, enum dma_data_direction dir, 205 - struct dma_attrs *attrs) 205 + unsigned long attrs) 206 206 { 207 207 struct scatterlist *s; 208 208 int i; ··· 217 217 static void xtensa_unmap_sg(struct device *dev, 218 218 struct scatterlist *sg, int nents, 219 219 enum dma_data_direction dir, 220 - struct dma_attrs *attrs) 220 + unsigned long attrs) 221 221 { 222 222 struct scatterlist *s; 223 223 int i;
+1 -1
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
··· 52 52 53 53 ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie, 54 54 exynos_gem->dma_addr, exynos_gem->size, 55 - &exynos_gem->dma_attrs); 55 + exynos_gem->dma_attrs); 56 56 if (ret < 0) { 57 57 DRM_ERROR("failed to mmap.\n"); 58 58 return ret;
+5 -7
drivers/gpu/drm/exynos/exynos_drm_g2d.c
··· 17 17 #include <linux/slab.h> 18 18 #include <linux/workqueue.h> 19 19 #include <linux/dma-mapping.h> 20 - #include <linux/dma-attrs.h> 21 20 #include <linux/of.h> 22 21 23 22 #include <drm/drmP.h> ··· 234 235 struct mutex cmdlist_mutex; 235 236 dma_addr_t cmdlist_pool; 236 237 void *cmdlist_pool_virt; 237 - struct dma_attrs cmdlist_dma_attrs; 238 + unsigned long cmdlist_dma_attrs; 238 239 239 240 /* runqueue*/ 240 241 struct g2d_runqueue_node *runqueue_node; ··· 255 256 int ret; 256 257 struct g2d_buf_info *buf_info; 257 258 258 - init_dma_attrs(&g2d->cmdlist_dma_attrs); 259 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); 259 + g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE; 260 260 261 261 g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev), 262 262 G2D_CMDLIST_POOL_SIZE, 263 263 &g2d->cmdlist_pool, GFP_KERNEL, 264 - &g2d->cmdlist_dma_attrs); 264 + g2d->cmdlist_dma_attrs); 265 265 if (!g2d->cmdlist_pool_virt) { 266 266 dev_err(dev, "failed to allocate dma memory\n"); 267 267 return -ENOMEM; ··· 293 295 err: 294 296 dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE, 295 297 g2d->cmdlist_pool_virt, 296 - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); 298 + g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); 297 299 return ret; 298 300 } 299 301 ··· 307 309 dma_free_attrs(to_dma_dev(subdrv->drm_dev), 308 310 G2D_CMDLIST_POOL_SIZE, 309 311 g2d->cmdlist_pool_virt, 310 - g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); 312 + g2d->cmdlist_pool, g2d->cmdlist_dma_attrs); 311 313 } 312 314 } 313 315
+10 -10
drivers/gpu/drm/exynos/exynos_drm_gem.c
··· 24 24 static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem) 25 25 { 26 26 struct drm_device *dev = exynos_gem->base.dev; 27 - enum dma_attr attr; 27 + unsigned long attr; 28 28 unsigned int nr_pages; 29 29 struct sg_table sgt; 30 30 int ret = -ENOMEM; ··· 34 34 return 0; 35 35 } 36 36 37 - init_dma_attrs(&exynos_gem->dma_attrs); 37 + exynos_gem->dma_attrs = 0; 38 38 39 39 /* 40 40 * if EXYNOS_BO_CONTIG, fully physically contiguous memory ··· 42 42 * as possible. 43 43 */ 44 44 if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) 45 - dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs); 45 + exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS; 46 46 47 47 /* 48 48 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping ··· 54 54 else 55 55 attr = DMA_ATTR_NON_CONSISTENT; 56 56 57 - dma_set_attr(attr, &exynos_gem->dma_attrs); 58 - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs); 57 + exynos_gem->dma_attrs |= attr; 58 + exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 59 59 60 60 nr_pages = exynos_gem->size >> PAGE_SHIFT; 61 61 ··· 67 67 68 68 exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size, 69 69 &exynos_gem->dma_addr, GFP_KERNEL, 70 - &exynos_gem->dma_attrs); 70 + exynos_gem->dma_attrs); 71 71 if (!exynos_gem->cookie) { 72 72 DRM_ERROR("failed to allocate buffer.\n"); 73 73 goto err_free; ··· 75 75 76 76 ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie, 77 77 exynos_gem->dma_addr, exynos_gem->size, 78 - &exynos_gem->dma_attrs); 78 + exynos_gem->dma_attrs); 79 79 if (ret < 0) { 80 80 DRM_ERROR("failed to get sgtable.\n"); 81 81 goto err_dma_free; ··· 99 99 sg_free_table(&sgt); 100 100 err_dma_free: 101 101 dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, 102 - exynos_gem->dma_addr, &exynos_gem->dma_attrs); 102 + exynos_gem->dma_addr, exynos_gem->dma_attrs); 103 103 err_free: 104 104 drm_free_large(exynos_gem->pages); 105 105 ··· 120 120 121 121 
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie, 122 122 (dma_addr_t)exynos_gem->dma_addr, 123 - &exynos_gem->dma_attrs); 123 + exynos_gem->dma_attrs); 124 124 125 125 drm_free_large(exynos_gem->pages); 126 126 } ··· 346 346 347 347 ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie, 348 348 exynos_gem->dma_addr, exynos_gem->size, 349 - &exynos_gem->dma_attrs); 349 + exynos_gem->dma_attrs); 350 350 if (ret < 0) { 351 351 DRM_ERROR("failed to mmap.\n"); 352 352 return ret;
+1 -1
drivers/gpu/drm/exynos/exynos_drm_gem.h
··· 50 50 void *cookie; 51 51 void __iomem *kvaddr; 52 52 dma_addr_t dma_addr; 53 - struct dma_attrs dma_attrs; 53 + unsigned long dma_attrs; 54 54 struct page **pages; 55 55 struct sg_table *sgt; 56 56 };
+6 -7
drivers/gpu/drm/mediatek/mtk_drm_gem.c
··· 54 54 55 55 obj = &mtk_gem->base; 56 56 57 - init_dma_attrs(&mtk_gem->dma_attrs); 58 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs); 57 + mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; 59 58 60 59 if (!alloc_kmap) 61 - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs); 60 + mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 62 61 63 62 mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size, 64 63 &mtk_gem->dma_addr, GFP_KERNEL, 65 - &mtk_gem->dma_attrs); 64 + mtk_gem->dma_attrs); 66 65 if (!mtk_gem->cookie) { 67 66 DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size); 68 67 ret = -ENOMEM; ··· 92 93 drm_prime_gem_destroy(obj, mtk_gem->sg); 93 94 else 94 95 dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie, 95 - mtk_gem->dma_addr, &mtk_gem->dma_attrs); 96 + mtk_gem->dma_addr, mtk_gem->dma_attrs); 96 97 97 98 /* release file pointer to gem object. */ 98 99 drm_gem_object_release(obj); ··· 172 173 vma->vm_pgoff = 0; 173 174 174 175 ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie, 175 - mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs); 176 + mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs); 176 177 if (ret) 177 178 drm_gem_vm_close(vma); 178 179 ··· 223 224 224 225 ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie, 225 226 mtk_gem->dma_addr, obj->size, 226 - &mtk_gem->dma_attrs); 227 + mtk_gem->dma_attrs); 227 228 if (ret) { 228 229 DRM_ERROR("failed to allocate sgt, %d\n", ret); 229 230 kfree(sgt);
+1 -1
drivers/gpu/drm/mediatek/mtk_drm_gem.h
··· 35 35 void *cookie; 36 36 void *kvaddr; 37 37 dma_addr_t dma_addr; 38 - struct dma_attrs dma_attrs; 38 + unsigned long dma_attrs; 39 39 struct sg_table *sg; 40 40 }; 41 41
+6 -7
drivers/gpu/drm/msm/msm_drv.c
··· 238 238 } 239 239 240 240 if (priv->vram.paddr) { 241 - DEFINE_DMA_ATTRS(attrs); 242 - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); 241 + unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING; 243 242 drm_mm_takedown(&priv->vram.mm); 244 243 dma_free_attrs(dev, priv->vram.size, NULL, 245 - priv->vram.paddr, &attrs); 244 + priv->vram.paddr, attrs); 246 245 } 247 246 248 247 component_unbind_all(dev, ddev); ··· 309 310 } 310 311 311 312 if (size) { 312 - DEFINE_DMA_ATTRS(attrs); 313 + unsigned long attrs = 0; 313 314 void *p; 314 315 315 316 priv->vram.size = size; 316 317 317 318 drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); 318 319 319 - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); 320 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 320 + attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 321 + attrs |= DMA_ATTR_WRITE_COMBINE; 321 322 322 323 /* note that for no-kernel-mapping, the vaddr returned 323 324 * is bogus, but non-null if allocation succeeded: 324 325 */ 325 326 p = dma_alloc_attrs(dev->dev, size, 326 - &priv->vram.paddr, GFP_KERNEL, &attrs); 327 + &priv->vram.paddr, GFP_KERNEL, attrs); 327 328 if (!p) { 328 329 dev_err(dev->dev, "failed to allocate VRAM\n"); 329 330 priv->vram.paddr = 0;
+6 -7
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
··· 109 109 u16 iommu_bit; 110 110 111 111 /* Only used by DMA API */ 112 - struct dma_attrs attrs; 112 + unsigned long attrs; 113 113 }; 114 114 #define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base) 115 115 ··· 293 293 goto out; 294 294 295 295 dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr, 296 - node->handle, &imem->attrs); 296 + node->handle, imem->attrs); 297 297 298 298 out: 299 299 return node; ··· 386 386 387 387 node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT, 388 388 &node->handle, GFP_KERNEL, 389 - &imem->attrs); 389 + imem->attrs); 390 390 if (!node->base.vaddr) { 391 391 nvkm_error(subdev, "cannot allocate DMA memory\n"); 392 392 return -ENOMEM; ··· 597 597 598 598 nvkm_info(&imem->base.subdev, "using IOMMU\n"); 599 599 } else { 600 - init_dma_attrs(&imem->attrs); 601 - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs); 602 - dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs); 603 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs); 600 + imem->attrs = DMA_ATTR_NON_CONSISTENT | 601 + DMA_ATTR_WEAK_ORDERING | 602 + DMA_ATTR_WRITE_COMBINE; 604 603 605 604 nvkm_info(&imem->base.subdev, "using DMA API\n"); 606 605 }
+7 -10
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
··· 17 17 #include <drm/drm_gem.h> 18 18 #include <drm/drm_vma_manager.h> 19 19 20 - #include <linux/dma-attrs.h> 21 - 22 20 #include "rockchip_drm_drv.h" 23 21 #include "rockchip_drm_gem.h" 24 22 ··· 26 28 struct drm_gem_object *obj = &rk_obj->base; 27 29 struct drm_device *drm = obj->dev; 28 30 29 - init_dma_attrs(&rk_obj->dma_attrs); 30 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs); 31 + rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE; 31 32 32 33 if (!alloc_kmap) 33 - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs); 34 + rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 34 35 35 36 rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size, 36 37 &rk_obj->dma_addr, GFP_KERNEL, 37 - &rk_obj->dma_attrs); 38 + rk_obj->dma_attrs); 38 39 if (!rk_obj->kvaddr) { 39 40 DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size); 40 41 return -ENOMEM; ··· 48 51 struct drm_device *drm = obj->dev; 49 52 50 53 dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr, 51 - &rk_obj->dma_attrs); 54 + rk_obj->dma_attrs); 52 55 } 53 56 54 57 static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, ··· 67 70 vma->vm_pgoff = 0; 68 71 69 72 ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, 70 - obj->size, &rk_obj->dma_attrs); 73 + obj->size, rk_obj->dma_attrs); 71 74 if (ret) 72 75 drm_gem_vm_close(vma); 73 76 ··· 259 262 260 263 ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr, 261 264 rk_obj->dma_addr, obj->size, 262 - &rk_obj->dma_attrs); 265 + rk_obj->dma_attrs); 263 266 if (ret) { 264 267 DRM_ERROR("failed to allocate sgt, %d\n", ret); 265 268 kfree(sgt); ··· 273 276 { 274 277 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj); 275 278 276 - if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs)) 279 + if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING) 277 280 return NULL; 278 281 279 282 return rk_obj->kvaddr;
+1 -1
drivers/gpu/drm/rockchip/rockchip_drm_gem.h
··· 23 23 24 24 void *kvaddr; 25 25 dma_addr_t dma_addr; 26 - struct dma_attrs dma_attrs; 26 + unsigned long dma_attrs; 27 27 }; 28 28 29 29 struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
+3 -4
drivers/infiniband/core/umem.c
··· 37 37 #include <linux/sched.h> 38 38 #include <linux/export.h> 39 39 #include <linux/hugetlb.h> 40 - #include <linux/dma-attrs.h> 41 40 #include <linux/slab.h> 42 41 #include <rdma/ib_umem_odp.h> 43 42 ··· 91 92 unsigned long npages; 92 93 int ret; 93 94 int i; 94 - DEFINE_DMA_ATTRS(attrs); 95 + unsigned long dma_attrs = 0; 95 96 struct scatterlist *sg, *sg_list_start; 96 97 int need_release = 0; 97 98 98 99 if (dmasync) 99 - dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); 100 + dma_attrs |= DMA_ATTR_WRITE_BARRIER; 100 101 101 102 if (!size) 102 103 return ERR_PTR(-EINVAL); ··· 214 215 umem->sg_head.sgl, 215 216 umem->npages, 216 217 DMA_BIDIRECTIONAL, 217 - &attrs); 218 + dma_attrs); 218 219 219 220 if (umem->nmap <= 0) { 220 221 ret = -ENOMEM;
+6 -6
drivers/iommu/amd_iommu.c
··· 2375 2375 static dma_addr_t map_page(struct device *dev, struct page *page, 2376 2376 unsigned long offset, size_t size, 2377 2377 enum dma_data_direction dir, 2378 - struct dma_attrs *attrs) 2378 + unsigned long attrs) 2379 2379 { 2380 2380 phys_addr_t paddr = page_to_phys(page) + offset; 2381 2381 struct protection_domain *domain; ··· 2398 2398 * The exported unmap_single function for dma_ops. 2399 2399 */ 2400 2400 static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, 2401 - enum dma_data_direction dir, struct dma_attrs *attrs) 2401 + enum dma_data_direction dir, unsigned long attrs) 2402 2402 { 2403 2403 struct protection_domain *domain; 2404 2404 struct dma_ops_domain *dma_dom; ··· 2444 2444 */ 2445 2445 static int map_sg(struct device *dev, struct scatterlist *sglist, 2446 2446 int nelems, enum dma_data_direction direction, 2447 - struct dma_attrs *attrs) 2447 + unsigned long attrs) 2448 2448 { 2449 2449 int mapped_pages = 0, npages = 0, prot = 0, i; 2450 2450 struct protection_domain *domain; ··· 2525 2525 */ 2526 2526 static void unmap_sg(struct device *dev, struct scatterlist *sglist, 2527 2527 int nelems, enum dma_data_direction dir, 2528 - struct dma_attrs *attrs) 2528 + unsigned long attrs) 2529 2529 { 2530 2530 struct protection_domain *domain; 2531 2531 struct dma_ops_domain *dma_dom; ··· 2548 2548 */ 2549 2549 static void *alloc_coherent(struct device *dev, size_t size, 2550 2550 dma_addr_t *dma_addr, gfp_t flag, 2551 - struct dma_attrs *attrs) 2551 + unsigned long attrs) 2552 2552 { 2553 2553 u64 dma_mask = dev->coherent_dma_mask; 2554 2554 struct protection_domain *domain; ··· 2604 2604 */ 2605 2605 static void free_coherent(struct device *dev, size_t size, 2606 2606 void *virt_addr, dma_addr_t dma_addr, 2607 - struct dma_attrs *attrs) 2607 + unsigned long attrs) 2608 2608 { 2609 2609 struct protection_domain *domain; 2610 2610 struct dma_ops_domain *dma_dom;
+4 -4
drivers/iommu/dma-iommu.c
··· 286 286 * or NULL on failure. 287 287 */ 288 288 struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, 289 - struct dma_attrs *attrs, int prot, dma_addr_t *handle, 289 + unsigned long attrs, int prot, dma_addr_t *handle, 290 290 void (*flush_page)(struct device *, const void *, phys_addr_t)) 291 291 { 292 292 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); ··· 306 306 } else { 307 307 size = ALIGN(size, min_size); 308 308 } 309 - if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs)) 309 + if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) 310 310 alloc_sizes = min_size; 311 311 312 312 count = PAGE_ALIGN(size) >> PAGE_SHIFT; ··· 400 400 } 401 401 402 402 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, 403 - enum dma_data_direction dir, struct dma_attrs *attrs) 403 + enum dma_data_direction dir, unsigned long attrs) 404 404 { 405 405 __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); 406 406 } ··· 560 560 } 561 561 562 562 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 563 - enum dma_data_direction dir, struct dma_attrs *attrs) 563 + enum dma_data_direction dir, unsigned long attrs) 564 564 { 565 565 /* 566 566 * The scatterlist segments are mapped into a single
+6 -6
drivers/iommu/intel-iommu.c
··· 3552 3552 static dma_addr_t intel_map_page(struct device *dev, struct page *page, 3553 3553 unsigned long offset, size_t size, 3554 3554 enum dma_data_direction dir, 3555 - struct dma_attrs *attrs) 3555 + unsigned long attrs) 3556 3556 { 3557 3557 return __intel_map_single(dev, page_to_phys(page) + offset, size, 3558 3558 dir, *dev->dma_mask); ··· 3711 3711 3712 3712 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, 3713 3713 size_t size, enum dma_data_direction dir, 3714 - struct dma_attrs *attrs) 3714 + unsigned long attrs) 3715 3715 { 3716 3716 intel_unmap(dev, dev_addr, size); 3717 3717 } 3718 3718 3719 3719 static void *intel_alloc_coherent(struct device *dev, size_t size, 3720 3720 dma_addr_t *dma_handle, gfp_t flags, 3721 - struct dma_attrs *attrs) 3721 + unsigned long attrs) 3722 3722 { 3723 3723 struct page *page = NULL; 3724 3724 int order; ··· 3764 3764 } 3765 3765 3766 3766 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, 3767 - dma_addr_t dma_handle, struct dma_attrs *attrs) 3767 + dma_addr_t dma_handle, unsigned long attrs) 3768 3768 { 3769 3769 int order; 3770 3770 struct page *page = virt_to_page(vaddr); ··· 3779 3779 3780 3780 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, 3781 3781 int nelems, enum dma_data_direction dir, 3782 - struct dma_attrs *attrs) 3782 + unsigned long attrs) 3783 3783 { 3784 3784 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; 3785 3785 unsigned long nrpages = 0; ··· 3808 3808 } 3809 3809 3810 3810 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, 3811 - enum dma_data_direction dir, struct dma_attrs *attrs) 3811 + enum dma_data_direction dir, unsigned long attrs) 3812 3812 { 3813 3813 int i; 3814 3814 struct dmar_domain *domain;
+8 -18
drivers/media/platform/sti/bdisp/bdisp-hw.c
··· 430 430 */ 431 431 void bdisp_hw_free_nodes(struct bdisp_ctx *ctx) 432 432 { 433 - if (ctx && ctx->node[0]) { 434 - DEFINE_DMA_ATTRS(attrs); 435 - 436 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 433 + if (ctx && ctx->node[0]) 437 434 dma_free_attrs(ctx->bdisp_dev->dev, 438 435 sizeof(struct bdisp_node) * MAX_NB_NODE, 439 - ctx->node[0], ctx->node_paddr[0], &attrs); 440 - } 436 + ctx->node[0], ctx->node_paddr[0], 437 + DMA_ATTR_WRITE_COMBINE); 441 438 } 442 439 443 440 /** ··· 452 455 unsigned int i, node_size = sizeof(struct bdisp_node); 453 456 void *base; 454 457 dma_addr_t paddr; 455 - DEFINE_DMA_ATTRS(attrs); 456 458 457 459 /* Allocate all the nodes within a single memory page */ 458 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 459 460 base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr, 460 - GFP_KERNEL | GFP_DMA, &attrs); 461 + GFP_KERNEL | GFP_DMA, DMA_ATTR_WRITE_COMBINE); 461 462 if (!base) { 462 463 dev_err(dev, "%s no mem\n", __func__); 463 464 return -ENOMEM; ··· 488 493 { 489 494 int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); 490 495 491 - if (bdisp_h_filter[0].virt) { 492 - DEFINE_DMA_ATTRS(attrs); 493 - 494 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 496 + if (bdisp_h_filter[0].virt) 495 497 dma_free_attrs(dev, size, bdisp_h_filter[0].virt, 496 - bdisp_h_filter[0].paddr, &attrs); 497 - } 498 + bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE); 498 499 } 499 500 500 501 /** ··· 507 516 unsigned int i, size; 508 517 void *base; 509 518 dma_addr_t paddr; 510 - DEFINE_DMA_ATTRS(attrs); 511 519 512 520 /* Allocate all the filters within a single memory page */ 513 521 size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER); 514 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 515 - base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs); 522 + base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, 523 + DMA_ATTR_WRITE_COMBINE); 516 524 if (!base) 517 525 return -ENOMEM; 518 526
+11 -17
drivers/media/v4l2-core/videobuf2-dma-contig.c
··· 27 27 unsigned long size; 28 28 void *cookie; 29 29 dma_addr_t dma_addr; 30 - struct dma_attrs attrs; 30 + unsigned long attrs; 31 31 enum dma_data_direction dma_dir; 32 32 struct sg_table *dma_sgt; 33 33 struct frame_vector *vec; ··· 130 130 kfree(buf->sgt_base); 131 131 } 132 132 dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr, 133 - &buf->attrs); 133 + buf->attrs); 134 134 put_device(buf->dev); 135 135 kfree(buf); 136 136 } 137 137 138 - static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs, 138 + static void *vb2_dc_alloc(struct device *dev, unsigned long attrs, 139 139 unsigned long size, enum dma_data_direction dma_dir, 140 140 gfp_t gfp_flags) 141 141 { ··· 146 146 return ERR_PTR(-ENOMEM); 147 147 148 148 if (attrs) 149 - buf->attrs = *attrs; 149 + buf->attrs = attrs; 150 150 buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr, 151 - GFP_KERNEL | gfp_flags, &buf->attrs); 151 + GFP_KERNEL | gfp_flags, buf->attrs); 152 152 if (!buf->cookie) { 153 153 dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size); 154 154 kfree(buf); 155 155 return ERR_PTR(-ENOMEM); 156 156 } 157 157 158 - if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs)) 158 + if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) 159 159 buf->vaddr = buf->cookie; 160 160 161 161 /* Prevent the device from being released while the buffer is used */ ··· 189 189 vma->vm_pgoff = 0; 190 190 191 191 ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, 192 - buf->dma_addr, buf->size, &buf->attrs); 192 + buf->dma_addr, buf->size, buf->attrs); 193 193 194 194 if (ret) { 195 195 pr_err("Remapping memory failed, error: %d\n", ret); ··· 372 372 } 373 373 374 374 ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr, 375 - buf->size, &buf->attrs); 375 + buf->size, buf->attrs); 376 376 if (ret < 0) { 377 377 dev_err(buf->dev, "failed to get scatterlist from DMA API\n"); 378 378 kfree(sgt); ··· 421 421 struct page **pages; 422 422 423 423 if 
(sgt) { 424 - DEFINE_DMA_ATTRS(attrs); 425 - 426 - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); 427 424 /* 428 425 * No need to sync to CPU, it's already synced to the CPU 429 426 * since the finish() memop will have been called before this. 430 427 */ 431 428 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, 432 - buf->dma_dir, &attrs); 429 + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 433 430 pages = frame_vector_pages(buf->vec); 434 431 /* sgt should exist only if vector contains pages... */ 435 432 BUG_ON(IS_ERR(pages)); ··· 481 484 struct sg_table *sgt; 482 485 unsigned long contig_size; 483 486 unsigned long dma_align = dma_get_cache_alignment(); 484 - DEFINE_DMA_ATTRS(attrs); 485 - 486 - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); 487 487 488 488 /* Only cache aligned DMA transfers are reliable */ 489 489 if (!IS_ALIGNED(vaddr | size, dma_align)) { ··· 542 548 * prepare() memop is called. 543 549 */ 544 550 sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, 545 - buf->dma_dir, &attrs); 551 + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 546 552 if (sgt->nents <= 0) { 547 553 pr_err("failed to map scatterlist\n"); 548 554 ret = -EIO; ··· 566 572 567 573 fail_map_sg: 568 574 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, 569 - buf->dma_dir, &attrs); 575 + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 570 576 571 577 fail_sgt_init: 572 578 sg_free_table(sgt);
+5 -16
drivers/media/v4l2-core/videobuf2-dma-sg.c
··· 95 95 return 0; 96 96 } 97 97 98 - static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_attrs, 98 + static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs, 99 99 unsigned long size, enum dma_data_direction dma_dir, 100 100 gfp_t gfp_flags) 101 101 { ··· 103 103 struct sg_table *sgt; 104 104 int ret; 105 105 int num_pages; 106 - DEFINE_DMA_ATTRS(attrs); 107 - 108 - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); 109 106 110 107 if (WARN_ON(dev == NULL)) 111 108 return NULL; ··· 141 144 * prepare() memop is called. 142 145 */ 143 146 sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, 144 - buf->dma_dir, &attrs); 147 + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 145 148 if (!sgt->nents) 146 149 goto fail_map; 147 150 ··· 176 179 int i = buf->num_pages; 177 180 178 181 if (atomic_dec_and_test(&buf->refcount)) { 179 - DEFINE_DMA_ATTRS(attrs); 180 - 181 - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); 182 182 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__, 183 183 buf->num_pages); 184 184 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, 185 - buf->dma_dir, &attrs); 185 + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 186 186 if (buf->vaddr) 187 187 vm_unmap_ram(buf->vaddr, buf->num_pages); 188 188 sg_free_table(buf->dma_sgt); ··· 222 228 { 223 229 struct vb2_dma_sg_buf *buf; 224 230 struct sg_table *sgt; 225 - DEFINE_DMA_ATTRS(attrs); 226 231 struct frame_vector *vec; 227 232 228 - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); 229 233 buf = kzalloc(sizeof *buf, GFP_KERNEL); 230 234 if (!buf) 231 235 return NULL; ··· 254 262 * prepare() memop is called. 
255 263 */ 256 264 sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, 257 - buf->dma_dir, &attrs); 265 + buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC); 258 266 if (!sgt->nents) 259 267 goto userptr_fail_map; 260 268 ··· 278 286 struct vb2_dma_sg_buf *buf = buf_priv; 279 287 struct sg_table *sgt = &buf->sg_table; 280 288 int i = buf->num_pages; 281 - DEFINE_DMA_ATTRS(attrs); 282 - 283 - dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs); 284 289 285 290 dprintk(1, "%s: Releasing userspace buffer of %d pages\n", 286 291 __func__, buf->num_pages); 287 292 dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir, 288 - &attrs); 293 + DMA_ATTR_SKIP_CPU_SYNC); 289 294 if (buf->vaddr) 290 295 vm_unmap_ram(buf->vaddr, buf->num_pages); 291 296 sg_free_table(buf->dma_sgt);
+1 -1
drivers/media/v4l2-core/videobuf2-vmalloc.c
··· 33 33 34 34 static void vb2_vmalloc_put(void *buf_priv); 35 35 36 - static void *vb2_vmalloc_alloc(struct device *dev, const struct dma_attrs *attrs, 36 + static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs, 37 37 unsigned long size, enum dma_data_direction dma_dir, 38 38 gfp_t gfp_flags) 39 39 {
+10 -10
drivers/misc/mic/host/mic_boot.c
··· 38 38 static dma_addr_t 39 39 _mic_dma_map_page(struct device *dev, struct page *page, 40 40 unsigned long offset, size_t size, 41 - enum dma_data_direction dir, struct dma_attrs *attrs) 41 + enum dma_data_direction dir, unsigned long attrs) 42 42 { 43 43 void *va = phys_to_virt(page_to_phys(page)) + offset; 44 44 struct mic_device *mdev = vpdev_to_mdev(dev); ··· 48 48 49 49 static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, 50 50 size_t size, enum dma_data_direction dir, 51 - struct dma_attrs *attrs) 51 + unsigned long attrs) 52 52 { 53 53 struct mic_device *mdev = vpdev_to_mdev(dev); 54 54 ··· 144 144 145 145 static void *__mic_dma_alloc(struct device *dev, size_t size, 146 146 dma_addr_t *dma_handle, gfp_t gfp, 147 - struct dma_attrs *attrs) 147 + unsigned long attrs) 148 148 { 149 149 struct scif_hw_dev *scdev = dev_get_drvdata(dev); 150 150 struct mic_device *mdev = scdev_to_mdev(scdev); ··· 164 164 } 165 165 166 166 static void __mic_dma_free(struct device *dev, size_t size, void *vaddr, 167 - dma_addr_t dma_handle, struct dma_attrs *attrs) 167 + dma_addr_t dma_handle, unsigned long attrs) 168 168 { 169 169 struct scif_hw_dev *scdev = dev_get_drvdata(dev); 170 170 struct mic_device *mdev = scdev_to_mdev(scdev); ··· 176 176 static dma_addr_t 177 177 __mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset, 178 178 size_t size, enum dma_data_direction dir, 179 - struct dma_attrs *attrs) 179 + unsigned long attrs) 180 180 { 181 181 void *va = phys_to_virt(page_to_phys(page)) + offset; 182 182 struct scif_hw_dev *scdev = dev_get_drvdata(dev); ··· 188 188 static void 189 189 __mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, 190 190 size_t size, enum dma_data_direction dir, 191 - struct dma_attrs *attrs) 191 + unsigned long attrs) 192 192 { 193 193 struct scif_hw_dev *scdev = dev_get_drvdata(dev); 194 194 struct mic_device *mdev = scdev_to_mdev(scdev); ··· 198 198 199 199 static int __mic_dma_map_sg(struct 
device *dev, struct scatterlist *sg, 200 200 int nents, enum dma_data_direction dir, 201 - struct dma_attrs *attrs) 201 + unsigned long attrs) 202 202 { 203 203 struct scif_hw_dev *scdev = dev_get_drvdata(dev); 204 204 struct mic_device *mdev = scdev_to_mdev(scdev); ··· 229 229 static void __mic_dma_unmap_sg(struct device *dev, 230 230 struct scatterlist *sg, int nents, 231 231 enum dma_data_direction dir, 232 - struct dma_attrs *attrs) 232 + unsigned long attrs) 233 233 { 234 234 struct scif_hw_dev *scdev = dev_get_drvdata(dev); 235 235 struct mic_device *mdev = scdev_to_mdev(scdev); ··· 327 327 static dma_addr_t 328 328 mic_dma_map_page(struct device *dev, struct page *page, 329 329 unsigned long offset, size_t size, enum dma_data_direction dir, 330 - struct dma_attrs *attrs) 330 + unsigned long attrs) 331 331 { 332 332 void *va = phys_to_virt(page_to_phys(page)) + offset; 333 333 struct mic_device *mdev = dev_get_drvdata(dev->parent); ··· 338 338 static void 339 339 mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr, 340 340 size_t size, enum dma_data_direction dir, 341 - struct dma_attrs *attrs) 341 + unsigned long attrs) 342 342 { 343 343 struct mic_device *mdev = dev_get_drvdata(dev->parent); 344 344 mic_unmap_single(mdev, dma_addr, size);
+8 -8
drivers/parisc/ccio-dma.c
··· 790 790 static dma_addr_t 791 791 ccio_map_page(struct device *dev, struct page *page, unsigned long offset, 792 792 size_t size, enum dma_data_direction direction, 793 - struct dma_attrs *attrs) 793 + unsigned long attrs) 794 794 { 795 795 return ccio_map_single(dev, page_address(page) + offset, size, 796 796 direction); ··· 806 806 */ 807 807 static void 808 808 ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, 809 - enum dma_data_direction direction, struct dma_attrs *attrs) 809 + enum dma_data_direction direction, unsigned long attrs) 810 810 { 811 811 struct ioc *ioc; 812 812 unsigned long flags; ··· 844 844 */ 845 845 static void * 846 846 ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, 847 - struct dma_attrs *attrs) 847 + unsigned long attrs) 848 848 { 849 849 void *ret; 850 850 #if 0 ··· 878 878 */ 879 879 static void 880 880 ccio_free(struct device *dev, size_t size, void *cpu_addr, 881 - dma_addr_t dma_handle, struct dma_attrs *attrs) 881 + dma_addr_t dma_handle, unsigned long attrs) 882 882 { 883 - ccio_unmap_page(dev, dma_handle, size, 0, NULL); 883 + ccio_unmap_page(dev, dma_handle, size, 0, 0); 884 884 free_pages((unsigned long)cpu_addr, get_order(size)); 885 885 } 886 886 ··· 907 907 */ 908 908 static int 909 909 ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents, 910 - enum dma_data_direction direction, struct dma_attrs *attrs) 910 + enum dma_data_direction direction, unsigned long attrs) 911 911 { 912 912 struct ioc *ioc; 913 913 int coalesced, filled = 0; ··· 984 984 */ 985 985 static void 986 986 ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, 987 - enum dma_data_direction direction, struct dma_attrs *attrs) 987 + enum dma_data_direction direction, unsigned long attrs) 988 988 { 989 989 struct ioc *ioc; 990 990 ··· 1004 1004 ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; 1005 1005 #endif 1006 1006 ccio_unmap_page(dev, sg_dma_address(sglist), 1007 
- sg_dma_len(sglist), direction, NULL); 1007 + sg_dma_len(sglist), direction, 0); 1008 1008 ++sglist; 1009 1009 } 1010 1010
+8 -8
drivers/parisc/sba_iommu.c
··· 783 783 static dma_addr_t 784 784 sba_map_page(struct device *dev, struct page *page, unsigned long offset, 785 785 size_t size, enum dma_data_direction direction, 786 - struct dma_attrs *attrs) 786 + unsigned long attrs) 787 787 { 788 788 return sba_map_single(dev, page_address(page) + offset, size, 789 789 direction); ··· 801 801 */ 802 802 static void 803 803 sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, 804 - enum dma_data_direction direction, struct dma_attrs *attrs) 804 + enum dma_data_direction direction, unsigned long attrs) 805 805 { 806 806 struct ioc *ioc; 807 807 #if DELAYED_RESOURCE_CNT > 0 ··· 876 876 * See Documentation/DMA-API-HOWTO.txt 877 877 */ 878 878 static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, 879 - gfp_t gfp, struct dma_attrs *attrs) 879 + gfp_t gfp, unsigned long attrs) 880 880 { 881 881 void *ret; 882 882 ··· 908 908 */ 909 909 static void 910 910 sba_free(struct device *hwdev, size_t size, void *vaddr, 911 - dma_addr_t dma_handle, struct dma_attrs *attrs) 911 + dma_addr_t dma_handle, unsigned long attrs) 912 912 { 913 - sba_unmap_page(hwdev, dma_handle, size, 0, NULL); 913 + sba_unmap_page(hwdev, dma_handle, size, 0, 0); 914 914 free_pages((unsigned long) vaddr, get_order(size)); 915 915 } 916 916 ··· 943 943 */ 944 944 static int 945 945 sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, 946 - enum dma_data_direction direction, struct dma_attrs *attrs) 946 + enum dma_data_direction direction, unsigned long attrs) 947 947 { 948 948 struct ioc *ioc; 949 949 int coalesced, filled = 0; ··· 1026 1026 */ 1027 1027 static void 1028 1028 sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, 1029 - enum dma_data_direction direction, struct dma_attrs *attrs) 1029 + enum dma_data_direction direction, unsigned long attrs) 1030 1030 { 1031 1031 struct ioc *ioc; 1032 1032 #ifdef ASSERT_PDIR_SANITY ··· 1051 1051 while (sg_dma_len(sglist) && nents--) { 1052 
1052 1053 1053 sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), 1054 - direction, NULL); 1054 + direction, 0); 1055 1055 #ifdef SBA_COLLECT_STATS 1056 1056 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; 1057 1057 ioc->usingle_calls--; /* kluge since call is unmap_sg() */
+3 -4
drivers/remoteproc/qcom_q6v5_pil.c
··· 349 349 350 350 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw) 351 351 { 352 - DEFINE_DMA_ATTRS(attrs); 352 + unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; 353 353 dma_addr_t phys; 354 354 void *ptr; 355 355 int ret; 356 356 357 - dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs); 358 - ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs); 357 + ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs); 359 358 if (!ptr) { 360 359 dev_err(qproc->dev, "failed to allocate mdt buffer\n"); 361 360 return -ENOMEM; ··· 371 372 else if (ret < 0) 372 373 dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret); 373 374 374 - dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs); 375 + dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs); 375 376 376 377 return ret < 0 ? ret : 0; 377 378 }
+6 -6
drivers/video/fbdev/omap2/omapfb/omapfb-main.c
··· 1332 1332 } 1333 1333 1334 1334 dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle, 1335 - &rg->attrs); 1335 + rg->attrs); 1336 1336 1337 1337 rg->token = NULL; 1338 1338 rg->vaddr = NULL; ··· 1370 1370 struct omapfb2_device *fbdev = ofbi->fbdev; 1371 1371 struct omapfb2_mem_region *rg; 1372 1372 void *token; 1373 - DEFINE_DMA_ATTRS(attrs); 1373 + unsigned long attrs; 1374 1374 dma_addr_t dma_handle; 1375 1375 int r; 1376 1376 ··· 1386 1386 1387 1387 size = PAGE_ALIGN(size); 1388 1388 1389 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 1389 + attrs = DMA_ATTR_WRITE_COMBINE; 1390 1390 1391 1391 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) 1392 - dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs); 1392 + attrs |= DMA_ATTR_NO_KERNEL_MAPPING; 1393 1393 1394 1394 DBG("allocating %lu bytes for fb %d\n", size, ofbi->id); 1395 1395 1396 1396 token = dma_alloc_attrs(fbdev->dev, size, &dma_handle, 1397 - GFP_KERNEL, &attrs); 1397 + GFP_KERNEL, attrs); 1398 1398 1399 1399 if (token == NULL) { 1400 1400 dev_err(fbdev->dev, "failed to allocate framebuffer\n"); ··· 1408 1408 r = omap_vrfb_request_ctx(&rg->vrfb); 1409 1409 if (r) { 1410 1410 dma_free_attrs(fbdev->dev, size, token, dma_handle, 1411 - &attrs); 1411 + attrs); 1412 1412 dev_err(fbdev->dev, "vrfb create ctx failed\n"); 1413 1413 return r; 1414 1414 }
+1 -2
drivers/video/fbdev/omap2/omapfb/omapfb.h
··· 28 28 #endif 29 29 30 30 #include <linux/rwsem.h> 31 - #include <linux/dma-attrs.h> 32 31 #include <linux/dma-mapping.h> 33 32 34 33 #include <video/omapfb_dss.h> ··· 50 51 51 52 struct omapfb2_mem_region { 52 53 int id; 53 - struct dma_attrs attrs; 54 + unsigned long attrs; 54 55 void *token; 55 56 dma_addr_t dma_handle; 56 57 u32 paddr;
+7 -7
drivers/xen/swiotlb-xen.c
··· 294 294 void * 295 295 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 296 296 dma_addr_t *dma_handle, gfp_t flags, 297 - struct dma_attrs *attrs) 297 + unsigned long attrs) 298 298 { 299 299 void *ret; 300 300 int order = get_order(size); ··· 346 346 347 347 void 348 348 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 349 - dma_addr_t dev_addr, struct dma_attrs *attrs) 349 + dma_addr_t dev_addr, unsigned long attrs) 350 350 { 351 351 int order = get_order(size); 352 352 phys_addr_t phys; ··· 378 378 dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, 379 379 unsigned long offset, size_t size, 380 380 enum dma_data_direction dir, 381 - struct dma_attrs *attrs) 381 + unsigned long attrs) 382 382 { 383 383 phys_addr_t map, phys = page_to_phys(page) + offset; 384 384 dma_addr_t dev_addr = xen_phys_to_bus(phys); ··· 434 434 */ 435 435 static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, 436 436 size_t size, enum dma_data_direction dir, 437 - struct dma_attrs *attrs) 437 + unsigned long attrs) 438 438 { 439 439 phys_addr_t paddr = xen_bus_to_phys(dev_addr); 440 440 ··· 462 462 463 463 void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 464 464 size_t size, enum dma_data_direction dir, 465 - struct dma_attrs *attrs) 465 + unsigned long attrs) 466 466 { 467 467 xen_unmap_single(hwdev, dev_addr, size, dir, attrs); 468 468 } ··· 538 538 int 539 539 xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 540 540 int nelems, enum dma_data_direction dir, 541 - struct dma_attrs *attrs) 541 + unsigned long attrs) 542 542 { 543 543 struct scatterlist *sg; 544 544 int i; ··· 599 599 void 600 600 xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 601 601 int nelems, enum dma_data_direction dir, 602 - struct dma_attrs *attrs) 602 + unsigned long attrs) 603 603 { 604 604 struct scatterlist *sg; 605 605 int i;
-71
include/linux/dma-attrs.h
··· 1 - #ifndef _DMA_ATTR_H 2 - #define _DMA_ATTR_H 3 - 4 - #include <linux/bitmap.h> 5 - #include <linux/bitops.h> 6 - #include <linux/bug.h> 7 - 8 - /** 9 - * an enum dma_attr represents an attribute associated with a DMA 10 - * mapping. The semantics of each attribute should be defined in 11 - * Documentation/DMA-attributes.txt. 12 - */ 13 - enum dma_attr { 14 - DMA_ATTR_WRITE_BARRIER, 15 - DMA_ATTR_WEAK_ORDERING, 16 - DMA_ATTR_WRITE_COMBINE, 17 - DMA_ATTR_NON_CONSISTENT, 18 - DMA_ATTR_NO_KERNEL_MAPPING, 19 - DMA_ATTR_SKIP_CPU_SYNC, 20 - DMA_ATTR_FORCE_CONTIGUOUS, 21 - DMA_ATTR_ALLOC_SINGLE_PAGES, 22 - DMA_ATTR_MAX, 23 - }; 24 - 25 - #define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX) 26 - 27 - /** 28 - * struct dma_attrs - an opaque container for DMA attributes 29 - * @flags - bitmask representing a collection of enum dma_attr 30 - */ 31 - struct dma_attrs { 32 - unsigned long flags[__DMA_ATTRS_LONGS]; 33 - }; 34 - 35 - #define DEFINE_DMA_ATTRS(x) \ 36 - struct dma_attrs x = { \ 37 - .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \ 38 - } 39 - 40 - static inline void init_dma_attrs(struct dma_attrs *attrs) 41 - { 42 - bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); 43 - } 44 - 45 - /** 46 - * dma_set_attr - set a specific attribute 47 - * @attr: attribute to set 48 - * @attrs: struct dma_attrs (may be NULL) 49 - */ 50 - static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) 51 - { 52 - if (attrs == NULL) 53 - return; 54 - BUG_ON(attr >= DMA_ATTR_MAX); 55 - __set_bit(attr, attrs->flags); 56 - } 57 - 58 - /** 59 - * dma_get_attr - check for a specific attribute 60 - * @attr: attribute to set 61 - * @attrs: struct dma_attrs (may be NULL) 62 - */ 63 - static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) 64 - { 65 - if (attrs == NULL) 66 - return 0; 67 - BUG_ON(attr >= DMA_ATTR_MAX); 68 - return test_bit(attr, attrs->flags); 69 - } 70 - 71 - #endif /* _DMA_ATTR_H */
+3 -3
include/linux/dma-iommu.h
··· 39 39 * the arch code to take care of attributes and cache maintenance 40 40 */ 41 41 struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, 42 - struct dma_attrs *attrs, int prot, dma_addr_t *handle, 42 + unsigned long attrs, int prot, dma_addr_t *handle, 43 43 void (*flush_page)(struct device *, const void *, phys_addr_t)); 44 44 void iommu_dma_free(struct device *dev, struct page **pages, size_t size, 45 45 dma_addr_t *handle); ··· 56 56 * directly as DMA mapping callbacks for simplicity 57 57 */ 58 58 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size, 59 - enum dma_data_direction dir, struct dma_attrs *attrs); 59 + enum dma_data_direction dir, unsigned long attrs); 60 60 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 61 - enum dma_data_direction dir, struct dma_attrs *attrs); 61 + enum dma_data_direction dir, unsigned long attrs); 62 62 int iommu_dma_supported(struct device *dev, u64 mask); 63 63 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); 64 64
+84 -44
include/linux/dma-mapping.h
··· 5 5 #include <linux/string.h> 6 6 #include <linux/device.h> 7 7 #include <linux/err.h> 8 - #include <linux/dma-attrs.h> 9 8 #include <linux/dma-debug.h> 10 9 #include <linux/dma-direction.h> 11 10 #include <linux/scatterlist.h> 12 11 #include <linux/kmemcheck.h> 13 12 #include <linux/bug.h> 13 + 14 + /** 15 + * List of possible attributes associated with a DMA mapping. The semantics 16 + * of each attribute should be defined in Documentation/DMA-attributes.txt. 17 + * 18 + * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute 19 + * forces all pending DMA writes to complete. 20 + */ 21 + #define DMA_ATTR_WRITE_BARRIER (1UL << 0) 22 + /* 23 + * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping 24 + * may be weakly ordered, that is that reads and writes may pass each other. 25 + */ 26 + #define DMA_ATTR_WEAK_ORDERING (1UL << 1) 27 + /* 28 + * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be 29 + * buffered to improve performance. 30 + */ 31 + #define DMA_ATTR_WRITE_COMBINE (1UL << 2) 32 + /* 33 + * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either 34 + * consistent or non-consistent memory as it sees fit. 35 + */ 36 + #define DMA_ATTR_NON_CONSISTENT (1UL << 3) 37 + /* 38 + * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel 39 + * virtual mapping for the allocated buffer. 40 + */ 41 + #define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4) 42 + /* 43 + * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of 44 + * the CPU cache for the given buffer assuming that it has been already 45 + * transferred to 'device' domain. 46 + */ 47 + #define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5) 48 + /* 49 + * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer 50 + * in physical memory. 
51 + */ 52 + #define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6) 53 + /* 54 + * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem 55 + * that it's probably not worth the time to try to allocate memory to in a way 56 + * that gives better TLB efficiency. 57 + */ 58 + #define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7) 14 59 15 60 /* 16 61 * A dma_addr_t can hold any valid DMA or bus address for the platform. ··· 66 21 struct dma_map_ops { 67 22 void* (*alloc)(struct device *dev, size_t size, 68 23 dma_addr_t *dma_handle, gfp_t gfp, 69 - struct dma_attrs *attrs); 24 + unsigned long attrs); 70 25 void (*free)(struct device *dev, size_t size, 71 26 void *vaddr, dma_addr_t dma_handle, 72 - struct dma_attrs *attrs); 27 + unsigned long attrs); 73 28 int (*mmap)(struct device *, struct vm_area_struct *, 74 - void *, dma_addr_t, size_t, struct dma_attrs *attrs); 29 + void *, dma_addr_t, size_t, 30 + unsigned long attrs); 75 31 76 32 int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *, 77 - dma_addr_t, size_t, struct dma_attrs *attrs); 33 + dma_addr_t, size_t, unsigned long attrs); 78 34 79 35 dma_addr_t (*map_page)(struct device *dev, struct page *page, 80 36 unsigned long offset, size_t size, 81 37 enum dma_data_direction dir, 82 - struct dma_attrs *attrs); 38 + unsigned long attrs); 83 39 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, 84 40 size_t size, enum dma_data_direction dir, 85 - struct dma_attrs *attrs); 41 + unsigned long attrs); 86 42 /* 87 43 * map_sg returns 0 on error and a value > 0 on success. 88 44 * It should never return a value < 0. 
89 45 */ 90 46 int (*map_sg)(struct device *dev, struct scatterlist *sg, 91 47 int nents, enum dma_data_direction dir, 92 - struct dma_attrs *attrs); 48 + unsigned long attrs); 93 49 void (*unmap_sg)(struct device *dev, 94 50 struct scatterlist *sg, int nents, 95 51 enum dma_data_direction dir, 96 - struct dma_attrs *attrs); 52 + unsigned long attrs); 97 53 void (*sync_single_for_cpu)(struct device *dev, 98 54 dma_addr_t dma_handle, size_t size, 99 55 enum dma_data_direction dir); ··· 169 123 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, 170 124 size_t size, 171 125 enum dma_data_direction dir, 172 - struct dma_attrs *attrs) 126 + unsigned long attrs) 173 127 { 174 128 struct dma_map_ops *ops = get_dma_ops(dev); 175 129 dma_addr_t addr; ··· 188 142 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, 189 143 size_t size, 190 144 enum dma_data_direction dir, 191 - struct dma_attrs *attrs) 145 + unsigned long attrs) 192 146 { 193 147 struct dma_map_ops *ops = get_dma_ops(dev); 194 148 ··· 204 158 */ 205 159 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, 206 160 int nents, enum dma_data_direction dir, 207 - struct dma_attrs *attrs) 161 + unsigned long attrs) 208 162 { 209 163 struct dma_map_ops *ops = get_dma_ops(dev); 210 164 int i, ents; ··· 222 176 223 177 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, 224 178 int nents, enum dma_data_direction dir, 225 - struct dma_attrs *attrs) 179 + unsigned long attrs) 226 180 { 227 181 struct dma_map_ops *ops = get_dma_ops(dev); 228 182 ··· 241 195 242 196 kmemcheck_mark_initialized(page_address(page) + offset, size); 243 197 BUG_ON(!valid_dma_direction(dir)); 244 - addr = ops->map_page(dev, page, offset, size, dir, NULL); 198 + addr = ops->map_page(dev, page, offset, size, dir, 0); 245 199 debug_dma_map_page(dev, page, offset, size, dir, addr, false); 246 200 247 201 return addr; ··· 254 208 255 209 
BUG_ON(!valid_dma_direction(dir)); 256 210 if (ops->unmap_page) 257 - ops->unmap_page(dev, addr, size, dir, NULL); 211 + ops->unmap_page(dev, addr, size, dir, 0); 258 212 debug_dma_unmap_page(dev, addr, size, dir, false); 259 213 } 260 214 ··· 335 289 336 290 } 337 291 338 - #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) 339 - #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) 340 - #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) 341 - #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) 292 + #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) 293 + #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) 294 + #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0) 295 + #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0) 342 296 343 297 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 344 298 void *cpu_addr, dma_addr_t dma_addr, size_t size); ··· 367 321 */ 368 322 static inline int 369 323 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, 370 - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) 324 + dma_addr_t dma_addr, size_t size, unsigned long attrs) 371 325 { 372 326 struct dma_map_ops *ops = get_dma_ops(dev); 373 327 BUG_ON(!ops); ··· 376 330 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size); 377 331 } 378 332 379 - #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL) 333 + #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) 380 334 381 335 int 382 336 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, ··· 384 338 385 339 static inline int 386 340 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, 387 - dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs) 341 + dma_addr_t dma_addr, size_t size, 342 + unsigned long attrs) 388 343 { 389 344 struct 
dma_map_ops *ops = get_dma_ops(dev); 390 345 BUG_ON(!ops); ··· 395 348 return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size); 396 349 } 397 350 398 - #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL) 351 + #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) 399 352 400 353 #ifndef arch_dma_alloc_attrs 401 354 #define arch_dma_alloc_attrs(dev, flag) (true) ··· 403 356 404 357 static inline void *dma_alloc_attrs(struct device *dev, size_t size, 405 358 dma_addr_t *dma_handle, gfp_t flag, 406 - struct dma_attrs *attrs) 359 + unsigned long attrs) 407 360 { 408 361 struct dma_map_ops *ops = get_dma_ops(dev); 409 362 void *cpu_addr; ··· 425 378 426 379 static inline void dma_free_attrs(struct device *dev, size_t size, 427 380 void *cpu_addr, dma_addr_t dma_handle, 428 - struct dma_attrs *attrs) 381 + unsigned long attrs) 429 382 { 430 383 struct dma_map_ops *ops = get_dma_ops(dev); 431 384 ··· 445 398 static inline void *dma_alloc_coherent(struct device *dev, size_t size, 446 399 dma_addr_t *dma_handle, gfp_t flag) 447 400 { 448 - return dma_alloc_attrs(dev, size, dma_handle, flag, NULL); 401 + return dma_alloc_attrs(dev, size, dma_handle, flag, 0); 449 402 } 450 403 451 404 static inline void dma_free_coherent(struct device *dev, size_t size, 452 405 void *cpu_addr, dma_addr_t dma_handle) 453 406 { 454 - return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL); 407 + return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0); 455 408 } 456 409 457 410 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, 458 411 dma_addr_t *dma_handle, gfp_t gfp) 459 412 { 460 - DEFINE_DMA_ATTRS(attrs); 461 - 462 - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); 463 - return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs); 413 + return dma_alloc_attrs(dev, size, dma_handle, gfp, 414 + DMA_ATTR_NON_CONSISTENT); 464 415 } 465 416 466 417 static inline void dma_free_noncoherent(struct device 
*dev, size_t size, 467 418 void *cpu_addr, dma_addr_t dma_handle) 468 419 { 469 - DEFINE_DMA_ATTRS(attrs); 470 - 471 - dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs); 472 - dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); 420 + dma_free_attrs(dev, size, cpu_addr, dma_handle, 421 + DMA_ATTR_NON_CONSISTENT); 473 422 } 474 423 475 424 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) ··· 689 646 static inline void *dma_alloc_wc(struct device *dev, size_t size, 690 647 dma_addr_t *dma_addr, gfp_t gfp) 691 648 { 692 - DEFINE_DMA_ATTRS(attrs); 693 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 694 - return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs); 649 + return dma_alloc_attrs(dev, size, dma_addr, gfp, 650 + DMA_ATTR_WRITE_COMBINE); 695 651 } 696 652 #ifndef dma_alloc_writecombine 697 653 #define dma_alloc_writecombine dma_alloc_wc ··· 699 657 static inline void dma_free_wc(struct device *dev, size_t size, 700 658 void *cpu_addr, dma_addr_t dma_addr) 701 659 { 702 - DEFINE_DMA_ATTRS(attrs); 703 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 704 - return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs); 660 + return dma_free_attrs(dev, size, cpu_addr, dma_addr, 661 + DMA_ATTR_WRITE_COMBINE); 705 662 } 706 663 #ifndef dma_free_writecombine 707 664 #define dma_free_writecombine dma_free_wc ··· 711 670 void *cpu_addr, dma_addr_t dma_addr, 712 671 size_t size) 713 672 { 714 - DEFINE_DMA_ATTRS(attrs); 715 - dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs); 716 - return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs); 673 + return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, 674 + DMA_ATTR_WRITE_COMBINE); 717 675 } 718 676 #ifndef dma_mmap_writecombine 719 677 #define dma_mmap_writecombine dma_mmap_wc
+5 -5
include/linux/swiotlb.h
··· 6 6 #include <linux/types.h> 7 7 8 8 struct device; 9 - struct dma_attrs; 10 9 struct page; 11 10 struct scatterlist; 12 11 ··· 67 68 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, 68 69 unsigned long offset, size_t size, 69 70 enum dma_data_direction dir, 70 - struct dma_attrs *attrs); 71 + unsigned long attrs); 71 72 extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 72 73 size_t size, enum dma_data_direction dir, 73 - struct dma_attrs *attrs); 74 + unsigned long attrs); 74 75 75 76 extern int 76 77 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, ··· 82 83 83 84 extern int 84 85 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, 85 - enum dma_data_direction dir, struct dma_attrs *attrs); 86 + enum dma_data_direction dir, 87 + unsigned long attrs); 86 88 87 89 extern void 88 90 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 89 91 int nelems, enum dma_data_direction dir, 90 - struct dma_attrs *attrs); 92 + unsigned long attrs); 91 93 92 94 extern void 93 95 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+3 -3
include/media/videobuf2-core.h
··· 98 98 * #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, unmap_dmabuf. 99 99 */ 100 100 struct vb2_mem_ops { 101 - void *(*alloc)(struct device *dev, const struct dma_attrs *attrs, 101 + void *(*alloc)(struct device *dev, unsigned long attrs, 102 102 unsigned long size, enum dma_data_direction dma_dir, 103 103 gfp_t gfp_flags); 104 104 void (*put)(void *buf_priv); ··· 408 408 * @io_modes: supported io methods (see vb2_io_modes enum) 409 409 * @dev: device to use for the default allocation context if the driver 410 410 * doesn't fill in the @alloc_devs array. 411 - * @dma_attrs: DMA attributes to use for the DMA. May be NULL. 411 + * @dma_attrs: DMA attributes to use for the DMA. 412 412 * @fileio_read_once: report EOF after reading the first buffer 413 413 * @fileio_write_immediately: queue buffer after each write() call 414 414 * @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver ··· 476 476 unsigned int type; 477 477 unsigned int io_modes; 478 478 struct device *dev; 479 - const struct dma_attrs *dma_attrs; 479 + unsigned long dma_attrs; 480 480 unsigned fileio_read_once:1; 481 481 unsigned fileio_write_immediately:1; 482 482 unsigned allow_zero_bytesused:1;
-2
include/media/videobuf2-dma-contig.h
··· 16 16 #include <media/videobuf2-v4l2.h> 17 17 #include <linux/dma-mapping.h> 18 18 19 - struct dma_attrs; 20 - 21 19 static inline dma_addr_t 22 20 vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no) 23 21 {
+9 -8
include/rdma/ib_verbs.h
··· 2819 2819 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, 2820 2820 void *cpu_addr, size_t size, 2821 2821 enum dma_data_direction direction, 2822 - struct dma_attrs *attrs) 2822 + unsigned long dma_attrs) 2823 2823 { 2824 2824 return dma_map_single_attrs(dev->dma_device, cpu_addr, size, 2825 - direction, attrs); 2825 + direction, dma_attrs); 2826 2826 } 2827 2827 2828 2828 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, 2829 2829 u64 addr, size_t size, 2830 2830 enum dma_data_direction direction, 2831 - struct dma_attrs *attrs) 2831 + unsigned long dma_attrs) 2832 2832 { 2833 2833 return dma_unmap_single_attrs(dev->dma_device, addr, size, 2834 - direction, attrs); 2834 + direction, dma_attrs); 2835 2835 } 2836 2836 2837 2837 /** ··· 2906 2906 static inline int ib_dma_map_sg_attrs(struct ib_device *dev, 2907 2907 struct scatterlist *sg, int nents, 2908 2908 enum dma_data_direction direction, 2909 - struct dma_attrs *attrs) 2909 + unsigned long dma_attrs) 2910 2910 { 2911 - return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 2911 + return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, 2912 + dma_attrs); 2912 2913 } 2913 2914 2914 2915 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, 2915 2916 struct scatterlist *sg, int nents, 2916 2917 enum dma_data_direction direction, 2917 - struct dma_attrs *attrs) 2918 + unsigned long dma_attrs) 2918 2919 { 2919 - dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); 2920 + dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); 2920 2921 } 2921 2922 /** 2922 2923 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
+6 -6
include/xen/swiotlb-xen.h
··· 9 9 extern void 10 10 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, 11 11 dma_addr_t *dma_handle, gfp_t flags, 12 - struct dma_attrs *attrs); 12 + unsigned long attrs); 13 13 14 14 extern void 15 15 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, 16 16 void *vaddr, dma_addr_t dma_handle, 17 - struct dma_attrs *attrs); 17 + unsigned long attrs); 18 18 19 19 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, 20 20 unsigned long offset, size_t size, 21 21 enum dma_data_direction dir, 22 - struct dma_attrs *attrs); 22 + unsigned long attrs); 23 23 24 24 extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 25 25 size_t size, enum dma_data_direction dir, 26 - struct dma_attrs *attrs); 26 + unsigned long attrs); 27 27 extern int 28 28 xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 29 29 int nelems, enum dma_data_direction dir, 30 - struct dma_attrs *attrs); 30 + unsigned long attrs); 31 31 32 32 extern void 33 33 xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 34 34 int nelems, enum dma_data_direction dir, 35 - struct dma_attrs *attrs); 35 + unsigned long attrs); 36 36 37 37 extern void 38 38 xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+5 -4
lib/dma-noop.c
··· 10 10 11 11 static void *dma_noop_alloc(struct device *dev, size_t size, 12 12 dma_addr_t *dma_handle, gfp_t gfp, 13 - struct dma_attrs *attrs) 13 + unsigned long attrs) 14 14 { 15 15 void *ret; 16 16 ··· 22 22 23 23 static void dma_noop_free(struct device *dev, size_t size, 24 24 void *cpu_addr, dma_addr_t dma_addr, 25 - struct dma_attrs *attrs) 25 + unsigned long attrs) 26 26 { 27 27 free_pages((unsigned long)cpu_addr, get_order(size)); 28 28 } ··· 30 30 static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page, 31 31 unsigned long offset, size_t size, 32 32 enum dma_data_direction dir, 33 - struct dma_attrs *attrs) 33 + unsigned long attrs) 34 34 { 35 35 return page_to_phys(page) + offset; 36 36 } 37 37 38 38 static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, 39 - enum dma_data_direction dir, struct dma_attrs *attrs) 39 + enum dma_data_direction dir, 40 + unsigned long attrs) 40 41 { 41 42 int i; 42 43 struct scatterlist *sg;
+7 -6
lib/swiotlb.c
··· 738 738 dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, 739 739 unsigned long offset, size_t size, 740 740 enum dma_data_direction dir, 741 - struct dma_attrs *attrs) 741 + unsigned long attrs) 742 742 { 743 743 phys_addr_t map, phys = page_to_phys(page) + offset; 744 744 dma_addr_t dev_addr = phys_to_dma(dev, phys); ··· 807 807 808 808 void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, 809 809 size_t size, enum dma_data_direction dir, 810 - struct dma_attrs *attrs) 810 + unsigned long attrs) 811 811 { 812 812 unmap_single(hwdev, dev_addr, size, dir); 813 813 } ··· 877 877 */ 878 878 int 879 879 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, 880 - enum dma_data_direction dir, struct dma_attrs *attrs) 880 + enum dma_data_direction dir, unsigned long attrs) 881 881 { 882 882 struct scatterlist *sg; 883 883 int i; ··· 914 914 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, 915 915 enum dma_data_direction dir) 916 916 { 917 - return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); 917 + return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0); 918 918 } 919 919 EXPORT_SYMBOL(swiotlb_map_sg); 920 920 ··· 924 924 */ 925 925 void 926 926 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 927 - int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) 927 + int nelems, enum dma_data_direction dir, 928 + unsigned long attrs) 928 929 { 929 930 struct scatterlist *sg; 930 931 int i; ··· 942 941 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, 943 942 enum dma_data_direction dir) 944 943 { 945 - return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); 944 + return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0); 946 945 } 947 946 EXPORT_SYMBOL(swiotlb_unmap_sg); 948 947