Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

PowerPC: adapt for dma_map_ops changes

Adapt core PowerPC architecture code for dma_map_ops changes: replace
alloc/free_coherent with generic alloc/free methods.

Signed-off-by: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
[added missing changes to arch/powerpc/kernel/vio.c]
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>

Authored by Andrzej Pietrasiewicz; committed by Marek Szyprowski.
bfbf7d61 e8d51e54

+60 -41
+16 -8
arch/powerpc/include/asm/dma-mapping.h
··· 22 22 23 23 /* Some dma direct funcs must be visible for use in other dma_ops */ 24 24 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size, 25 - dma_addr_t *dma_handle, gfp_t flag); 25 + dma_addr_t *dma_handle, gfp_t flag, 26 + struct dma_attrs *attrs); 26 27 extern void dma_direct_free_coherent(struct device *dev, size_t size, 27 - void *vaddr, dma_addr_t dma_handle); 28 + void *vaddr, dma_addr_t dma_handle, 29 + struct dma_attrs *attrs); 28 30 29 31 30 32 #ifdef CONFIG_NOT_COHERENT_CACHE ··· 132 130 133 131 extern int dma_set_mask(struct device *dev, u64 dma_mask); 134 132 135 - static inline void *dma_alloc_coherent(struct device *dev, size_t size, 136 - dma_addr_t *dma_handle, gfp_t flag) 133 + #define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) 134 + 135 + static inline void *dma_alloc_attrs(struct device *dev, size_t size, 136 + dma_addr_t *dma_handle, gfp_t flag, 137 + struct dma_attrs *attrs) 137 138 { 138 139 struct dma_map_ops *dma_ops = get_dma_ops(dev); 139 140 void *cpu_addr; 140 141 141 142 BUG_ON(!dma_ops); 142 143 143 - cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag); 144 + cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs); 144 145 145 146 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr); 146 147 147 148 return cpu_addr; 148 149 } 149 150 150 - static inline void dma_free_coherent(struct device *dev, size_t size, 151 - void *cpu_addr, dma_addr_t dma_handle) 151 + #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) 152 + 153 + static inline void dma_free_attrs(struct device *dev, size_t size, 154 + void *cpu_addr, dma_addr_t dma_handle, 155 + struct dma_attrs *attrs) 152 156 { 153 157 struct dma_map_ops *dma_ops = get_dma_ops(dev); 154 158 ··· 162 154 163 155 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); 164 156 165 - dma_ops->free_coherent(dev, size, cpu_addr, dma_handle); 157 + dma_ops->free(dev, size, cpu_addr, dma_handle, attrs); 166 158 } 167 159 
168 160 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+6 -4
arch/powerpc/kernel/dma-iommu.c
··· 17 17 * to the dma address (mapping) of the first page. 18 18 */ 19 19 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, 20 - dma_addr_t *dma_handle, gfp_t flag) 20 + dma_addr_t *dma_handle, gfp_t flag, 21 + struct dma_attrs *attrs) 21 22 { 22 23 return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, 23 24 dma_handle, dev->coherent_dma_mask, flag, ··· 26 25 } 27 26 28 27 static void dma_iommu_free_coherent(struct device *dev, size_t size, 29 - void *vaddr, dma_addr_t dma_handle) 28 + void *vaddr, dma_addr_t dma_handle, 29 + struct dma_attrs *attrs) 30 30 { 31 31 iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle); 32 32 } ··· 107 105 } 108 106 109 107 struct dma_map_ops dma_iommu_ops = { 110 - .alloc_coherent = dma_iommu_alloc_coherent, 111 - .free_coherent = dma_iommu_free_coherent, 108 + .alloc = dma_iommu_alloc_coherent, 109 + .free = dma_iommu_free_coherent, 112 110 .map_sg = dma_iommu_map_sg, 113 111 .unmap_sg = dma_iommu_unmap_sg, 114 112 .dma_supported = dma_iommu_dma_supported,
+2 -2
arch/powerpc/kernel/dma-swiotlb.c
··· 47 47 * for everything else. 48 48 */ 49 49 struct dma_map_ops swiotlb_dma_ops = { 50 - .alloc_coherent = dma_direct_alloc_coherent, 51 - .free_coherent = dma_direct_free_coherent, 50 + .alloc = dma_direct_alloc_coherent, 51 + .free = dma_direct_free_coherent, 52 52 .map_sg = swiotlb_map_sg_attrs, 53 53 .unmap_sg = swiotlb_unmap_sg_attrs, 54 54 .dma_supported = swiotlb_dma_supported,
+6 -4
arch/powerpc/kernel/dma.c
··· 26 26 27 27 28 28 void *dma_direct_alloc_coherent(struct device *dev, size_t size, 29 - dma_addr_t *dma_handle, gfp_t flag) 29 + dma_addr_t *dma_handle, gfp_t flag, 30 + struct dma_attrs *attrs) 30 31 { 31 32 void *ret; 32 33 #ifdef CONFIG_NOT_COHERENT_CACHE ··· 55 54 } 56 55 57 56 void dma_direct_free_coherent(struct device *dev, size_t size, 58 - void *vaddr, dma_addr_t dma_handle) 57 + void *vaddr, dma_addr_t dma_handle, 58 + struct dma_attrs *attrs) 59 59 { 60 60 #ifdef CONFIG_NOT_COHERENT_CACHE 61 61 __dma_free_coherent(size, vaddr); ··· 152 150 #endif 153 151 154 152 struct dma_map_ops dma_direct_ops = { 155 - .alloc_coherent = dma_direct_alloc_coherent, 156 - .free_coherent = dma_direct_free_coherent, 153 + .alloc = dma_direct_alloc_coherent, 154 + .free = dma_direct_free_coherent, 157 155 .map_sg = dma_direct_map_sg, 158 156 .unmap_sg = dma_direct_unmap_sg, 159 157 .dma_supported = dma_direct_dma_supported,
+6 -4
arch/powerpc/kernel/ibmebus.c
··· 65 65 static void *ibmebus_alloc_coherent(struct device *dev, 66 66 size_t size, 67 67 dma_addr_t *dma_handle, 68 - gfp_t flag) 68 + gfp_t flag, 69 + struct dma_attrs *attrs) 69 70 { 70 71 void *mem; 71 72 ··· 78 77 79 78 static void ibmebus_free_coherent(struct device *dev, 80 79 size_t size, void *vaddr, 81 - dma_addr_t dma_handle) 80 + dma_addr_t dma_handle, 81 + struct dma_attrs *attrs) 82 82 { 83 83 kfree(vaddr); 84 84 } ··· 138 136 } 139 137 140 138 static struct dma_map_ops ibmebus_dma_ops = { 141 - .alloc_coherent = ibmebus_alloc_coherent, 142 - .free_coherent = ibmebus_free_coherent, 139 + .alloc = ibmebus_alloc_coherent, 140 + .free = ibmebus_free_coherent, 143 141 .map_sg = ibmebus_map_sg, 144 142 .unmap_sg = ibmebus_unmap_sg, 145 143 .dma_supported = ibmebus_dma_supported,
+8 -6
arch/powerpc/kernel/vio.c
··· 487 487 } 488 488 489 489 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size, 490 - dma_addr_t *dma_handle, gfp_t flag) 490 + dma_addr_t *dma_handle, gfp_t flag, 491 + struct dma_attrs *attrs) 491 492 { 492 493 struct vio_dev *viodev = to_vio_dev(dev); 493 494 void *ret; ··· 498 497 return NULL; 499 498 } 500 499 501 - ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag); 500 + ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs); 502 501 if (unlikely(ret == NULL)) { 503 502 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 504 503 atomic_inc(&viodev->cmo.allocs_failed); ··· 508 507 } 509 508 510 509 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size, 511 - void *vaddr, dma_addr_t dma_handle) 510 + void *vaddr, dma_addr_t dma_handle, 511 + struct dma_attrs *attrs) 512 512 { 513 513 struct vio_dev *viodev = to_vio_dev(dev); 514 514 515 - dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle); 515 + dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs); 516 516 517 517 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 518 518 } ··· 614 612 } 615 613 616 614 struct dma_map_ops vio_dma_mapping_ops = { 617 - .alloc_coherent = vio_dma_iommu_alloc_coherent, 618 - .free_coherent = vio_dma_iommu_free_coherent, 615 + .alloc = vio_dma_iommu_alloc_coherent, 616 + .free = vio_dma_iommu_free_coherent, 619 617 .map_sg = vio_dma_iommu_map_sg, 620 618 .unmap_sg = vio_dma_iommu_unmap_sg, 621 619 .map_page = vio_dma_iommu_map_page,
+9 -7
arch/powerpc/platforms/cell/iommu.c
··· 564 564 /* A coherent allocation implies strong ordering */ 565 565 566 566 static void *dma_fixed_alloc_coherent(struct device *dev, size_t size, 567 - dma_addr_t *dma_handle, gfp_t flag) 567 + dma_addr_t *dma_handle, gfp_t flag, 568 + struct dma_attrs *attrs) 568 569 { 569 570 if (iommu_fixed_is_weak) 570 571 return iommu_alloc_coherent(dev, cell_get_iommu_table(dev), ··· 573 572 device_to_mask(dev), flag, 574 573 dev_to_node(dev)); 575 574 else 576 - return dma_direct_ops.alloc_coherent(dev, size, dma_handle, 577 - flag); 575 + return dma_direct_ops.alloc(dev, size, dma_handle, flag, 576 + attrs); 578 577 } 579 578 580 579 static void dma_fixed_free_coherent(struct device *dev, size_t size, 581 - void *vaddr, dma_addr_t dma_handle) 580 + void *vaddr, dma_addr_t dma_handle, 581 + struct dma_attrs *attrs) 582 582 { 583 583 if (iommu_fixed_is_weak) 584 584 iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr, 585 585 dma_handle); 586 586 else 587 - dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle); 587 + dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs); 588 588 } 589 589 590 590 static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page, ··· 644 642 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); 645 643 646 644 struct dma_map_ops dma_iommu_fixed_ops = { 647 - .alloc_coherent = dma_fixed_alloc_coherent, 648 - .free_coherent = dma_fixed_free_coherent, 645 + .alloc = dma_fixed_alloc_coherent, 646 + .free = dma_fixed_free_coherent, 649 647 .map_sg = dma_fixed_map_sg, 650 648 .unmap_sg = dma_fixed_unmap_sg, 651 649 .dma_supported = dma_fixed_dma_supported,
+7 -6
arch/powerpc/platforms/ps3/system-bus.c
··· 515 515 * to the dma address (mapping) of the first page. 516 516 */ 517 517 static void * ps3_alloc_coherent(struct device *_dev, size_t size, 518 - dma_addr_t *dma_handle, gfp_t flag) 518 + dma_addr_t *dma_handle, gfp_t flag, 519 + struct dma_attrs *attrs) 519 520 { 520 521 int result; 521 522 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); ··· 553 552 } 554 553 555 554 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr, 556 - dma_addr_t dma_handle) 555 + dma_addr_t dma_handle, struct dma_attrs *attrs) 557 556 { 558 557 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 559 558 ··· 702 701 } 703 702 704 703 static struct dma_map_ops ps3_sb_dma_ops = { 705 - .alloc_coherent = ps3_alloc_coherent, 706 - .free_coherent = ps3_free_coherent, 704 + .alloc = ps3_alloc_coherent, 705 + .free = ps3_free_coherent, 707 706 .map_sg = ps3_sb_map_sg, 708 707 .unmap_sg = ps3_sb_unmap_sg, 709 708 .dma_supported = ps3_dma_supported, ··· 713 712 }; 714 713 715 714 static struct dma_map_ops ps3_ioc0_dma_ops = { 716 - .alloc_coherent = ps3_alloc_coherent, 717 - .free_coherent = ps3_free_coherent, 715 + .alloc = ps3_alloc_coherent, 716 + .free = ps3_free_coherent, 718 717 .map_sg = ps3_ioc0_map_sg, 719 718 .unmap_sg = ps3_ioc0_unmap_sg, 720 719 .dma_supported = ps3_dma_supported,