Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: Convert to physical address DMA mapping

Adapt PowerPC DMA to use physical addresses in order to prepare the
code for the removal of .map_page and .unmap_page.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-10-3bbfe3a25cdf@kernel.org

authored by

Leon Romanovsky and committed by
Marek Szyprowski
a10d648d 96ddf2ef

+60 -53
+4 -4
arch/powerpc/include/asm/iommu.h
··· 274 274 unsigned long mask, gfp_t flag, int node); 275 275 extern void iommu_free_coherent(struct iommu_table *tbl, size_t size, 276 276 void *vaddr, dma_addr_t dma_handle); 277 - extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, 278 - struct page *page, unsigned long offset, 279 - size_t size, unsigned long mask, 277 + extern dma_addr_t iommu_map_phys(struct device *dev, struct iommu_table *tbl, 278 + phys_addr_t phys, size_t size, 279 + unsigned long mask, 280 280 enum dma_data_direction direction, 281 281 unsigned long attrs); 282 - extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, 282 + extern void iommu_unmap_phys(struct iommu_table *tbl, dma_addr_t dma_handle, 283 283 size_t size, enum dma_data_direction direction, 284 284 unsigned long attrs); 285 285
+10 -12
arch/powerpc/kernel/dma-iommu.c
··· 93 93 94 94 /* Creates TCEs for a user provided buffer. The user buffer must be 95 95 * contiguous real kernel storage (not vmalloc). The address passed here 96 - * comprises a page address and offset into that page. The dma_addr_t 97 - * returned will point to the same byte within the page as was passed in. 96 + * is a physical address to that page. The dma_addr_t returned will point 97 + * to the same byte within the page as was passed in. 98 98 */ 99 - static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page, 100 - unsigned long offset, size_t size, 99 + static dma_addr_t dma_iommu_map_phys(struct device *dev, phys_addr_t phys, 100 + size_t size, 101 101 enum dma_data_direction direction, 102 102 unsigned long attrs) 103 103 { 104 - return iommu_map_page(dev, get_iommu_table_base(dev), page, offset, 105 - size, dma_get_mask(dev), direction, attrs); 104 + return iommu_map_phys(dev, get_iommu_table_base(dev), phys, size, 105 + dma_get_mask(dev), direction, attrs); 106 106 } 107 107 108 - 109 - static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, 108 + static void dma_iommu_unmap_phys(struct device *dev, dma_addr_t dma_handle, 110 109 size_t size, enum dma_data_direction direction, 111 110 unsigned long attrs) 112 111 { 113 - iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction, 112 + iommu_unmap_phys(get_iommu_table_base(dev), dma_handle, size, direction, 114 113 attrs); 115 114 } 116 - 117 115 118 116 static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 119 117 int nelems, enum dma_data_direction direction, ··· 209 211 .map_sg = dma_iommu_map_sg, 210 212 .unmap_sg = dma_iommu_unmap_sg, 211 213 .dma_supported = dma_iommu_dma_supported, 212 - .map_page = dma_iommu_map_page, 213 - .unmap_page = dma_iommu_unmap_page, 214 + .map_phys = dma_iommu_map_phys, 215 + .unmap_phys = dma_iommu_unmap_phys, 214 216 .get_required_mask = dma_iommu_get_required_mask, 215 217 .mmap = 
dma_common_mmap, 216 218 .get_sgtable = dma_common_get_sgtable,
+7 -7
arch/powerpc/kernel/iommu.c
··· 848 848 849 849 /* Creates TCEs for a user provided buffer. The user buffer must be 850 850 * contiguous real kernel storage (not vmalloc). The address passed here 851 - * comprises a page address and offset into that page. The dma_addr_t 852 - * returned will point to the same byte within the page as was passed in. 851 + * is physical address into that page. The dma_addr_t returned will point 852 + * to the same byte within the page as was passed in. 853 853 */ 854 - dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, 855 - struct page *page, unsigned long offset, size_t size, 856 - unsigned long mask, enum dma_data_direction direction, 854 + dma_addr_t iommu_map_phys(struct device *dev, struct iommu_table *tbl, 855 + phys_addr_t phys, size_t size, unsigned long mask, 856 + enum dma_data_direction direction, 857 857 unsigned long attrs) 858 858 { 859 859 dma_addr_t dma_handle = DMA_MAPPING_ERROR; ··· 863 863 864 864 BUG_ON(direction == DMA_NONE); 865 865 866 - vaddr = page_address(page) + offset; 866 + vaddr = phys_to_virt(phys); 867 867 uaddr = (unsigned long)vaddr; 868 868 869 869 if (tbl) { ··· 890 890 return dma_handle; 891 891 } 892 892 893 - void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, 893 + void iommu_unmap_phys(struct iommu_table *tbl, dma_addr_t dma_handle, 894 894 size_t size, enum dma_data_direction direction, 895 895 unsigned long attrs) 896 896 {
+19 -14
arch/powerpc/platforms/ps3/system-bus.c
··· 551 551 552 552 /* Creates TCEs for a user provided buffer. The user buffer must be 553 553 * contiguous real kernel storage (not vmalloc). The address passed here 554 - * comprises a page address and offset into that page. The dma_addr_t 555 - * returned will point to the same byte within the page as was passed in. 554 + * is physical address to that hat page. The dma_addr_t returned will point 555 + * to the same byte within the page as was passed in. 556 556 */ 557 557 558 - static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page, 559 - unsigned long offset, size_t size, enum dma_data_direction direction, 560 - unsigned long attrs) 558 + static dma_addr_t ps3_sb_map_phys(struct device *_dev, phys_addr_t phys, 559 + size_t size, enum dma_data_direction direction, unsigned long attrs) 561 560 { 562 561 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); 563 562 int result; 564 563 dma_addr_t bus_addr; 565 - void *ptr = page_address(page) + offset; 564 + void *ptr = phys_to_virt(phys); 565 + 566 + if (unlikely(attrs & DMA_ATTR_MMIO)) 567 + return DMA_MAPPING_ERROR; 566 568 567 569 result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size, 568 570 &bus_addr, ··· 579 577 return bus_addr; 580 578 } 581 579 582 - static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page, 583 - unsigned long offset, size_t size, 580 + static dma_addr_t ps3_ioc0_map_phys(struct device *_dev, phys_addr_t phys, 581 + size_t size, 584 582 enum dma_data_direction direction, 585 583 unsigned long attrs) 586 584 { ··· 588 586 int result; 589 587 dma_addr_t bus_addr; 590 588 u64 iopte_flag; 591 - void *ptr = page_address(page) + offset; 589 + void *ptr = phys_to_virt(phys); 590 + 591 + if (unlikely(attrs & DMA_ATTR_MMIO)) 592 + return DMA_MAPPING_ERROR; 592 593 593 594 iopte_flag = CBE_IOPTE_M; 594 595 switch (direction) { ··· 618 613 return bus_addr; 619 614 } 620 615 621 - static void ps3_unmap_page(struct device *_dev, dma_addr_t 
dma_addr, 616 + static void ps3_unmap_phys(struct device *_dev, dma_addr_t dma_addr, 622 617 size_t size, enum dma_data_direction direction, unsigned long attrs) 623 618 { 624 619 struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev); ··· 695 690 .map_sg = ps3_sb_map_sg, 696 691 .unmap_sg = ps3_sb_unmap_sg, 697 692 .dma_supported = ps3_dma_supported, 698 - .map_page = ps3_sb_map_page, 699 - .unmap_page = ps3_unmap_page, 693 + .map_phys = ps3_sb_map_phys, 694 + .unmap_phys = ps3_unmap_phys, 700 695 .mmap = dma_common_mmap, 701 696 .get_sgtable = dma_common_get_sgtable, 702 697 .alloc_pages_op = dma_common_alloc_pages, ··· 709 704 .map_sg = ps3_ioc0_map_sg, 710 705 .unmap_sg = ps3_ioc0_unmap_sg, 711 706 .dma_supported = ps3_dma_supported, 712 - .map_page = ps3_ioc0_map_page, 713 - .unmap_page = ps3_unmap_page, 707 + .map_phys = ps3_ioc0_map_phys, 708 + .unmap_phys = ps3_unmap_phys, 714 709 .mmap = dma_common_mmap, 715 710 .get_sgtable = dma_common_get_sgtable, 716 711 .alloc_pages_op = dma_common_alloc_pages,
+8 -7
arch/powerpc/platforms/pseries/ibmebus.c
··· 86 86 kfree(vaddr); 87 87 } 88 88 89 - static dma_addr_t ibmebus_map_page(struct device *dev, 90 - struct page *page, 91 - unsigned long offset, 89 + static dma_addr_t ibmebus_map_phys(struct device *dev, phys_addr_t phys, 92 90 size_t size, 93 91 enum dma_data_direction direction, 94 92 unsigned long attrs) 95 93 { 96 - return (dma_addr_t)(page_address(page) + offset); 94 + if (attrs & DMA_ATTR_MMIO) 95 + return DMA_MAPPING_ERROR; 96 + 97 + return (dma_addr_t)(phys_to_virt(phys)); 97 98 } 98 99 99 - static void ibmebus_unmap_page(struct device *dev, 100 + static void ibmebus_unmap_phys(struct device *dev, 100 101 dma_addr_t dma_addr, 101 102 size_t size, 102 103 enum dma_data_direction direction, ··· 147 146 .unmap_sg = ibmebus_unmap_sg, 148 147 .dma_supported = ibmebus_dma_supported, 149 148 .get_required_mask = ibmebus_dma_get_required_mask, 150 - .map_page = ibmebus_map_page, 151 - .unmap_page = ibmebus_unmap_page, 149 + .map_phys = ibmebus_map_phys, 150 + .unmap_phys = ibmebus_unmap_phys, 152 151 }; 153 152 154 153 static int ibmebus_match_path(struct device *dev, const void *data)
+12 -9
arch/powerpc/platforms/pseries/vio.c
··· 512 512 vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE)); 513 513 } 514 514 515 - static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page, 516 - unsigned long offset, size_t size, 517 - enum dma_data_direction direction, 518 - unsigned long attrs) 515 + static dma_addr_t vio_dma_iommu_map_phys(struct device *dev, phys_addr_t phys, 516 + size_t size, 517 + enum dma_data_direction direction, 518 + unsigned long attrs) 519 519 { 520 520 struct vio_dev *viodev = to_vio_dev(dev); 521 521 struct iommu_table *tbl = get_iommu_table_base(dev); 522 522 dma_addr_t ret = DMA_MAPPING_ERROR; 523 523 524 + if (unlikely(attrs & DMA_ATTR_MMIO)) 525 + return ret; 526 + 524 527 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) 525 528 goto out_fail; 526 - ret = iommu_map_page(dev, tbl, page, offset, size, dma_get_mask(dev), 529 + ret = iommu_map_phys(dev, tbl, phys, size, dma_get_mask(dev), 527 530 direction, attrs); 528 531 if (unlikely(ret == DMA_MAPPING_ERROR)) 529 532 goto out_deallocate; ··· 539 536 return DMA_MAPPING_ERROR; 540 537 } 541 538 542 - static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle, 539 + static void vio_dma_iommu_unmap_phys(struct device *dev, dma_addr_t dma_handle, 543 540 size_t size, 544 541 enum dma_data_direction direction, 545 542 unsigned long attrs) ··· 547 544 struct vio_dev *viodev = to_vio_dev(dev); 548 545 struct iommu_table *tbl = get_iommu_table_base(dev); 549 546 550 - iommu_unmap_page(tbl, dma_handle, size, direction, attrs); 547 + iommu_unmap_phys(tbl, dma_handle, size, direction, attrs); 551 548 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl))); 552 549 } 553 550 ··· 608 605 .free = vio_dma_iommu_free_coherent, 609 606 .map_sg = vio_dma_iommu_map_sg, 610 607 .unmap_sg = vio_dma_iommu_unmap_sg, 611 - .map_page = vio_dma_iommu_map_page, 612 - .unmap_page = vio_dma_iommu_unmap_page, 608 + .map_phys = vio_dma_iommu_map_phys, 609 + .unmap_phys = 
vio_dma_iommu_unmap_phys, 613 610 .dma_supported = dma_iommu_dma_supported, 614 611 .get_required_mask = dma_iommu_get_required_mask, 615 612 .mmap = dma_common_mmap,