Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

parisc: Convert DMA map_page to map_phys interface

Perform mechanical conversion from .map_page to .map_phys callback.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/20251015-remove-map-page-v5-9-3bbfe3a25cdf@kernel.org

Authored by Leon Romanovsky and committed by Marek Szyprowski.
96ddf2ef e4e3fff6

+59 -59
+28 -26
drivers/parisc/ccio-dma.c
··· 517 517 * ccio_io_pdir_entry - Initialize an I/O Pdir. 518 518 * @pdir_ptr: A pointer into I/O Pdir. 519 519 * @sid: The Space Identifier. 520 - * @vba: The virtual address. 520 + * @pba: The physical address. 521 521 * @hints: The DMA Hint. 522 522 * 523 - * Given a virtual address (vba, arg2) and space id, (sid, arg1), 523 + * Given a physical address (pba, arg2) and space id, (sid, arg1), 524 524 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir 525 525 * entry consists of 8 bytes as shown below (MSB == bit 0): 526 526 * ··· 543 543 * index are bits 12:19 of the value returned by LCI. 544 544 */ 545 545 static void 546 - ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba, 546 + ccio_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba, 547 547 unsigned long hints) 548 548 { 549 549 register unsigned long pa; ··· 557 557 ** "hints" parm includes the VALID bit! 558 558 ** "dep" clobbers the physical address offset bits as well. 559 559 */ 560 - pa = lpa(vba); 560 + pa = pba; 561 561 asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints)); 562 562 ((u32 *)pdir_ptr)[1] = (u32) pa; 563 563 ··· 582 582 ** Grab virtual index [0:11] 583 583 ** Deposit virt_idx bits into I/O PDIR word 584 584 */ 585 - asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba)); 585 + asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba))); 586 586 asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci)); 587 587 asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci)); 588 588 ··· 704 704 /** 705 705 * ccio_map_single - Map an address range into the IOMMU. 706 706 * @dev: The PCI device. 707 - * @addr: The start address of the DMA region. 707 + * @addr: The physical address of the DMA region. 708 708 * @size: The length of the DMA region. 709 709 * @direction: The direction of the DMA transaction (to/from device). 710 710 * 711 711 * This function implements the pci_map_single function. 
712 712 */ 713 713 static dma_addr_t 714 - ccio_map_single(struct device *dev, void *addr, size_t size, 714 + ccio_map_single(struct device *dev, phys_addr_t addr, size_t size, 715 715 enum dma_data_direction direction) 716 716 { 717 717 int idx; ··· 730 730 BUG_ON(size <= 0); 731 731 732 732 /* save offset bits */ 733 - offset = ((unsigned long) addr) & ~IOVP_MASK; 733 + offset = offset_in_page(addr); 734 734 735 735 /* round up to nearest IOVP_SIZE */ 736 736 size = ALIGN(size + offset, IOVP_SIZE); ··· 746 746 747 747 pdir_start = &(ioc->pdir_base[idx]); 748 748 749 - DBG_RUN("%s() %px -> %#lx size: %zu\n", 750 - __func__, addr, (long)(iovp | offset), size); 749 + DBG_RUN("%s() %pa -> %#lx size: %zu\n", 750 + __func__, &addr, (long)(iovp | offset), size); 751 751 752 752 /* If not cacheline aligned, force SAFE_DMA on the whole mess */ 753 - if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES)) 753 + if ((size % L1_CACHE_BYTES) || (addr % L1_CACHE_BYTES)) 754 754 hint |= HINT_SAFE_DMA; 755 755 756 756 while(size > 0) { 757 - ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint); 757 + ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, hint); 758 758 759 759 DBG_RUN(" pdir %p %08x%08x\n", 760 760 pdir_start, ··· 773 773 774 774 775 775 static dma_addr_t 776 - ccio_map_page(struct device *dev, struct page *page, unsigned long offset, 777 - size_t size, enum dma_data_direction direction, 778 - unsigned long attrs) 776 + ccio_map_phys(struct device *dev, phys_addr_t phys, size_t size, 777 + enum dma_data_direction direction, unsigned long attrs) 779 778 { 780 - return ccio_map_single(dev, page_address(page) + offset, size, 781 - direction); 779 + if (unlikely(attrs & DMA_ATTR_MMIO)) 780 + return DMA_MAPPING_ERROR; 781 + 782 + return ccio_map_single(dev, phys, size, direction); 782 783 } 783 784 784 785 785 786 /** 786 - * ccio_unmap_page - Unmap an address range from the IOMMU. 
787 + * ccio_unmap_phys - Unmap an address range from the IOMMU. 787 788 * @dev: The PCI device. 788 789 * @iova: The start address of the DMA region. 789 790 * @size: The length of the DMA region. ··· 792 791 * @attrs: attributes 793 792 */ 794 793 static void 795 - ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size, 794 + ccio_unmap_phys(struct device *dev, dma_addr_t iova, size_t size, 796 795 enum dma_data_direction direction, unsigned long attrs) 797 796 { 798 797 struct ioc *ioc; ··· 854 853 855 854 if (ret) { 856 855 memset(ret, 0, size); 857 - *dma_handle = ccio_map_single(dev, ret, size, DMA_BIDIRECTIONAL); 856 + *dma_handle = ccio_map_single(dev, virt_to_phys(ret), size, 857 + DMA_BIDIRECTIONAL); 858 858 } 859 859 860 860 return ret; ··· 875 873 ccio_free(struct device *dev, size_t size, void *cpu_addr, 876 874 dma_addr_t dma_handle, unsigned long attrs) 877 875 { 878 - ccio_unmap_page(dev, dma_handle, size, 0, 0); 876 + ccio_unmap_phys(dev, dma_handle, size, 0, 0); 879 877 free_pages((unsigned long)cpu_addr, get_order(size)); 880 878 } 881 879 ··· 922 920 /* Fast path single entry scatterlists. 
*/ 923 921 if (nents == 1) { 924 922 sg_dma_address(sglist) = ccio_map_single(dev, 925 - sg_virt(sglist), sglist->length, 923 + sg_phys(sglist), sglist->length, 926 924 direction); 927 925 sg_dma_len(sglist) = sglist->length; 928 926 return 1; ··· 1006 1004 #ifdef CCIO_COLLECT_STATS 1007 1005 ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT; 1008 1006 #endif 1009 - ccio_unmap_page(dev, sg_dma_address(sglist), 1007 + ccio_unmap_phys(dev, sg_dma_address(sglist), 1010 1008 sg_dma_len(sglist), direction, 0); 1011 1009 ++sglist; 1012 1010 nents--; ··· 1019 1017 .dma_supported = ccio_dma_supported, 1020 1018 .alloc = ccio_alloc, 1021 1019 .free = ccio_free, 1022 - .map_page = ccio_map_page, 1023 - .unmap_page = ccio_unmap_page, 1020 + .map_phys = ccio_map_phys, 1021 + .unmap_phys = ccio_unmap_phys, 1024 1022 .map_sg = ccio_map_sg, 1025 1023 .unmap_sg = ccio_unmap_sg, 1026 1024 .get_sgtable = dma_common_get_sgtable, ··· 1074 1072 ioc->msingle_calls, ioc->msingle_pages, 1075 1073 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); 1076 1074 1077 - /* KLUGE - unmap_sg calls unmap_page for each mapped page */ 1075 + /* KLUGE - unmap_sg calls unmap_phys for each mapped page */ 1078 1076 min = ioc->usingle_calls - ioc->usg_calls; 1079 1077 max = ioc->usingle_pages - ioc->usg_pages; 1080 1078 seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
+5 -5
drivers/parisc/iommu-helpers.h
··· 14 14 static inline unsigned int 15 15 iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents, 16 16 unsigned long hint, 17 - void (*iommu_io_pdir_entry)(__le64 *, space_t, unsigned long, 17 + void (*iommu_io_pdir_entry)(__le64 *, space_t, phys_addr_t, 18 18 unsigned long)) 19 19 { 20 20 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */ ··· 28 28 dma_sg--; 29 29 30 30 while (nents-- > 0) { 31 - unsigned long vaddr; 31 + phys_addr_t paddr; 32 32 long size; 33 33 34 34 DBG_RUN_SG(" %d : %08lx %p/%05x\n", nents, ··· 67 67 68 68 BUG_ON(pdirp == NULL); 69 69 70 - vaddr = (unsigned long)sg_virt(startsg); 70 + paddr = sg_phys(startsg); 71 71 sg_dma_len(dma_sg) += startsg->length; 72 72 size = startsg->length + dma_offset; 73 73 dma_offset = 0; ··· 76 76 #endif 77 77 do { 78 78 iommu_io_pdir_entry(pdirp, KERNEL_SPACE, 79 - vaddr, hint); 80 - vaddr += IOVP_SIZE; 79 + paddr, hint); 80 + paddr += IOVP_SIZE; 81 81 size -= IOVP_SIZE; 82 82 pdirp++; 83 83 } while(unlikely(size > 0));
+26 -28
drivers/parisc/sba_iommu.c
··· 532 532 * sba_io_pdir_entry - fill in one IO PDIR entry 533 533 * @pdir_ptr: pointer to IO PDIR entry 534 534 * @sid: process Space ID - currently only support KERNEL_SPACE 535 - * @vba: Virtual CPU address of buffer to map 535 + * @pba: Physical address of buffer to map 536 536 * @hint: DMA hint set to use for this mapping 537 537 * 538 538 * SBA Mapping Routine ··· 569 569 */ 570 570 571 571 static void 572 - sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba, 572 + sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, phys_addr_t pba, 573 573 unsigned long hint) 574 574 { 575 - u64 pa; /* physical address */ 576 575 register unsigned ci; /* coherent index */ 577 576 578 - pa = lpa(vba); 579 - pa &= IOVP_MASK; 577 + asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pba))); 578 + pba &= IOVP_MASK; 579 + pba |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */ 580 580 581 - asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba)); 582 - pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */ 583 - 584 - pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ 585 - *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */ 581 + pba |= SBA_PDIR_VALID_BIT; /* set "valid" bit */ 582 + *pdir_ptr = cpu_to_le64(pba); /* swap and store into I/O Pdir */ 586 583 587 584 /* 588 585 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set ··· 704 707 * See Documentation/core-api/dma-api-howto.rst 705 708 */ 706 709 static dma_addr_t 707 - sba_map_single(struct device *dev, void *addr, size_t size, 710 + sba_map_single(struct device *dev, phys_addr_t addr, size_t size, 708 711 enum dma_data_direction direction) 709 712 { 710 713 struct ioc *ioc; ··· 719 722 return DMA_MAPPING_ERROR; 720 723 721 724 /* save offset bits */ 722 - offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK; 725 + offset = offset_in_page(addr); 723 726 724 727 /* round up to nearest IOVP_SIZE */ 725 728 size = (size + offset + ~IOVP_MASK) & IOVP_MASK; ··· 736 739 
pide = sba_alloc_range(ioc, dev, size); 737 740 iovp = (dma_addr_t) pide << IOVP_SHIFT; 738 741 739 - DBG_RUN("%s() 0x%p -> 0x%lx\n", 740 - __func__, addr, (long) iovp | offset); 742 + DBG_RUN("%s() 0x%pa -> 0x%lx\n", 743 + __func__, &addr, (long) iovp | offset); 741 744 742 745 pdir_start = &(ioc->pdir_base[pide]); 743 746 744 747 while (size > 0) { 745 - sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0); 748 + sba_io_pdir_entry(pdir_start, KERNEL_SPACE, addr, 0); 746 749 747 750 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n", 748 751 pdir_start, ··· 775 778 776 779 777 780 static dma_addr_t 778 - sba_map_page(struct device *dev, struct page *page, unsigned long offset, 779 - size_t size, enum dma_data_direction direction, 780 - unsigned long attrs) 781 + sba_map_phys(struct device *dev, phys_addr_t phys, size_t size, 782 + enum dma_data_direction direction, unsigned long attrs) 781 783 { 782 - return sba_map_single(dev, page_address(page) + offset, size, 783 - direction); 784 + if (unlikely(attrs & DMA_ATTR_MMIO)) 785 + return DMA_MAPPING_ERROR; 786 + 787 + return sba_map_single(dev, phys, size, direction); 784 788 } 785 789 786 790 787 791 /** 788 - * sba_unmap_page - unmap one IOVA and free resources 792 + * sba_unmap_phys - unmap one IOVA and free resources 789 793 * @dev: instance of PCI owned by the driver that's asking. 790 794 * @iova: IOVA of driver buffer previously mapped. 791 795 * @size: number of bytes mapped in driver buffer. 
··· 796 798 * See Documentation/core-api/dma-api-howto.rst 797 799 */ 798 800 static void 799 - sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size, 801 + sba_unmap_phys(struct device *dev, dma_addr_t iova, size_t size, 800 802 enum dma_data_direction direction, unsigned long attrs) 801 803 { 802 804 struct ioc *ioc; ··· 891 893 892 894 if (ret) { 893 895 memset(ret, 0, size); 894 - *dma_handle = sba_map_single(hwdev, ret, size, 0); 896 + *dma_handle = sba_map_single(hwdev, virt_to_phys(ret), size, 0); 895 897 } 896 898 897 899 return ret; ··· 912 914 sba_free(struct device *hwdev, size_t size, void *vaddr, 913 915 dma_addr_t dma_handle, unsigned long attrs) 914 916 { 915 - sba_unmap_page(hwdev, dma_handle, size, 0, 0); 917 + sba_unmap_phys(hwdev, dma_handle, size, 0, 0); 916 918 free_pages((unsigned long) vaddr, get_order(size)); 917 919 } 918 920 ··· 960 962 961 963 /* Fast path single entry scatterlists. */ 962 964 if (nents == 1) { 963 - sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist), 965 + sg_dma_address(sglist) = sba_map_single(dev, sg_phys(sglist), 964 966 sglist->length, direction); 965 967 sg_dma_len(sglist) = sglist->length; 966 968 return 1; ··· 1059 1061 1060 1062 while (nents && sg_dma_len(sglist)) { 1061 1063 1062 - sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist), 1064 + sba_unmap_phys(dev, sg_dma_address(sglist), sg_dma_len(sglist), 1063 1065 direction, 0); 1064 1066 #ifdef SBA_COLLECT_STATS 1065 1067 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT; ··· 1083 1085 .dma_supported = sba_dma_supported, 1084 1086 .alloc = sba_alloc, 1085 1087 .free = sba_free, 1086 - .map_page = sba_map_page, 1087 - .unmap_page = sba_unmap_page, 1088 + .map_phys = sba_map_phys, 1089 + .unmap_phys = sba_unmap_phys, 1088 1090 .map_sg = sba_map_sg, 1089 1091 .unmap_sg = sba_unmap_sg, 1090 1092 .get_sgtable = dma_common_get_sgtable,