Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu-api: Remove iommu_{un}map_range functions

These functions are no longer used and can be removed
safely. Their functionality is now provided by the
iommu_{un}map functions, which are also capable of handling
multiple page sizes.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

+2 -92
-48
arch/x86/kernel/amd_iommu.c
··· 2506 2506 return ret; 2507 2507 } 2508 2508 2509 - static int amd_iommu_map_range(struct iommu_domain *dom, 2510 - unsigned long iova, phys_addr_t paddr, 2511 - size_t size, int iommu_prot) 2512 - { 2513 - struct protection_domain *domain = dom->priv; 2514 - unsigned long i, npages = iommu_num_pages(paddr, size, PAGE_SIZE); 2515 - int prot = 0; 2516 - int ret; 2517 - 2518 - if (iommu_prot & IOMMU_READ) 2519 - prot |= IOMMU_PROT_IR; 2520 - if (iommu_prot & IOMMU_WRITE) 2521 - prot |= IOMMU_PROT_IW; 2522 - 2523 - iova &= PAGE_MASK; 2524 - paddr &= PAGE_MASK; 2525 - 2526 - for (i = 0; i < npages; ++i) { 2527 - ret = iommu_map_page(domain, iova, paddr, prot, PAGE_SIZE); 2528 - if (ret) 2529 - return ret; 2530 - 2531 - iova += PAGE_SIZE; 2532 - paddr += PAGE_SIZE; 2533 - } 2534 - 2535 - return 0; 2536 - } 2537 - 2538 - static void amd_iommu_unmap_range(struct iommu_domain *dom, 2539 - unsigned long iova, size_t size) 2540 - { 2541 - 2542 - struct protection_domain *domain = dom->priv; 2543 - unsigned long i, npages = iommu_num_pages(iova, size, PAGE_SIZE); 2544 - 2545 - iova &= PAGE_MASK; 2546 - 2547 - for (i = 0; i < npages; ++i) { 2548 - iommu_unmap_page(domain, iova, PAGE_SIZE); 2549 - iova += PAGE_SIZE; 2550 - } 2551 - 2552 - iommu_flush_tlb_pde(domain); 2553 - } 2554 - 2555 2509 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, 2556 2510 phys_addr_t paddr, int gfp_order, int iommu_prot) 2557 2511 { ··· 2570 2616 .detach_dev = amd_iommu_detach_device, 2571 2617 .map = amd_iommu_map, 2572 2618 .unmap = amd_iommu_unmap, 2573 - .map_range = amd_iommu_map_range, 2574 - .unmap_range = amd_iommu_unmap_range, 2575 2619 .iova_to_phys = amd_iommu_iova_to_phys, 2576 2620 .domain_has_cap = amd_iommu_domain_has_cap, 2577 2621 };
+2 -24
drivers/base/iommu.c
··· 80 80 } 81 81 EXPORT_SYMBOL_GPL(iommu_detach_device); 82 82 83 - int iommu_map_range(struct iommu_domain *domain, unsigned long iova, 84 - phys_addr_t paddr, size_t size, int prot) 85 - { 86 - return iommu_ops->map_range(domain, iova, paddr, size, prot); 87 - } 88 - EXPORT_SYMBOL_GPL(iommu_map_range); 89 - 90 - void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, 91 - size_t size) 92 - { 93 - iommu_ops->unmap_range(domain, iova, size); 94 - } 95 - EXPORT_SYMBOL_GPL(iommu_unmap_range); 96 - 97 83 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, 98 84 unsigned long iova) 99 85 { ··· 105 119 106 120 BUG_ON((iova | paddr) & invalid_mask); 107 121 108 - if (iommu_ops->map) 109 - return iommu_ops->map(domain, iova, paddr, gfp_order, prot); 110 - 111 - return iommu_ops->map_range(domain, iova, paddr, size, prot); 122 + return iommu_ops->map(domain, iova, paddr, gfp_order, prot); 112 123 } 113 124 EXPORT_SYMBOL_GPL(iommu_map); 114 125 ··· 119 136 120 137 BUG_ON(iova & invalid_mask); 121 138 122 - if (iommu_ops->unmap) 123 - return iommu_ops->unmap(domain, iova, gfp_order); 124 - 125 - iommu_ops->unmap_range(domain, iova, size); 126 - 127 - return gfp_order; 139 + return iommu_ops->unmap(domain, iova, gfp_order); 128 140 } 129 141 EXPORT_SYMBOL_GPL(iommu_unmap);
-20
include/linux/iommu.h
··· 40 40 phys_addr_t paddr, int gfp_order, int prot); 41 41 int (*unmap)(struct iommu_domain *domain, unsigned long iova, 42 42 int gfp_order); 43 - int (*map_range)(struct iommu_domain *domain, unsigned long iova, 44 - phys_addr_t paddr, size_t size, int prot); 45 - void (*unmap_range)(struct iommu_domain *domain, unsigned long iova, 46 - size_t size); 47 43 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, 48 44 unsigned long iova); 49 45 int (*domain_has_cap)(struct iommu_domain *domain, ··· 56 60 struct device *dev); 57 61 extern void iommu_detach_device(struct iommu_domain *domain, 58 62 struct device *dev); 59 - extern int iommu_map_range(struct iommu_domain *domain, unsigned long iova, 60 - phys_addr_t paddr, size_t size, int prot); 61 - extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, 62 - size_t size); 63 63 extern int iommu_map(struct iommu_domain *domain, unsigned long iova, 64 64 phys_addr_t paddr, int gfp_order, int prot); 65 65 extern int iommu_unmap(struct iommu_domain *domain, unsigned long iova, ··· 93 101 94 102 static inline void iommu_detach_device(struct iommu_domain *domain, 95 103 struct device *dev) 96 - { 97 - } 98 - 99 - static inline int iommu_map_range(struct iommu_domain *domain, 100 - unsigned long iova, phys_addr_t paddr, 101 - size_t size, int prot) 102 - { 103 - return -ENODEV; 104 - } 105 - 106 - static inline void iommu_unmap_range(struct iommu_domain *domain, 107 - unsigned long iova, size_t size) 108 104 { 109 105 } 110 106