Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vfio/type1: move iova increment to unmap_unpin_*() caller

Move the iova increment to the caller of these functions as part of
preparing to handle end-of-address-space map/unmap.

Tested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Fixes: 73fa0d10d077 ("vfio: Type1 IOMMU implementation")
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Signed-off-by: Alex Mastro <amastro@fb.com>
Link: https://lore.kernel.org/r/20251028-fix-unmap-v6-2-2542b96bcc8e@fb.com
Signed-off-by: Alex Williamson <alex@shazbot.org>

authored by

Alex Mastro and committed by
Alex Williamson
1196f1f8 6012379e

+10 -10
+10 -10
drivers/vfio/vfio_iommu_type1.c
··· 1083 1083 #define VFIO_IOMMU_TLB_SYNC_MAX 512 1084 1084 1085 1085 static size_t unmap_unpin_fast(struct vfio_domain *domain, 1086 - struct vfio_dma *dma, dma_addr_t *iova, 1086 + struct vfio_dma *dma, dma_addr_t iova, 1087 1087 size_t len, phys_addr_t phys, long *unlocked, 1088 1088 struct list_head *unmapped_list, 1089 1089 int *unmapped_cnt, ··· 1093 1093 struct vfio_regions *entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1094 1094 1095 1095 if (entry) { 1096 - unmapped = iommu_unmap_fast(domain->domain, *iova, len, 1096 + unmapped = iommu_unmap_fast(domain->domain, iova, len, 1097 1097 iotlb_gather); 1098 1098 1099 1099 if (!unmapped) { 1100 1100 kfree(entry); 1101 1101 } else { 1102 - entry->iova = *iova; 1102 + entry->iova = iova; 1103 1103 entry->phys = phys; 1104 1104 entry->len = unmapped; 1105 1105 list_add_tail(&entry->list, unmapped_list); 1106 1106 1107 - *iova += unmapped; 1108 1107 (*unmapped_cnt)++; 1109 1108 } 1110 1109 } ··· 1122 1123 } 1123 1124 1124 1125 static size_t unmap_unpin_slow(struct vfio_domain *domain, 1125 - struct vfio_dma *dma, dma_addr_t *iova, 1126 + struct vfio_dma *dma, dma_addr_t iova, 1126 1127 size_t len, phys_addr_t phys, 1127 1128 long *unlocked) 1128 1129 { 1129 - size_t unmapped = iommu_unmap(domain->domain, *iova, len); 1130 + size_t unmapped = iommu_unmap(domain->domain, iova, len); 1130 1131 1131 1132 if (unmapped) { 1132 - *unlocked += vfio_unpin_pages_remote(dma, *iova, 1133 + *unlocked += vfio_unpin_pages_remote(dma, iova, 1133 1134 phys >> PAGE_SHIFT, 1134 1135 unmapped >> PAGE_SHIFT, 1135 1136 false); 1136 - *iova += unmapped; 1137 1137 cond_resched(); 1138 1138 } 1139 1139 return unmapped; ··· 1195 1197 * First, try to use fast unmap/unpin. In case of failure, 1196 1198 * switch to slow unmap/unpin path. 
1197 1199 */ 1198 - unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys, 1200 + unmapped = unmap_unpin_fast(domain, dma, iova, len, phys, 1199 1201 &unlocked, &unmapped_region_list, 1200 1202 &unmapped_region_cnt, 1201 1203 &iotlb_gather); 1202 1204 if (!unmapped) { 1203 - unmapped = unmap_unpin_slow(domain, dma, &iova, len, 1205 + unmapped = unmap_unpin_slow(domain, dma, iova, len, 1204 1206 phys, &unlocked); 1205 1207 if (WARN_ON(!unmapped)) 1206 1208 break; 1207 1209 } 1210 + 1211 + iova += unmapped; 1208 1212 } 1209 1213 1210 1214 dma->iommu_mapped = false;