Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu/vt-d: Fix to convert mm pfn to dma pfn

For the case where the VT-d page is smaller than the mm page, converting
an mm pfn to a dma pfn must be handled separately for the start pfn and
the end pfn. Currently the calculation of the end dma pfn is incorrect:
the result is smaller than the real page frame number, so the iova
mapping always misses some page frames.

Rename the mm_to_dma_pfn() to mm_to_dma_pfn_start() and add a new helper
for converting end dma pfn named mm_to_dma_pfn_end().

Signed-off-by: Yanfei Xu <yanfei.xu@intel.com>
Link: https://lore.kernel.org/r/20230625082046.979742-1-yanfei.xu@intel.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>

Authored by Yanfei Xu; committed by Joerg Roedel.
Commit: fb5f50a4 (parent: 8a3b8e63)

Diffstat: +13 -9 (13 insertions, 9 deletions)
drivers/iommu/intel/iommu.c
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
@@ -113,12 +113,16 @@
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
    are never going to work. */
-static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
 {
 	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
 }
+static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
+{
+	return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
+}
 static inline unsigned long page_to_dma_pfn(struct page *pg)
 {
-	return mm_to_dma_pfn(page_to_pfn(pg));
+	return mm_to_dma_pfn_start(page_to_pfn(pg));
 }
 static inline unsigned long virt_to_dma_pfn(void *p)
 {
@@ -2407,8 +2403,8 @@
 	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
 		ret = iommu_domain_identity_map(si_domain,
-				mm_to_dma_pfn(start_pfn),
-				mm_to_dma_pfn(end_pfn));
+				mm_to_dma_pfn_start(start_pfn),
+				mm_to_dma_pfn_end(end_pfn));
 		if (ret)
 			return ret;
 	}
@@ -2429,8 +2425,8 @@
 			continue;

 		ret = iommu_domain_identity_map(si_domain,
-				mm_to_dma_pfn(start >> PAGE_SHIFT),
-				mm_to_dma_pfn(end >> PAGE_SHIFT));
+				mm_to_dma_pfn_start(start >> PAGE_SHIFT),
+				mm_to_dma_pfn_end(end >> PAGE_SHIFT));
 		if (ret)
 			return ret;
 	}
@@ -3553,8 +3549,8 @@
 				       unsigned long val, void *v)
 {
 	struct memory_notify *mhp = v;
-	unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
-	unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+	unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn);
+	unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn +
 					mhp->nr_pages - 1);

 	switch (val) {
@@ -4258,7 +4254,7 @@
 	unsigned long i;

 	nrpages = aligned_nrpages(gather->start, size);
-	start_pfn = mm_to_dma_pfn(iova_pfn);
+	start_pfn = mm_to_dma_pfn_start(iova_pfn);

 	xa_for_each(&dmar_domain->iommu_array, i, info)
 		iommu_flush_iotlb_psi(info->iommu, dmar_domain,