Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu/vt-d: Drain PRQs when domain removed from RID

As this iommu driver now supports page faults for requests without
PASID, page requests should be drained when a domain is removed from
the RID2PASID entry.

To achieve this, the intel_iommu_drain_pasid_prq() call is moved into
intel_pasid_tear_down_entry(). This ensures that whenever a translation
is removed from any PASID entry and PRI has been enabled on the device,
page requests are drained in the domain detachment path.

The intel_iommu_drain_pasid_prq() helper has been modified to support
sending device TLB invalidation requests for both PASID and non-PASID
cases.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Yi Liu <yi.l.liu@intel.com>
Link: https://lore.kernel.org/r/20241101045543.70086-1-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>

authored by

Lu Baolu and committed by
Joerg Roedel
c43e1ccd 9baed1c2

+10 -18
-1
drivers/iommu/intel/iommu.c
@@ -4067,7 +4067,6 @@
 	intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
 	kfree(dev_pasid);
 	intel_pasid_tear_down_entry(iommu, dev, pasid, false);
-	intel_iommu_drain_pasid_prq(dev, pasid);
 }
 
 static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
+1
drivers/iommu/intel/pasid.c
@@ -265,6 +265,7 @@
 	iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
 	devtlb_invalidation_with_pasid(iommu, dev, pasid);
+	intel_iommu_drain_pasid_prq(dev, pasid);
 }
 
 /*
+9 -17
drivers/iommu/intel/prq.c
@@ -63,23 +63,15 @@
 	struct dmar_domain *domain;
 	struct intel_iommu *iommu;
 	struct qi_desc desc[3];
-	struct pci_dev *pdev;
 	int head, tail;
 	u16 sid, did;
-	int qdep;
 
 	info = dev_iommu_priv_get(dev);
-	if (WARN_ON(!info || !dev_is_pci(dev)))
-		return;
-
 	if (!info->pri_enabled)
 		return;
 
 	iommu = info->iommu;
 	domain = info->domain;
-	pdev = to_pci_dev(dev);
 	sid = PCI_DEVID(info->bus, info->devfn);
 	did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID;
-
-	qdep = pci_ats_queue_depth(pdev);
 
 	/*
 	 * Check and wait until all pending page requests in the queue are
@@ -106,15 +114,15 @@
 	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
 		      QI_IWD_FENCE |
 		      QI_IWD_TYPE;
-	desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
-		      QI_EIOTLB_DID(did) |
-		      QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-		      QI_EIOTLB_TYPE;
-	desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
-		      QI_DEV_EIOTLB_SID(sid) |
-		      QI_DEV_EIOTLB_QDEP(qdep) |
-		      QI_DEIOTLB_TYPE |
-		      QI_DEV_IOTLB_PFSID(info->pfsid);
+	if (pasid == IOMMU_NO_PASID) {
+		qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc[1]);
+		qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0,
+				  MAX_AGAW_PFN_WIDTH, &desc[2]);
+	} else {
+		qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]);
+		qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid, info->ats_qdep,
+					0, MAX_AGAW_PFN_WIDTH, &desc[2]);
+	}
 qi_retry:
 	reinit_completion(&iommu->prq_complete);
 	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);