Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu: Add attach handle to struct iopf_group

Previously, the domain that a page fault targets was stored in an
iopf_group, which represents a minimal set of page faults. With the
introduction of attach handles, replace the domain with the handle
so that the fault handler can obtain more information as needed
when handling the faults.

iommu_report_device_fault() is currently used for SVA page faults,
which handles the page fault in an internal cycle. The domain is retrieved
with iommu_get_domain_for_dev_pasid() if the pasid in the fault message
is valid. This doesn't work in the IOMMUFD case, where if the pasid table
of a device is wholly managed by user space, there is no domain attached
to the PASID of the device, and all page faults are forwarded through a
NESTING domain attached to the RID.

Add a static flag in iommu ops, which indicates whether the IOMMU driver
supports user-managed PASID tables. In the iopf delivery path, if no
attach handle is found for the iopf PASID, fall back to the RID domain
when the IOMMU driver supports this capability.

iommu_get_domain_for_dev_pasid() is no longer used and can be removed.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Link: https://lore.kernel.org/r/20240702063444.105814-4-baolu.lu@linux.intel.com
Signed-off-by: Will Deacon <will@kernel.org>

authored by

Lu Baolu and committed by
Will Deacon
06cdcc32 3e7f57d1

+42 -78
+34 -27
drivers/iommu/io-pgfault.c
··· 59 59 } 60 60 EXPORT_SYMBOL_GPL(iopf_free_group); 61 61 62 - static struct iommu_domain *get_domain_for_iopf(struct device *dev, 63 - struct iommu_fault *fault) 64 - { 65 - struct iommu_domain *domain; 66 - 67 - if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { 68 - domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0); 69 - if (IS_ERR(domain)) 70 - domain = NULL; 71 - } else { 72 - domain = iommu_get_domain_for_dev(dev); 73 - } 74 - 75 - if (!domain || !domain->iopf_handler) { 76 - dev_warn_ratelimited(dev, 77 - "iopf (pasid %d) without domain attached or handler installed\n", 78 - fault->prm.pasid); 79 - 80 - return NULL; 81 - } 82 - 83 - return domain; 84 - } 85 - 86 62 /* Non-last request of a group. Postpone until the last one. */ 87 63 static int report_partial_fault(struct iommu_fault_param *fault_param, 88 64 struct iommu_fault *fault) ··· 182 206 if (group == &abort_group) 183 207 goto err_abort; 184 208 185 - group->domain = get_domain_for_iopf(dev, fault); 186 - if (!group->domain) 209 + if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { 210 + group->attach_handle = iommu_attach_handle_get(dev->iommu_group, 211 + fault->prm.pasid, 212 + 0); 213 + if (IS_ERR(group->attach_handle)) { 214 + const struct iommu_ops *ops = dev_iommu_ops(dev); 215 + 216 + if (!ops->user_pasid_table) 217 + goto err_abort; 218 + 219 + /* 220 + * The iommu driver for this device supports user- 221 + * managed PASID table. Therefore page faults for 222 + * any PASID should go through the NESTING domain 223 + * attached to the device RID. 
224 + */ 225 + group->attach_handle = 226 + iommu_attach_handle_get(dev->iommu_group, 227 + IOMMU_NO_PASID, 228 + IOMMU_DOMAIN_NESTED); 229 + if (IS_ERR(group->attach_handle)) 230 + goto err_abort; 231 + } 232 + } else { 233 + group->attach_handle = 234 + iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0); 235 + if (IS_ERR(group->attach_handle)) 236 + goto err_abort; 237 + } 238 + 239 + if (!group->attach_handle->domain->iopf_handler) 187 240 goto err_abort; 188 241 189 242 /* 190 243 * On success iopf_handler must call iopf_group_response() and 191 244 * iopf_free_group() 192 245 */ 193 - if (group->domain->iopf_handler(group)) 246 + if (group->attach_handle->domain->iopf_handler(group)) 194 247 goto err_abort; 195 248 196 249 return; 197 250 198 251 err_abort: 252 + dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n", 253 + fault->prm.pasid); 199 254 iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE); 200 255 if (group == &abort_group) 201 256 __iopf_free_group(group);
+2 -1
drivers/iommu/iommu-sva.c
··· 272 272 if (status != IOMMU_PAGE_RESP_SUCCESS) 273 273 break; 274 274 275 - status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm); 275 + status = iommu_sva_handle_mm(&iopf->fault, 276 + group->attach_handle->domain->mm); 276 277 } 277 278 278 279 iopf_group_response(group, status);
-39
drivers/iommu/iommu.c
··· 3421 3421 } 3422 3422 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid); 3423 3423 3424 - /* 3425 - * iommu_get_domain_for_dev_pasid() - Retrieve domain for @pasid of @dev 3426 - * @dev: the queried device 3427 - * @pasid: the pasid of the device 3428 - * @type: matched domain type, 0 for any match 3429 - * 3430 - * This is a variant of iommu_get_domain_for_dev(). It returns the existing 3431 - * domain attached to pasid of a device. Callers must hold a lock around this 3432 - * function, and both iommu_attach/detach_dev_pasid() whenever a domain of 3433 - * type is being manipulated. This API does not internally resolve races with 3434 - * attach/detach. 3435 - * 3436 - * Return: attached domain on success, NULL otherwise. 3437 - */ 3438 - struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev, 3439 - ioasid_t pasid, 3440 - unsigned int type) 3441 - { 3442 - /* Caller must be a probed driver on dev */ 3443 - struct iommu_group *group = dev->iommu_group; 3444 - struct iommu_attach_handle *handle; 3445 - struct iommu_domain *domain = NULL; 3446 - 3447 - if (!group) 3448 - return NULL; 3449 - 3450 - xa_lock(&group->pasid_array); 3451 - handle = xa_load(&group->pasid_array, pasid); 3452 - if (handle) 3453 - domain = handle->domain; 3454 - 3455 - if (type && domain && domain->type != type) 3456 - domain = NULL; 3457 - xa_unlock(&group->pasid_array); 3458 - 3459 - return domain; 3460 - } 3461 - EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid); 3462 - 3463 3424 ioasid_t iommu_alloc_global_pasid(struct device *dev) 3464 3425 { 3465 3426 int ret;
+6 -11
include/linux/iommu.h
··· 127 127 /* list node for iommu_fault_param::faults */ 128 128 struct list_head pending_node; 129 129 struct work_struct work; 130 - struct iommu_domain *domain; 130 + struct iommu_attach_handle *attach_handle; 131 131 /* The device's fault data parameter. */ 132 132 struct iommu_fault_param *fault_param; 133 133 }; ··· 547 547 * @default_domain: If not NULL this will always be set as the default domain. 548 548 * This should be an IDENTITY/BLOCKED/PLATFORM domain. 549 549 * Do not use in new drivers. 550 + * @user_pasid_table: IOMMU driver supports user-managed PASID table. There is 551 + * no user domain for each PASID and the I/O page faults are 552 + * forwarded through the user domain attached to the device 553 + * RID. 550 554 */ 551 555 struct iommu_ops { 552 556 bool (*capable)(struct device *dev, enum iommu_cap); ··· 594 590 struct iommu_domain *blocked_domain; 595 591 struct iommu_domain *release_domain; 596 592 struct iommu_domain *default_domain; 593 + u8 user_pasid_table:1; 597 594 }; 598 595 599 596 /** ··· 1069 1064 struct iommu_attach_handle *handle); 1070 1065 void iommu_detach_device_pasid(struct iommu_domain *domain, 1071 1066 struct device *dev, ioasid_t pasid); 1072 - struct iommu_domain * 1073 - iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, 1074 - unsigned int type); 1075 1067 ioasid_t iommu_alloc_global_pasid(struct device *dev); 1076 1068 void iommu_free_global_pasid(ioasid_t pasid); 1077 1069 #else /* CONFIG_IOMMU_API */ ··· 1408 1406 static inline void iommu_detach_device_pasid(struct iommu_domain *domain, 1409 1407 struct device *dev, ioasid_t pasid) 1410 1408 { 1411 - } 1412 - 1413 - static inline struct iommu_domain * 1414 - iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid, 1415 - unsigned int type) 1416 - { 1417 - return NULL; 1418 1409 } 1419 1410 1420 1411 static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)