Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull iommufd updates from Jason Gunthorpe:
"Two series:

- Reorganize how the hardware page table objects are managed,
particularly their destruction flow. Increase the selftest test
coverage in this area by creating a more complete mock iommu
driver.

This is preparation to add a replace operation for HWPT binding,
which is done but waiting for the VFIO parts to complete so there
is a user.

- Split the iommufd support for "access" to make it two step -
allocate an access then link it to an IOAS. Update VFIO and have
VFIO always create an access even for the VFIO mdevs that never do
DMA.

This is also preparation for the replace VFIO series that will
allow replace to work on access types as well.

Three minor fixes:

- Syzkaller found the selftest code didn't check for overflow when
processing user VAs

- smatch noted a .data item should have been static

- Add a selftest that reproduces a syzkaller bug for batch carry
already fixed in rc"

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd: (21 commits)
iommufd/selftest: Cover domain unmap with huge pages and access
iommufd/selftest: Set varaiable mock_iommu_device storage-class-specifier to static
vfio: Check the presence for iommufd callbacks in __vfio_register_dev()
vfio/mdev: Uses the vfio emulated iommufd ops set in the mdev sample drivers
vfio-iommufd: Make vfio_iommufd_emulated_bind() return iommufd_access ID
vfio-iommufd: No need to record iommufd_ctx in vfio_device
iommufd: Create access in vfio_iommufd_emulated_bind()
iommu/iommufd: Pass iommufd_ctx pointer in iommufd_get_ioas()
iommufd/selftest: Catch overflow of uptr and length
iommufd/selftest: Add a selftest for iommufd_device_attach() with a hwpt argument
iommufd/selftest: Make selftest create a more complete mock device
iommufd/selftest: Rename the remaining mock device_id's to stdev_id
iommufd/selftest: Rename domain_id to hwpt_id for FIXTURE iommufd_mock_domain
iommufd/selftest: Rename domain_id to stdev_id for FIXTURE iommufd_ioas
iommufd/selftest: Rename the sefltest 'device_id' to 'stdev_id'
iommufd: Make iommufd_hw_pagetable_alloc() do iopt_table_add_domain()
iommufd: Move iommufd_device to iommufd_private.h
iommufd: Move ioas related HWPT destruction into iommufd_hw_pagetable_destroy()
iommufd: Consistently manage hwpt_item
iommufd: Add iommufd_lock_obj() around the auto-domains hwpts
...

+494 -272
+80 -125
drivers/iommu/iommufd/device.c
··· 15 15 "Allow IOMMUFD to bind to devices even if the platform cannot isolate " 16 16 "the MSI interrupt window. Enabling this is a security weakness."); 17 17 18 - /* 19 - * A iommufd_device object represents the binding relationship between a 20 - * consuming driver and the iommufd. These objects are created/destroyed by 21 - * external drivers, not by userspace. 22 - */ 23 - struct iommufd_device { 24 - struct iommufd_object obj; 25 - struct iommufd_ctx *ictx; 26 - struct iommufd_hw_pagetable *hwpt; 27 - /* Head at iommufd_hw_pagetable::devices */ 28 - struct list_head devices_item; 29 - /* always the physical device */ 30 - struct device *dev; 31 - struct iommu_group *group; 32 - bool enforce_cache_coherency; 33 - }; 34 - 35 18 void iommufd_device_destroy(struct iommufd_object *obj) 36 19 { 37 20 struct iommufd_device *idev = ··· 22 39 23 40 iommu_device_release_dma_owner(idev->dev); 24 41 iommu_group_put(idev->group); 25 - iommufd_ctx_put(idev->ictx); 42 + if (!iommufd_selftest_is_mock_dev(idev->dev)) 43 + iommufd_ctx_put(idev->ictx); 26 44 } 27 45 28 46 /** ··· 70 86 goto out_release_owner; 71 87 } 72 88 idev->ictx = ictx; 73 - iommufd_ctx_get(ictx); 89 + if (!iommufd_selftest_is_mock_dev(dev)) 90 + iommufd_ctx_get(ictx); 74 91 idev->dev = dev; 75 92 idev->enforce_cache_coherency = 76 93 device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY); ··· 153 168 * operation from the device (eg a simple DMA) cannot trigger an 154 169 * interrupt outside this iommufd context. 
155 170 */ 156 - if (!iommu_group_has_isolated_msi(idev->group)) { 171 + if (!iommufd_selftest_is_mock_dev(idev->dev) && 172 + !iommu_group_has_isolated_msi(idev->group)) { 157 173 if (!allow_unsafe_interrupts) 158 174 return -EPERM; 159 175 ··· 172 186 { 173 187 struct iommufd_device *cur_dev; 174 188 189 + lockdep_assert_held(&hwpt->devices_lock); 190 + 175 191 list_for_each_entry(cur_dev, &hwpt->devices, devices_item) 176 192 if (cur_dev->group == group) 177 193 return true; 178 194 return false; 179 195 } 180 196 181 - static int iommufd_device_do_attach(struct iommufd_device *idev, 182 - struct iommufd_hw_pagetable *hwpt) 197 + int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, 198 + struct iommufd_device *idev) 183 199 { 184 200 phys_addr_t sw_msi_start = PHYS_ADDR_MAX; 185 201 int rc; 186 202 187 - mutex_lock(&hwpt->devices_lock); 203 + lockdep_assert_held(&hwpt->devices_lock); 204 + 205 + if (WARN_ON(idev->hwpt)) 206 + return -EINVAL; 188 207 189 208 /* 190 209 * Try to upgrade the domain we have, it is an iommu driver bug to ··· 204 213 hwpt->domain); 205 214 if (!hwpt->enforce_cache_coherency) { 206 215 WARN_ON(list_empty(&hwpt->devices)); 207 - rc = -EINVAL; 208 - goto out_unlock; 216 + return -EINVAL; 209 217 } 210 218 } 211 219 212 220 rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev, 213 221 idev->group, &sw_msi_start); 214 222 if (rc) 215 - goto out_unlock; 223 + return rc; 216 224 217 225 rc = iommufd_device_setup_msi(idev, hwpt, sw_msi_start); 218 226 if (rc) 219 - goto out_iova; 227 + goto err_unresv; 220 228 221 229 /* 222 230 * FIXME: Hack around missing a device-centric iommu api, only attach to ··· 224 234 if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) { 225 235 rc = iommu_attach_group(hwpt->domain, idev->group); 226 236 if (rc) 227 - goto out_iova; 228 - 229 - if (list_empty(&hwpt->devices)) { 230 - rc = iopt_table_add_domain(&hwpt->ioas->iopt, 231 - hwpt->domain); 232 - if (rc) 233 - goto 
out_detach; 234 - } 237 + goto err_unresv; 235 238 } 239 + return 0; 240 + err_unresv: 241 + iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev); 242 + return rc; 243 + } 244 + 245 + void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt, 246 + struct iommufd_device *idev) 247 + { 248 + if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) 249 + iommu_detach_group(hwpt->domain, idev->group); 250 + iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev); 251 + } 252 + 253 + static int iommufd_device_do_attach(struct iommufd_device *idev, 254 + struct iommufd_hw_pagetable *hwpt) 255 + { 256 + int rc; 257 + 258 + mutex_lock(&hwpt->devices_lock); 259 + rc = iommufd_hw_pagetable_attach(hwpt, idev); 260 + if (rc) 261 + goto out_unlock; 236 262 237 263 idev->hwpt = hwpt; 238 264 refcount_inc(&hwpt->obj.users); 239 265 list_add(&idev->devices_item, &hwpt->devices); 240 - mutex_unlock(&hwpt->devices_lock); 241 - return 0; 242 - 243 - out_detach: 244 - iommu_detach_group(hwpt->domain, idev->group); 245 - out_iova: 246 - iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev); 247 266 out_unlock: 248 267 mutex_unlock(&hwpt->devices_lock); 249 268 return rc; ··· 279 280 if (!hwpt->auto_domain) 280 281 continue; 281 282 283 + if (!iommufd_lock_obj(&hwpt->obj)) 284 + continue; 282 285 rc = iommufd_device_do_attach(idev, hwpt); 286 + iommufd_put_object(&hwpt->obj); 283 287 284 288 /* 285 289 * -EINVAL means the domain is incompatible with the device. 
··· 294 292 goto out_unlock; 295 293 } 296 294 297 - hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev->dev); 295 + hwpt = iommufd_hw_pagetable_alloc(idev->ictx, ioas, idev, true); 298 296 if (IS_ERR(hwpt)) { 299 297 rc = PTR_ERR(hwpt); 300 298 goto out_unlock; 301 299 } 302 300 hwpt->auto_domain = true; 303 301 304 - rc = iommufd_device_do_attach(idev, hwpt); 305 - if (rc) 306 - goto out_abort; 307 - list_add_tail(&hwpt->hwpt_item, &ioas->hwpt_list); 308 - 309 302 mutex_unlock(&ioas->mutex); 310 303 iommufd_object_finalize(idev->ictx, &hwpt->obj); 311 304 return 0; 312 - 313 - out_abort: 314 - iommufd_object_abort_and_destroy(idev->ictx, &hwpt->obj); 315 305 out_unlock: 316 306 mutex_unlock(&ioas->mutex); 317 307 return rc; ··· 375 381 { 376 382 struct iommufd_hw_pagetable *hwpt = idev->hwpt; 377 383 378 - mutex_lock(&hwpt->ioas->mutex); 379 384 mutex_lock(&hwpt->devices_lock); 380 385 list_del(&idev->devices_item); 381 - if (!iommufd_hw_pagetable_has_group(hwpt, idev->group)) { 382 - if (list_empty(&hwpt->devices)) { 383 - iopt_table_remove_domain(&hwpt->ioas->iopt, 384 - hwpt->domain); 385 - list_del(&hwpt->hwpt_item); 386 - } 387 - iommu_detach_group(hwpt->domain, idev->group); 388 - } 389 - iopt_remove_reserved_iova(&hwpt->ioas->iopt, idev->dev); 386 + idev->hwpt = NULL; 387 + iommufd_hw_pagetable_detach(hwpt, idev); 390 388 mutex_unlock(&hwpt->devices_lock); 391 - mutex_unlock(&hwpt->ioas->mutex); 392 389 393 390 if (hwpt->auto_domain) 394 391 iommufd_object_destroy_user(idev->ictx, &hwpt->obj); 395 392 else 396 393 refcount_dec(&hwpt->obj.users); 397 - 398 - idev->hwpt = NULL; 399 394 400 395 refcount_dec(&idev->obj.users); 401 396 } ··· 395 412 struct iommufd_access *access = 396 413 container_of(obj, struct iommufd_access, obj); 397 414 398 - iopt_remove_access(&access->ioas->iopt, access); 415 + if (access->ioas) { 416 + iopt_remove_access(&access->ioas->iopt, access); 417 + refcount_dec(&access->ioas->obj.users); 418 + access->ioas = NULL; 419 + } 
399 420 iommufd_ctx_put(access->ictx); 400 - refcount_dec(&access->ioas->obj.users); 401 421 } 402 422 403 423 /** 404 424 * iommufd_access_create - Create an iommufd_access 405 425 * @ictx: iommufd file descriptor 406 - * @ioas_id: ID for a IOMMUFD_OBJ_IOAS 407 426 * @ops: Driver's ops to associate with the access 408 427 * @data: Opaque data to pass into ops functions 428 + * @id: Output ID number to return to userspace for this access 409 429 * 410 430 * An iommufd_access allows a driver to read/write to the IOAS without using 411 431 * DMA. The underlying CPU memory can be accessed using the ··· 417 431 * The provided ops are required to use iommufd_access_pin_pages(). 418 432 */ 419 433 struct iommufd_access * 420 - iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id, 421 - const struct iommufd_access_ops *ops, void *data) 434 + iommufd_access_create(struct iommufd_ctx *ictx, 435 + const struct iommufd_access_ops *ops, void *data, u32 *id) 422 436 { 423 437 struct iommufd_access *access; 424 - struct iommufd_object *obj; 425 - int rc; 426 438 427 439 /* 428 440 * There is no uAPI for the access object, but to keep things symmetric ··· 433 449 access->data = data; 434 450 access->ops = ops; 435 451 436 - obj = iommufd_get_object(ictx, ioas_id, IOMMUFD_OBJ_IOAS); 437 - if (IS_ERR(obj)) { 438 - rc = PTR_ERR(obj); 439 - goto out_abort; 440 - } 441 - access->ioas = container_of(obj, struct iommufd_ioas, obj); 442 - iommufd_ref_to_users(obj); 443 - 444 452 if (ops->needs_pin_pages) 445 453 access->iova_alignment = PAGE_SIZE; 446 454 else 447 455 access->iova_alignment = 1; 448 - rc = iopt_add_access(&access->ioas->iopt, access); 449 - if (rc) 450 - goto out_put_ioas; 451 456 452 457 /* The calling driver is a user until iommufd_access_destroy() */ 453 458 refcount_inc(&access->obj.users); 454 459 access->ictx = ictx; 455 460 iommufd_ctx_get(ictx); 456 461 iommufd_object_finalize(ictx, &access->obj); 462 + *id = access->obj.id; 457 463 return access; 458 - 
out_put_ioas: 459 - refcount_dec(&access->ioas->obj.users); 460 - out_abort: 461 - iommufd_object_abort(ictx, &access->obj); 462 - return ERR_PTR(rc); 463 464 } 464 465 EXPORT_SYMBOL_NS_GPL(iommufd_access_create, IOMMUFD); 465 466 ··· 462 493 WARN_ON(!was_destroyed); 463 494 } 464 495 EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD); 496 + 497 + int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id) 498 + { 499 + struct iommufd_ioas *new_ioas; 500 + int rc = 0; 501 + 502 + if (access->ioas) 503 + return -EINVAL; 504 + 505 + new_ioas = iommufd_get_ioas(access->ictx, ioas_id); 506 + if (IS_ERR(new_ioas)) 507 + return PTR_ERR(new_ioas); 508 + 509 + rc = iopt_add_access(&new_ioas->iopt, access); 510 + if (rc) { 511 + iommufd_put_object(&new_ioas->obj); 512 + return rc; 513 + } 514 + iommufd_ref_to_users(&new_ioas->obj); 515 + 516 + access->ioas = new_ioas; 517 + return 0; 518 + } 519 + EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, IOMMUFD); 465 520 466 521 /** 467 522 * iommufd_access_notify_unmap - Notify users of an iopt to stop using it ··· 719 726 return rc; 720 727 } 721 728 EXPORT_SYMBOL_NS_GPL(iommufd_access_rw, IOMMUFD); 722 - 723 - #ifdef CONFIG_IOMMUFD_TEST 724 - /* 725 - * Creating a real iommufd_device is too hard, bypass creating a iommufd_device 726 - * and go directly to attaching a domain. 
727 - */ 728 - struct iommufd_hw_pagetable * 729 - iommufd_device_selftest_attach(struct iommufd_ctx *ictx, 730 - struct iommufd_ioas *ioas, 731 - struct device *mock_dev) 732 - { 733 - struct iommufd_hw_pagetable *hwpt; 734 - int rc; 735 - 736 - hwpt = iommufd_hw_pagetable_alloc(ictx, ioas, mock_dev); 737 - if (IS_ERR(hwpt)) 738 - return hwpt; 739 - 740 - rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain); 741 - if (rc) 742 - goto out_hwpt; 743 - 744 - refcount_inc(&hwpt->obj.users); 745 - iommufd_object_finalize(ictx, &hwpt->obj); 746 - return hwpt; 747 - 748 - out_hwpt: 749 - iommufd_object_abort_and_destroy(ictx, &hwpt->obj); 750 - return ERR_PTR(rc); 751 - } 752 - 753 - void iommufd_device_selftest_detach(struct iommufd_ctx *ictx, 754 - struct iommufd_hw_pagetable *hwpt) 755 - { 756 - iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain); 757 - refcount_dec(&hwpt->obj.users); 758 - } 759 - #endif
+59 -11
drivers/iommu/iommufd/hw_pagetable.c
··· 13 13 14 14 WARN_ON(!list_empty(&hwpt->devices)); 15 15 16 - iommu_domain_free(hwpt->domain); 16 + if (!list_empty(&hwpt->hwpt_item)) { 17 + mutex_lock(&hwpt->ioas->mutex); 18 + list_del(&hwpt->hwpt_item); 19 + mutex_unlock(&hwpt->ioas->mutex); 20 + 21 + iopt_table_remove_domain(&hwpt->ioas->iopt, hwpt->domain); 22 + } 23 + 24 + if (hwpt->domain) 25 + iommu_domain_free(hwpt->domain); 26 + 17 27 refcount_dec(&hwpt->ioas->obj.users); 18 28 mutex_destroy(&hwpt->devices_lock); 19 29 } ··· 32 22 * iommufd_hw_pagetable_alloc() - Get an iommu_domain for a device 33 23 * @ictx: iommufd context 34 24 * @ioas: IOAS to associate the domain with 35 - * @dev: Device to get an iommu_domain for 25 + * @idev: Device to get an iommu_domain for 26 + * @immediate_attach: True if idev should be attached to the hwpt 36 27 * 37 - * Allocate a new iommu_domain and return it as a hw_pagetable. 28 + * Allocate a new iommu_domain and return it as a hw_pagetable. The HWPT 29 + * will be linked to the given ioas and upon return the underlying iommu_domain 30 + * is fully popoulated. 
38 31 */ 39 32 struct iommufd_hw_pagetable * 40 33 iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, 41 - struct device *dev) 34 + struct iommufd_device *idev, bool immediate_attach) 42 35 { 43 36 struct iommufd_hw_pagetable *hwpt; 44 37 int rc; 45 38 39 + lockdep_assert_held(&ioas->mutex); 40 + 46 41 hwpt = iommufd_object_alloc(ictx, hwpt, IOMMUFD_OBJ_HW_PAGETABLE); 47 42 if (IS_ERR(hwpt)) 48 43 return hwpt; 49 - 50 - hwpt->domain = iommu_domain_alloc(dev->bus); 51 - if (!hwpt->domain) { 52 - rc = -ENOMEM; 53 - goto out_abort; 54 - } 55 44 56 45 INIT_LIST_HEAD(&hwpt->devices); 57 46 INIT_LIST_HEAD(&hwpt->hwpt_item); ··· 58 49 /* Pairs with iommufd_hw_pagetable_destroy() */ 59 50 refcount_inc(&ioas->obj.users); 60 51 hwpt->ioas = ioas; 52 + 53 + hwpt->domain = iommu_domain_alloc(idev->dev->bus); 54 + if (!hwpt->domain) { 55 + rc = -ENOMEM; 56 + goto out_abort; 57 + } 58 + 59 + mutex_lock(&hwpt->devices_lock); 60 + 61 + /* 62 + * immediate_attach exists only to accommodate iommu drivers that cannot 63 + * directly allocate a domain. These drivers do not finish creating the 64 + * domain until attach is completed. Thus we must have this call 65 + * sequence. Once those drivers are fixed this should be removed. 
66 + */ 67 + if (immediate_attach) { 68 + rc = iommufd_hw_pagetable_attach(hwpt, idev); 69 + if (rc) 70 + goto out_unlock; 71 + } 72 + 73 + rc = iopt_table_add_domain(&hwpt->ioas->iopt, hwpt->domain); 74 + if (rc) 75 + goto out_detach; 76 + list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list); 77 + 78 + if (immediate_attach) { 79 + /* See iommufd_device_do_attach() */ 80 + refcount_inc(&hwpt->obj.users); 81 + idev->hwpt = hwpt; 82 + list_add(&idev->devices_item, &hwpt->devices); 83 + } 84 + 85 + mutex_unlock(&hwpt->devices_lock); 61 86 return hwpt; 62 87 88 + out_detach: 89 + if (immediate_attach) 90 + iommufd_hw_pagetable_detach(hwpt, idev); 91 + out_unlock: 92 + mutex_unlock(&hwpt->devices_lock); 63 93 out_abort: 64 - iommufd_object_abort(ictx, &hwpt->obj); 94 + iommufd_object_abort_and_destroy(ictx, &hwpt->obj); 65 95 return ERR_PTR(rc); 66 96 }
+7 -7
drivers/iommu/iommufd/ioas.c
··· 71 71 if (cmd->__reserved) 72 72 return -EOPNOTSUPP; 73 73 74 - ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); 74 + ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); 75 75 if (IS_ERR(ioas)) 76 76 return PTR_ERR(ioas); 77 77 ··· 151 151 if (cmd->__reserved) 152 152 return -EOPNOTSUPP; 153 153 154 - ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); 154 + ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); 155 155 if (IS_ERR(ioas)) 156 156 return PTR_ERR(ioas); 157 157 iopt = &ioas->iopt; ··· 213 213 if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) 214 214 return -EOVERFLOW; 215 215 216 - ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); 216 + ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); 217 217 if (IS_ERR(ioas)) 218 218 return PTR_ERR(ioas); 219 219 ··· 253 253 cmd->dst_iova >= ULONG_MAX) 254 254 return -EOVERFLOW; 255 255 256 - src_ioas = iommufd_get_ioas(ucmd, cmd->src_ioas_id); 256 + src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id); 257 257 if (IS_ERR(src_ioas)) 258 258 return PTR_ERR(src_ioas); 259 259 rc = iopt_get_pages(&src_ioas->iopt, cmd->src_iova, cmd->length, ··· 262 262 if (rc) 263 263 return rc; 264 264 265 - dst_ioas = iommufd_get_ioas(ucmd, cmd->dst_ioas_id); 265 + dst_ioas = iommufd_get_ioas(ucmd->ictx, cmd->dst_ioas_id); 266 266 if (IS_ERR(dst_ioas)) { 267 267 rc = PTR_ERR(dst_ioas); 268 268 goto out_pages; ··· 292 292 unsigned long unmapped = 0; 293 293 int rc; 294 294 295 - ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); 295 + ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); 296 296 if (IS_ERR(ioas)) 297 297 return PTR_ERR(ioas); 298 298 ··· 381 381 if (cmd->__reserved) 382 382 return -EOPNOTSUPP; 383 383 384 - ioas = iommufd_get_ioas(ucmd, cmd->object_id); 384 + ioas = iommufd_get_ioas(ucmd->ictx, cmd->object_id); 385 385 if (IS_ERR(ioas)) 386 386 return PTR_ERR(ioas); 387 387
+30 -9
drivers/iommu/iommufd/iommufd_private.h
··· 12 12 struct iommu_domain; 13 13 struct iommu_group; 14 14 struct iommu_option; 15 + struct iommufd_device; 15 16 16 17 struct iommufd_ctx { 17 18 struct file *file; ··· 212 211 struct list_head hwpt_list; 213 212 }; 214 213 215 - static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ucmd *ucmd, 214 + static inline struct iommufd_ioas *iommufd_get_ioas(struct iommufd_ctx *ictx, 216 215 u32 id) 217 216 { 218 - return container_of(iommufd_get_object(ucmd->ictx, id, 217 + return container_of(iommufd_get_object(ictx, id, 219 218 IOMMUFD_OBJ_IOAS), 220 219 struct iommufd_ioas, obj); 221 220 } ··· 255 254 256 255 struct iommufd_hw_pagetable * 257 256 iommufd_hw_pagetable_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, 258 - struct device *dev); 257 + struct iommufd_device *idev, bool immediate_attach); 258 + int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, 259 + struct iommufd_device *idev); 260 + void iommufd_hw_pagetable_detach(struct iommufd_hw_pagetable *hwpt, 261 + struct iommufd_device *idev); 259 262 void iommufd_hw_pagetable_destroy(struct iommufd_object *obj); 263 + 264 + /* 265 + * A iommufd_device object represents the binding relationship between a 266 + * consuming driver and the iommufd. These objects are created/destroyed by 267 + * external drivers, not by userspace. 
268 + */ 269 + struct iommufd_device { 270 + struct iommufd_object obj; 271 + struct iommufd_ctx *ictx; 272 + struct iommufd_hw_pagetable *hwpt; 273 + /* Head at iommufd_hw_pagetable::devices */ 274 + struct list_head devices_item; 275 + /* always the physical device */ 276 + struct device *dev; 277 + struct iommu_group *group; 278 + bool enforce_cache_coherency; 279 + }; 260 280 261 281 void iommufd_device_destroy(struct iommufd_object *obj); 262 282 ··· 297 275 void iommufd_access_destroy_object(struct iommufd_object *obj); 298 276 299 277 #ifdef CONFIG_IOMMUFD_TEST 300 - struct iommufd_hw_pagetable * 301 - iommufd_device_selftest_attach(struct iommufd_ctx *ictx, 302 - struct iommufd_ioas *ioas, 303 - struct device *mock_dev); 304 - void iommufd_device_selftest_detach(struct iommufd_ctx *ictx, 305 - struct iommufd_hw_pagetable *hwpt); 306 278 int iommufd_test(struct iommufd_ucmd *ucmd); 307 279 void iommufd_selftest_destroy(struct iommufd_object *obj); 308 280 extern size_t iommufd_test_memory_limit; ··· 305 289 bool iommufd_should_fail(void); 306 290 void __init iommufd_test_init(void); 307 291 void iommufd_test_exit(void); 292 + bool iommufd_selftest_is_mock_dev(struct device *dev); 308 293 #else 309 294 static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, 310 295 unsigned int ioas_id, ··· 321 304 } 322 305 static inline void iommufd_test_exit(void) 323 306 { 307 + } 308 + static inline bool iommufd_selftest_is_mock_dev(struct device *dev) 309 + { 310 + return false; 324 311 } 325 312 #endif 326 313 #endif
+1 -1
drivers/iommu/iommufd/iommufd_test.h
··· 49 49 __aligned_u64 length; 50 50 } add_reserved; 51 51 struct { 52 - __u32 out_device_id; 52 + __u32 out_stdev_id; 53 53 __u32 out_hwpt_id; 54 54 } mock_domain; 55 55 struct {
+186 -33
drivers/iommu/iommufd/selftest.c
··· 75 75 return; 76 76 *flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ; 77 77 78 - ioas = iommufd_get_ioas(ucmd, ioas_id); 78 + ioas = iommufd_get_ioas(ucmd->ictx, ioas_id); 79 79 if (IS_ERR(ioas)) 80 80 return; 81 81 *iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova); ··· 91 91 TYPE_IDEV, 92 92 }; 93 93 94 + struct mock_dev { 95 + struct device dev; 96 + }; 97 + 94 98 struct selftest_obj { 95 99 struct iommufd_object obj; 96 100 enum selftest_obj_type type; 97 101 98 102 union { 99 103 struct { 100 - struct iommufd_hw_pagetable *hwpt; 104 + struct iommufd_device *idev; 101 105 struct iommufd_ctx *ictx; 102 - struct device mock_dev; 106 + struct mock_dev *mock_dev; 103 107 } idev; 104 108 }; 109 + }; 110 + 111 + static void mock_domain_blocking_free(struct iommu_domain *domain) 112 + { 113 + } 114 + 115 + static int mock_domain_nop_attach(struct iommu_domain *domain, 116 + struct device *dev) 117 + { 118 + return 0; 119 + } 120 + 121 + static const struct iommu_domain_ops mock_blocking_ops = { 122 + .free = mock_domain_blocking_free, 123 + .attach_dev = mock_domain_nop_attach, 124 + }; 125 + 126 + static struct iommu_domain mock_blocking_domain = { 127 + .type = IOMMU_DOMAIN_BLOCKED, 128 + .ops = &mock_blocking_ops, 105 129 }; 106 130 107 131 static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) 108 132 { 109 133 struct mock_iommu_domain *mock; 134 + 135 + if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED) 136 + return &mock_blocking_domain; 110 137 111 138 if (WARN_ON(iommu_domain_type != IOMMU_DOMAIN_UNMANAGED)) 112 139 return NULL; ··· 263 236 return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE; 264 237 } 265 238 239 + static bool mock_domain_capable(struct device *dev, enum iommu_cap cap) 240 + { 241 + return cap == IOMMU_CAP_CACHE_COHERENCY; 242 + } 243 + 244 + static void mock_domain_set_plaform_dma_ops(struct device *dev) 245 + { 246 + /* 247 + * mock doesn't setup default domains because we can't hook into the 248 + * normal probe 
path 249 + */ 250 + } 251 + 266 252 static const struct iommu_ops mock_ops = { 267 253 .owner = THIS_MODULE, 268 254 .pgsize_bitmap = MOCK_IO_PAGE_SIZE, 269 255 .domain_alloc = mock_domain_alloc, 256 + .capable = mock_domain_capable, 257 + .set_platform_dma_ops = mock_domain_set_plaform_dma_ops, 270 258 .default_domain_ops = 271 259 &(struct iommu_domain_ops){ 272 260 .free = mock_domain_free, 261 + .attach_dev = mock_domain_nop_attach, 273 262 .map_pages = mock_domain_map_pages, 274 263 .unmap_pages = mock_domain_unmap_pages, 275 264 .iova_to_phys = mock_domain_iova_to_phys, 276 265 }, 266 + }; 267 + 268 + static struct iommu_device mock_iommu_device = { 269 + .ops = &mock_ops, 277 270 }; 278 271 279 272 static inline struct iommufd_hw_pagetable * ··· 316 269 return hwpt; 317 270 } 318 271 272 + static struct bus_type iommufd_mock_bus_type = { 273 + .name = "iommufd_mock", 274 + .iommu_ops = &mock_ops, 275 + }; 276 + 277 + static void mock_dev_release(struct device *dev) 278 + { 279 + struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); 280 + 281 + kfree(mdev); 282 + } 283 + 284 + static struct mock_dev *mock_dev_create(void) 285 + { 286 + struct iommu_group *iommu_group; 287 + struct dev_iommu *dev_iommu; 288 + struct mock_dev *mdev; 289 + int rc; 290 + 291 + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); 292 + if (!mdev) 293 + return ERR_PTR(-ENOMEM); 294 + 295 + device_initialize(&mdev->dev); 296 + mdev->dev.release = mock_dev_release; 297 + mdev->dev.bus = &iommufd_mock_bus_type; 298 + 299 + iommu_group = iommu_group_alloc(); 300 + if (IS_ERR(iommu_group)) { 301 + rc = PTR_ERR(iommu_group); 302 + goto err_put; 303 + } 304 + 305 + rc = dev_set_name(&mdev->dev, "iommufd_mock%u", 306 + iommu_group_id(iommu_group)); 307 + if (rc) 308 + goto err_group; 309 + 310 + /* 311 + * The iommu core has no way to associate a single device with an iommu 312 + * driver (heck currently it can't even support two iommu_drivers 313 + * registering). 
Hack it together with an open coded dev_iommu_get(). 314 + * Notice that the normal notifier triggered iommu release process also 315 + * does not work here because this bus is not in iommu_buses. 316 + */ 317 + mdev->dev.iommu = kzalloc(sizeof(*dev_iommu), GFP_KERNEL); 318 + if (!mdev->dev.iommu) { 319 + rc = -ENOMEM; 320 + goto err_group; 321 + } 322 + mutex_init(&mdev->dev.iommu->lock); 323 + mdev->dev.iommu->iommu_dev = &mock_iommu_device; 324 + 325 + rc = device_add(&mdev->dev); 326 + if (rc) 327 + goto err_dev_iommu; 328 + 329 + rc = iommu_group_add_device(iommu_group, &mdev->dev); 330 + if (rc) 331 + goto err_del; 332 + iommu_group_put(iommu_group); 333 + return mdev; 334 + 335 + err_del: 336 + device_del(&mdev->dev); 337 + err_dev_iommu: 338 + kfree(mdev->dev.iommu); 339 + mdev->dev.iommu = NULL; 340 + err_group: 341 + iommu_group_put(iommu_group); 342 + err_put: 343 + put_device(&mdev->dev); 344 + return ERR_PTR(rc); 345 + } 346 + 347 + static void mock_dev_destroy(struct mock_dev *mdev) 348 + { 349 + iommu_group_remove_device(&mdev->dev); 350 + device_del(&mdev->dev); 351 + kfree(mdev->dev.iommu); 352 + mdev->dev.iommu = NULL; 353 + put_device(&mdev->dev); 354 + } 355 + 356 + bool iommufd_selftest_is_mock_dev(struct device *dev) 357 + { 358 + return dev->release == mock_dev_release; 359 + } 360 + 319 361 /* Create an hw_pagetable with the mock domain so we can test the domain ops */ 320 362 static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd, 321 363 struct iommu_test_cmd *cmd) 322 364 { 323 - static struct bus_type mock_bus = { .iommu_ops = &mock_ops }; 324 - struct iommufd_hw_pagetable *hwpt; 365 + struct iommufd_device *idev; 325 366 struct selftest_obj *sobj; 326 - struct iommufd_ioas *ioas; 367 + u32 pt_id = cmd->id; 368 + u32 idev_id; 327 369 int rc; 328 370 329 - ioas = iommufd_get_ioas(ucmd, cmd->id); 330 - if (IS_ERR(ioas)) 331 - return PTR_ERR(ioas); 332 - 333 371 sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST); 334 
- if (IS_ERR(sobj)) { 335 - rc = PTR_ERR(sobj); 336 - goto out_ioas; 337 - } 372 + if (IS_ERR(sobj)) 373 + return PTR_ERR(sobj); 374 + 338 375 sobj->idev.ictx = ucmd->ictx; 339 376 sobj->type = TYPE_IDEV; 340 - sobj->idev.mock_dev.bus = &mock_bus; 341 377 342 - hwpt = iommufd_device_selftest_attach(ucmd->ictx, ioas, 343 - &sobj->idev.mock_dev); 344 - if (IS_ERR(hwpt)) { 345 - rc = PTR_ERR(hwpt); 378 + sobj->idev.mock_dev = mock_dev_create(); 379 + if (IS_ERR(sobj->idev.mock_dev)) { 380 + rc = PTR_ERR(sobj->idev.mock_dev); 346 381 goto out_sobj; 347 382 } 348 - sobj->idev.hwpt = hwpt; 349 383 350 - /* Userspace must destroy both of these IDs to destroy the object */ 351 - cmd->mock_domain.out_hwpt_id = hwpt->obj.id; 352 - cmd->mock_domain.out_device_id = sobj->obj.id; 384 + idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev, 385 + &idev_id); 386 + if (IS_ERR(idev)) { 387 + rc = PTR_ERR(idev); 388 + goto out_mdev; 389 + } 390 + sobj->idev.idev = idev; 391 + 392 + rc = iommufd_device_attach(idev, &pt_id); 393 + if (rc) 394 + goto out_unbind; 395 + 396 + /* Userspace must destroy the device_id to destroy the object */ 397 + cmd->mock_domain.out_hwpt_id = pt_id; 398 + cmd->mock_domain.out_stdev_id = sobj->obj.id; 353 399 iommufd_object_finalize(ucmd->ictx, &sobj->obj); 354 - iommufd_put_object(&ioas->obj); 355 400 return iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 356 401 402 + out_unbind: 403 + iommufd_device_unbind(idev); 404 + out_mdev: 405 + mock_dev_destroy(sobj->idev.mock_dev); 357 406 out_sobj: 358 407 iommufd_object_abort(ucmd->ictx, &sobj->obj); 359 - out_ioas: 360 - iommufd_put_object(&ioas->obj); 361 408 return rc; 362 409 } 363 410 ··· 463 322 struct iommufd_ioas *ioas; 464 323 int rc; 465 324 466 - ioas = iommufd_get_ioas(ucmd, mockpt_id); 325 + ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id); 467 326 if (IS_ERR(ioas)) 468 327 return PTR_ERR(ioas); 469 328 down_write(&ioas->iopt.iova_rwsem); ··· 480 339 { 481 340 struct iommufd_hw_pagetable 
*hwpt; 482 341 struct mock_iommu_domain *mock; 342 + uintptr_t end; 483 343 int rc; 484 344 485 345 if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE || 486 - (uintptr_t)uptr % MOCK_IO_PAGE_SIZE) 346 + (uintptr_t)uptr % MOCK_IO_PAGE_SIZE || 347 + check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end)) 487 348 return -EINVAL; 488 349 489 350 hwpt = get_md_pagetable(ucmd, mockpt_id, &mock); ··· 533 390 void __user *uptr, size_t length, 534 391 unsigned int refs) 535 392 { 536 - if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE) 393 + uintptr_t end; 394 + 395 + if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE || 396 + check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end)) 537 397 return -EINVAL; 538 398 539 399 for (; length; length -= PAGE_SIZE) { ··· 700 554 struct iommu_test_cmd *cmd = ucmd->cmd; 701 555 struct selftest_access *staccess; 702 556 struct iommufd_access *access; 557 + u32 id; 703 558 int fdno; 704 559 int rc; 705 560 ··· 718 571 } 719 572 720 573 access = iommufd_access_create( 721 - ucmd->ictx, ioas_id, 574 + ucmd->ictx, 722 575 (flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ? 
723 576 &selftest_access_ops_pin : 724 577 &selftest_access_ops, 725 - staccess); 578 + staccess, &id); 726 579 if (IS_ERR(access)) { 727 580 rc = PTR_ERR(access); 728 581 goto out_put_fdno; 729 582 } 583 + rc = iommufd_access_attach(access, ioas_id); 584 + if (rc) 585 + goto out_destroy; 730 586 cmd->create_access.out_access_fd = fdno; 731 587 rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 732 588 if (rc) ··· 930 780 931 781 switch (sobj->type) { 932 782 case TYPE_IDEV: 933 - iommufd_device_selftest_detach(sobj->idev.ictx, 934 - sobj->idev.hwpt); 783 + iommufd_device_detach(sobj->idev.idev); 784 + iommufd_device_unbind(sobj->idev.idev); 785 + mock_dev_destroy(sobj->idev.mock_dev); 935 786 break; 936 787 } 937 788 } ··· 996 845 { 997 846 dbgfs_root = 998 847 fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd); 848 + WARN_ON(bus_register(&iommufd_mock_bus_type)); 999 849 } 1000 850 1001 851 void iommufd_test_exit(void) 1002 852 { 1003 853 debugfs_remove_recursive(dbgfs_root); 854 + bus_unregister(&iommufd_mock_bus_type); 1004 855 }
+1 -1
drivers/iommu/iommufd/vfio_compat.c
··· 137 137 return iommufd_ucmd_respond(ucmd, sizeof(*cmd)); 138 138 139 139 case IOMMU_VFIO_IOAS_SET: 140 - ioas = iommufd_get_ioas(ucmd, cmd->ioas_id); 140 + ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); 141 141 if (IS_ERR(ioas)) 142 142 return PTR_ERR(ioas); 143 143 xa_lock(&ucmd->ictx->objects);
+18 -19
drivers/vfio/iommufd.c
··· 32 32 return 0; 33 33 } 34 34 35 - /* 36 - * If the driver doesn't provide this op then it means the device does 37 - * not do DMA at all. So nothing to do. 38 - */ 39 - if (!vdev->ops->bind_iommufd) 40 - return 0; 41 - 42 35 ret = vdev->ops->bind_iommufd(vdev, ictx, &device_id); 43 36 if (ret) 44 37 return ret; ··· 112 119 /* 113 120 * The emulated standard ops mean that vfio_device is going to use the 114 121 * "mdev path" and will call vfio_pin_pages()/vfio_dma_rw(). Drivers using this 115 - * ops set should call vfio_register_emulated_iommu_dev(). 122 + * ops set should call vfio_register_emulated_iommu_dev(). Drivers that do 123 + * not call vfio_pin_pages()/vfio_dma_rw() have no need to provide dma_unmap. 116 124 */ 117 125 118 126 static void vfio_emulated_unmap(void *data, unsigned long iova, ··· 121 127 { 122 128 struct vfio_device *vdev = data; 123 129 124 - vdev->ops->dma_unmap(vdev, iova, length); 130 + if (vdev->ops->dma_unmap) 131 + vdev->ops->dma_unmap(vdev, iova, length); 125 132 } 126 133 127 134 static const struct iommufd_access_ops vfio_user_ops = { ··· 133 138 int vfio_iommufd_emulated_bind(struct vfio_device *vdev, 134 139 struct iommufd_ctx *ictx, u32 *out_device_id) 135 140 { 141 + struct iommufd_access *user; 142 + 136 143 lockdep_assert_held(&vdev->dev_set->lock); 137 144 138 - vdev->iommufd_ictx = ictx; 139 - iommufd_ctx_get(ictx); 145 + user = iommufd_access_create(ictx, &vfio_user_ops, vdev, out_device_id); 146 + if (IS_ERR(user)) 147 + return PTR_ERR(user); 148 + vdev->iommufd_access = user; 140 149 return 0; 141 150 } 142 151 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_bind); ··· 151 152 152 153 if (vdev->iommufd_access) { 153 154 iommufd_access_destroy(vdev->iommufd_access); 155 + vdev->iommufd_attached = false; 154 156 vdev->iommufd_access = NULL; 155 157 } 156 - iommufd_ctx_put(vdev->iommufd_ictx); 157 - vdev->iommufd_ictx = NULL; 158 158 } 159 159 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_unbind); 160 160 161 161 int 
vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id) 162 162 { 163 - struct iommufd_access *user; 163 + int rc; 164 164 165 165 lockdep_assert_held(&vdev->dev_set->lock); 166 166 167 - user = iommufd_access_create(vdev->iommufd_ictx, *pt_id, &vfio_user_ops, 168 - vdev); 169 - if (IS_ERR(user)) 170 - return PTR_ERR(user); 171 - vdev->iommufd_access = user; 167 + if (vdev->iommufd_attached) 168 + return -EBUSY; 169 + rc = iommufd_access_attach(vdev->iommufd_access, *pt_id); 170 + if (rc) 171 + return rc; 172 + vdev->iommufd_attached = true; 172 173 return 0; 173 174 } 174 175 EXPORT_SYMBOL_GPL(vfio_iommufd_emulated_attach_ioas);
+3 -2
drivers/vfio/vfio_main.c
··· 255 255 { 256 256 int ret; 257 257 258 - if (WARN_ON(device->ops->bind_iommufd && 259 - (!device->ops->unbind_iommufd || 258 + if (WARN_ON(IS_ENABLED(CONFIG_IOMMUFD) && 259 + (!device->ops->bind_iommufd || 260 + !device->ops->unbind_iommufd || 260 261 !device->ops->attach_ioas))) 261 262 return -EINVAL; 262 263
+3 -2
include/linux/iommufd.h
··· 40 40 }; 41 41 42 42 struct iommufd_access * 43 - iommufd_access_create(struct iommufd_ctx *ictx, u32 ioas_id, 44 - const struct iommufd_access_ops *ops, void *data); 43 + iommufd_access_create(struct iommufd_ctx *ictx, 44 + const struct iommufd_access_ops *ops, void *data, u32 *id); 45 45 void iommufd_access_destroy(struct iommufd_access *access); 46 + int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id); 46 47 47 48 void iommufd_ctx_get(struct iommufd_ctx *ictx); 48 49
-1
include/linux/vfio.h
··· 60 60 void (*put_kvm)(struct kvm *kvm); 61 61 #if IS_ENABLED(CONFIG_IOMMUFD) 62 62 struct iommufd_device *iommufd_device; 63 - struct iommufd_ctx *iommufd_ictx; 64 63 bool iommufd_attached; 65 64 #endif 66 65 };
+3
samples/vfio-mdev/mbochs.c
··· 1374 1374 .write = mbochs_write, 1375 1375 .ioctl = mbochs_ioctl, 1376 1376 .mmap = mbochs_mmap, 1377 + .bind_iommufd = vfio_iommufd_emulated_bind, 1378 + .unbind_iommufd = vfio_iommufd_emulated_unbind, 1379 + .attach_ioas = vfio_iommufd_emulated_attach_ioas, 1377 1380 }; 1378 1381 1379 1382 static struct mdev_driver mbochs_driver = {
+3
samples/vfio-mdev/mdpy.c
··· 663 663 .write = mdpy_write, 664 664 .ioctl = mdpy_ioctl, 665 665 .mmap = mdpy_mmap, 666 + .bind_iommufd = vfio_iommufd_emulated_bind, 667 + .unbind_iommufd = vfio_iommufd_emulated_unbind, 668 + .attach_ioas = vfio_iommufd_emulated_attach_ioas, 666 669 }; 667 670 668 671 static struct mdev_driver mdpy_driver = {
+3
samples/vfio-mdev/mtty.c
··· 1269 1269 .read = mtty_read, 1270 1270 .write = mtty_write, 1271 1271 .ioctl = mtty_ioctl, 1272 + .bind_iommufd = vfio_iommufd_emulated_bind, 1273 + .unbind_iommufd = vfio_iommufd_emulated_unbind, 1274 + .attach_ioas = vfio_iommufd_emulated_attach_ioas, 1272 1275 }; 1273 1276 1274 1277 static struct mdev_driver mtty_driver = {
+74 -30
tools/testing/selftests/iommu/iommufd.c
··· 186 186 { 187 187 int fd; 188 188 uint32_t ioas_id; 189 - uint32_t domain_id; 189 + uint32_t stdev_id; 190 + uint32_t hwpt_id; 190 191 uint64_t base_iova; 191 192 }; 192 193 ··· 213 212 } 214 213 215 214 for (i = 0; i != variant->mock_domains; i++) { 216 - test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_id); 215 + test_cmd_mock_domain(self->ioas_id, &self->stdev_id, 216 + &self->hwpt_id); 217 217 self->base_iova = MOCK_APERTURE_START; 218 218 } 219 219 } ··· 251 249 252 250 TEST_F(iommufd_ioas, ioas_destroy) 253 251 { 254 - if (self->domain_id) { 255 - /* IOAS cannot be freed while a domain is on it */ 252 + if (self->stdev_id) { 253 + /* IOAS cannot be freed while a device has a HWPT using it */ 256 254 EXPECT_ERRNO(EBUSY, 257 255 _test_ioctl_destroy(self->fd, self->ioas_id)); 258 256 } else { ··· 261 259 } 262 260 } 263 261 262 + TEST_F(iommufd_ioas, hwpt_attach) 263 + { 264 + /* Create a device attached directly to a hwpt */ 265 + if (self->stdev_id) { 266 + test_cmd_mock_domain(self->hwpt_id, NULL, NULL); 267 + } else { 268 + test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL); 269 + } 270 + } 271 + 264 272 TEST_F(iommufd_ioas, ioas_area_destroy) 265 273 { 266 274 /* Adding an area does not change ability to destroy */ 267 275 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova); 268 - if (self->domain_id) 276 + if (self->stdev_id) 269 277 EXPECT_ERRNO(EBUSY, 270 278 _test_ioctl_destroy(self->fd, self->ioas_id)); 271 279 else ··· 394 382 for (i = 0; i != 10; i++) { 395 383 size_t length = PAGE_SIZE * (i + 1); 396 384 397 - if (self->domain_id) { 385 + if (self->stdev_id) { 398 386 test_ioctl_ioas_map(buffer, length, &iovas[i]); 399 387 } else { 400 388 test_ioctl_ioas_map((void *)(1UL << 31), length, ··· 430 418 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd)); 431 419 432 420 /* Allocate from an allowed region */ 433 - if (self->domain_id) { 421 + if (self->stdev_id) { 434 422 ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE; 435 
423 ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1; 436 424 } else { ··· 537 525 /* Range can be read */ 538 526 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd)); 539 527 EXPECT_EQ(1, ranges_cmd.num_iovas); 540 - if (!self->domain_id) { 528 + if (!self->stdev_id) { 541 529 EXPECT_EQ(0, ranges[0].start); 542 530 EXPECT_EQ(SIZE_MAX, ranges[0].last); 543 531 EXPECT_EQ(1, ranges_cmd.out_iova_alignment); ··· 562 550 &test_cmd)); 563 551 ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges); 564 552 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd)); 565 - if (!self->domain_id) { 553 + if (!self->stdev_id) { 566 554 EXPECT_EQ(2, ranges_cmd.num_iovas); 567 555 EXPECT_EQ(0, ranges[0].start); 568 556 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last); ··· 577 565 /* Buffer too small */ 578 566 memset(ranges, 0, BUFFER_SIZE); 579 567 ranges_cmd.num_iovas = 1; 580 - if (!self->domain_id) { 568 + if (!self->stdev_id) { 581 569 EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, 582 570 &ranges_cmd)); 583 571 EXPECT_EQ(2, ranges_cmd.num_iovas); ··· 592 580 } 593 581 EXPECT_EQ(0, ranges[1].start); 594 582 EXPECT_EQ(0, ranges[1].last); 583 + } 584 + 585 + TEST_F(iommufd_ioas, access_domain_destory) 586 + { 587 + struct iommu_test_cmd access_cmd = { 588 + .size = sizeof(access_cmd), 589 + .op = IOMMU_TEST_OP_ACCESS_PAGES, 590 + .access_pages = { .iova = self->base_iova + PAGE_SIZE, 591 + .length = PAGE_SIZE}, 592 + }; 593 + size_t buf_size = 2 * HUGEPAGE_SIZE; 594 + uint8_t *buf; 595 + 596 + buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, 597 + MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1, 598 + 0); 599 + ASSERT_NE(MAP_FAILED, buf); 600 + test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova); 601 + 602 + test_cmd_create_access(self->ioas_id, &access_cmd.id, 603 + MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES); 604 + access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE; 605 + ASSERT_EQ(0, 606 + ioctl(self->fd, 
_IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES), 607 + &access_cmd)); 608 + 609 + /* Causes a complicated unpin across a huge page boundary */ 610 + if (self->stdev_id) 611 + test_ioctl_destroy(self->stdev_id); 612 + 613 + test_cmd_destroy_access_pages( 614 + access_cmd.id, access_cmd.access_pages.out_access_pages_id); 615 + test_cmd_destroy_access(access_cmd.id); 616 + ASSERT_EQ(0, munmap(buf, buf_size)); 595 617 } 596 618 597 619 TEST_F(iommufd_ioas, access_pin) ··· 651 605 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES); 652 606 653 607 for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) { 654 - uint32_t mock_device_id; 608 + uint32_t mock_stdev_id; 655 609 uint32_t mock_hwpt_id; 656 610 657 611 access_cmd.access_pages.length = npages * PAGE_SIZE; ··· 683 637 ASSERT_EQ(0, ioctl(self->fd, 684 638 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES), 685 639 &access_cmd)); 686 - test_cmd_mock_domain(self->ioas_id, &mock_device_id, 640 + test_cmd_mock_domain(self->ioas_id, &mock_stdev_id, 687 641 &mock_hwpt_id); 688 642 check_map_cmd.id = mock_hwpt_id; 689 643 ASSERT_EQ(0, ioctl(self->fd, 690 644 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), 691 645 &check_map_cmd)); 692 646 693 - test_ioctl_destroy(mock_device_id); 694 - test_ioctl_destroy(mock_hwpt_id); 647 + test_ioctl_destroy(mock_stdev_id); 695 648 test_cmd_destroy_access_pages( 696 649 access_cmd.id, 697 650 access_cmd.access_pages.out_access_pages_id); ··· 834 789 ASSERT_NE(-1, child); 835 790 ASSERT_EQ(child, waitpid(child, NULL, 0)); 836 791 837 - if (self->domain_id) { 792 + if (self->stdev_id) { 838 793 /* 839 794 * If a domain already existed then everything was pinned within 840 795 * the fork, so this copies from one domain to another. 
··· 1033 988 { 1034 989 int fd; 1035 990 uint32_t ioas_id; 1036 - uint32_t domain_id; 1037 - uint32_t domain_ids[2]; 991 + uint32_t hwpt_id; 992 + uint32_t hwpt_ids[2]; 1038 993 int mmap_flags; 1039 994 size_t mmap_buf_size; 1040 995 }; ··· 1053 1008 ASSERT_NE(-1, self->fd); 1054 1009 test_ioctl_ioas_alloc(&self->ioas_id); 1055 1010 1056 - ASSERT_GE(ARRAY_SIZE(self->domain_ids), variant->mock_domains); 1011 + ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains); 1057 1012 1058 1013 for (i = 0; i != variant->mock_domains; i++) 1059 - test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_ids[i]); 1060 - self->domain_id = self->domain_ids[0]; 1014 + test_cmd_mock_domain(self->ioas_id, NULL, &self->hwpt_ids[i]); 1015 + self->hwpt_id = self->hwpt_ids[0]; 1061 1016 1062 1017 self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS; 1063 1018 self->mmap_buf_size = PAGE_SIZE * 8; ··· 1106 1061 struct iommu_test_cmd check_map_cmd = { \ 1107 1062 .size = sizeof(check_map_cmd), \ 1108 1063 .op = IOMMU_TEST_OP_MD_CHECK_MAP, \ 1109 - .id = self->domain_id, \ 1064 + .id = self->hwpt_id, \ 1110 1065 .check_map = { .iova = _iova, \ 1111 1066 .length = _length, \ 1112 1067 .uptr = (uintptr_t)(_ptr) }, \ ··· 1115 1070 ioctl(self->fd, \ 1116 1071 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \ 1117 1072 &check_map_cmd)); \ 1118 - if (self->domain_ids[1]) { \ 1119 - check_map_cmd.id = self->domain_ids[1]; \ 1073 + if (self->hwpt_ids[1]) { \ 1074 + check_map_cmd.id = self->hwpt_ids[1]; \ 1120 1075 ASSERT_EQ(0, \ 1121 1076 ioctl(self->fd, \ 1122 1077 _IOMMU_TEST_CMD( \ ··· 1242 1197 for (; end < buf_size; end += MOCK_PAGE_SIZE) { 1243 1198 size_t length = end - start; 1244 1199 unsigned int old_id; 1245 - uint32_t mock_device_id; 1200 + uint32_t mock_stdev_id; 1246 1201 __u64 iova; 1247 1202 1248 1203 test_ioctl_ioas_map(buf + start, length, &iova); 1249 1204 1250 1205 /* Add and destroy a domain while the area exists */ 1251 - old_id = self->domain_ids[1]; 1252 - 
test_cmd_mock_domain(self->ioas_id, &mock_device_id, 1253 - &self->domain_ids[1]); 1206 + old_id = self->hwpt_ids[1]; 1207 + test_cmd_mock_domain(self->ioas_id, &mock_stdev_id, 1208 + &self->hwpt_ids[1]); 1254 1209 1255 1210 check_mock_iova(buf + start, iova, length); 1256 1211 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE, ··· 1258 1213 start / PAGE_SIZE * PAGE_SIZE, 1259 1214 1); 1260 1215 1261 - test_ioctl_destroy(mock_device_id); 1262 - test_ioctl_destroy(self->domain_ids[1]); 1263 - self->domain_ids[1] = old_id; 1216 + test_ioctl_destroy(mock_stdev_id); 1217 + self->hwpt_ids[1] = old_id; 1264 1218 1265 1219 test_ioctl_ioas_unmap(iova, length); 1266 1220 }
+15 -23
tools/testing/selftests/iommu/iommufd_fail_nth.c
··· 297 297 TEST_FAIL_NTH(basic_fail_nth, map_domain) 298 298 { 299 299 uint32_t ioas_id; 300 - __u32 device_id; 300 + __u32 stdev_id; 301 301 __u32 hwpt_id; 302 302 __u64 iova; 303 303 ··· 313 313 314 314 fail_nth_enable(); 315 315 316 - if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 316 + if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id)) 317 317 return -1; 318 318 319 319 if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova, ··· 321 321 IOMMU_IOAS_MAP_READABLE)) 322 322 return -1; 323 323 324 - if (_test_ioctl_destroy(self->fd, device_id)) 325 - return -1; 326 - if (_test_ioctl_destroy(self->fd, hwpt_id)) 324 + if (_test_ioctl_destroy(self->fd, stdev_id)) 327 325 return -1; 328 326 329 - if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 327 + if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id)) 330 328 return -1; 331 329 return 0; 332 330 } ··· 332 334 TEST_FAIL_NTH(basic_fail_nth, map_two_domains) 333 335 { 334 336 uint32_t ioas_id; 335 - __u32 device_id2; 336 - __u32 device_id; 337 + __u32 stdev_id2; 338 + __u32 stdev_id; 337 339 __u32 hwpt_id2; 338 340 __u32 hwpt_id; 339 341 __u64 iova; ··· 348 350 if (_test_ioctl_set_temp_memory_limit(self->fd, 32)) 349 351 return -1; 350 352 351 - if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 353 + if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id)) 352 354 return -1; 353 355 354 356 fail_nth_enable(); 355 357 356 - if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2)) 358 + if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2)) 357 359 return -1; 358 360 359 361 if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova, ··· 361 363 IOMMU_IOAS_MAP_READABLE)) 362 364 return -1; 363 365 364 - if (_test_ioctl_destroy(self->fd, device_id)) 365 - return -1; 366 - if (_test_ioctl_destroy(self->fd, hwpt_id)) 366 + if (_test_ioctl_destroy(self->fd, stdev_id)) 367 367 return -1; 
368 368 369 - if (_test_ioctl_destroy(self->fd, device_id2)) 370 - return -1; 371 - if (_test_ioctl_destroy(self->fd, hwpt_id2)) 369 + if (_test_ioctl_destroy(self->fd, stdev_id2)) 372 370 return -1; 373 371 374 - if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 372 + if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id)) 375 373 return -1; 376 - if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2)) 374 + if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id2, &hwpt_id2)) 377 375 return -1; 378 376 return 0; 379 377 } ··· 512 518 { 513 519 uint32_t access_pages_id; 514 520 uint32_t ioas_id; 515 - __u32 device_id; 521 + __u32 stdev_id; 516 522 __u32 hwpt_id; 517 523 __u64 iova; 518 524 ··· 526 532 if (_test_ioctl_set_temp_memory_limit(self->fd, 32)) 527 533 return -1; 528 534 529 - if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 535 + if (_test_cmd_mock_domain(self->fd, ioas_id, &stdev_id, &hwpt_id)) 530 536 return -1; 531 537 532 538 if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova, ··· 564 570 return -1; 565 571 self->access_id = 0; 566 572 567 - if (_test_ioctl_destroy(self->fd, device_id)) 568 - return -1; 569 - if (_test_ioctl_destroy(self->fd, hwpt_id)) 573 + if (_test_ioctl_destroy(self->fd, stdev_id)) 570 574 return -1; 571 575 return 0; 572 576 }
+8 -8
tools/testing/selftests/iommu/iommufd_utils.h
··· 38 38 &test_cmd)); \ 39 39 }) 40 40 41 - static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *device_id, 41 + static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *stdev_id, 42 42 __u32 *hwpt_id) 43 43 { 44 44 struct iommu_test_cmd cmd = { ··· 52 52 ret = ioctl(fd, IOMMU_TEST_CMD, &cmd); 53 53 if (ret) 54 54 return ret; 55 - if (device_id) 56 - *device_id = cmd.mock_domain.out_device_id; 55 + if (stdev_id) 56 + *stdev_id = cmd.mock_domain.out_stdev_id; 57 57 assert(cmd.id != 0); 58 58 if (hwpt_id) 59 59 *hwpt_id = cmd.mock_domain.out_hwpt_id; 60 60 return 0; 61 61 } 62 - #define test_cmd_mock_domain(ioas_id, device_id, hwpt_id) \ 63 - ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, device_id, \ 64 - hwpt_id)) 65 - #define test_err_mock_domain(_errno, ioas_id, device_id, hwpt_id) \ 62 + #define test_cmd_mock_domain(ioas_id, stdev_id, hwpt_id) \ 63 + ASSERT_EQ(0, \ 64 + _test_cmd_mock_domain(self->fd, ioas_id, stdev_id, hwpt_id)) 65 + #define test_err_mock_domain(_errno, ioas_id, stdev_id, hwpt_id) \ 66 66 EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \ 67 - device_id, hwpt_id)) 67 + stdev_id, hwpt_id)) 68 68 69 69 static int _test_cmd_create_access(int fd, unsigned int ioas_id, 70 70 __u32 *access_id, unsigned int flags)