Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd

Pull more iommufd updates from Jason Gunthorpe:
"Change the driver callback op domain_alloc_user() into two ops:
domain_alloc_paging_flags() and domain_alloc_nested() that better
describe what the ops are expected to do.

There will be per-driver cleanup based on this going into the next
cycle via the driver trees"

* tag 'for-linus-iommufd' of git://git.kernel.org/pub/scm/linux/kernel/git/jgg/iommufd:
iommu: Rename ops->domain_alloc_user() to domain_alloc_paging_flags()
iommu: Add ops->domain_alloc_nested()

+57 -55
+4 -5
drivers/iommu/amd/iommu.c
··· 2407 2407 } 2408 2408 2409 2409 static struct iommu_domain * 2410 - amd_iommu_domain_alloc_user(struct device *dev, u32 flags, 2411 - struct iommu_domain *parent, 2412 - const struct iommu_user_data *user_data) 2410 + amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, 2411 + const struct iommu_user_data *user_data) 2413 2412 2414 2413 { 2415 2414 unsigned int type = IOMMU_DOMAIN_UNMANAGED; ··· 2419 2420 if (dev) 2420 2421 iommu = get_amd_iommu_from_dev(dev); 2421 2422 2422 - if ((flags & ~supported_flags) || parent || user_data) 2423 + if ((flags & ~supported_flags) || user_data) 2423 2424 return ERR_PTR(-EOPNOTSUPP); 2424 2425 2425 2426 /* Allocate domain with v2 page table if IOMMU supports PASID. */ ··· 2883 2884 .release_domain = &release_domain, 2884 2885 .identity_domain = &identity_domain.domain, 2885 2886 .domain_alloc = amd_iommu_domain_alloc, 2886 - .domain_alloc_user = amd_iommu_domain_alloc_user, 2887 + .domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags, 2887 2888 .domain_alloc_sva = amd_iommu_domain_alloc_sva, 2888 2889 .probe_device = amd_iommu_probe_device, 2889 2890 .release_device = amd_iommu_release_device,
+4 -5
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
··· 3132 3132 }; 3133 3133 3134 3134 static struct iommu_domain * 3135 - arm_smmu_domain_alloc_user(struct device *dev, u32 flags, 3136 - struct iommu_domain *parent, 3137 - const struct iommu_user_data *user_data) 3135 + arm_smmu_domain_alloc_paging_flags(struct device *dev, u32 flags, 3136 + const struct iommu_user_data *user_data) 3138 3137 { 3139 3138 struct arm_smmu_master *master = dev_iommu_priv_get(dev); 3140 3139 const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | ··· 3144 3145 3145 3146 if (flags & ~PAGING_FLAGS) 3146 3147 return ERR_PTR(-EOPNOTSUPP); 3147 - if (parent || user_data) 3148 + if (user_data) 3148 3149 return ERR_PTR(-EOPNOTSUPP); 3149 3150 3150 3151 if (flags & IOMMU_HWPT_ALLOC_PASID) ··· 3545 3546 .hw_info = arm_smmu_hw_info, 3546 3547 .domain_alloc_paging = arm_smmu_domain_alloc_paging, 3547 3548 .domain_alloc_sva = arm_smmu_sva_domain_alloc, 3548 - .domain_alloc_user = arm_smmu_domain_alloc_user, 3549 + .domain_alloc_paging_flags = arm_smmu_domain_alloc_paging_flags, 3549 3550 .probe_device = arm_smmu_probe_device, 3550 3551 .release_device = arm_smmu_release_device, 3551 3552 .device_group = arm_smmu_device_group,
+4 -11
drivers/iommu/intel/iommu.c
··· 3328 3328 } 3329 3329 3330 3330 static struct iommu_domain * 3331 - intel_iommu_domain_alloc_user(struct device *dev, u32 flags, 3332 - struct iommu_domain *parent, 3333 - const struct iommu_user_data *user_data) 3331 + intel_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags, 3332 + const struct iommu_user_data *user_data) 3334 3333 { 3335 3334 struct device_domain_info *info = dev_iommu_priv_get(dev); 3336 3335 bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; ··· 3338 3339 struct dmar_domain *dmar_domain; 3339 3340 struct iommu_domain *domain; 3340 3341 bool first_stage; 3341 - 3342 - /* Must be NESTING domain */ 3343 - if (parent) { 3344 - if (!nested_supported(iommu) || flags) 3345 - return ERR_PTR(-EOPNOTSUPP); 3346 - return intel_nested_domain_alloc(parent, user_data); 3347 - } 3348 3342 3349 3343 if (flags & 3350 3344 (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING ··· 4464 4472 .identity_domain = &identity_domain, 4465 4473 .capable = intel_iommu_capable, 4466 4474 .hw_info = intel_iommu_hw_info, 4467 - .domain_alloc_user = intel_iommu_domain_alloc_user, 4475 + .domain_alloc_paging_flags = intel_iommu_domain_alloc_paging_flags, 4468 4476 .domain_alloc_sva = intel_svm_domain_alloc, 4469 4477 .domain_alloc_paging = intel_iommu_domain_alloc_paging, 4478 + .domain_alloc_nested = intel_iommu_domain_alloc_nested, 4470 4479 .probe_device = intel_iommu_probe_device, 4471 4480 .release_device = intel_iommu_release_device, 4472 4481 .get_resv_regions = intel_iommu_get_resv_regions,
+4 -2
drivers/iommu/intel/iommu.h
··· 1265 1265 int dmar_ir_support(void); 1266 1266 1267 1267 void iommu_flush_write_buffer(struct intel_iommu *iommu); 1268 - struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, 1269 - const struct iommu_user_data *user_data); 1268 + struct iommu_domain * 1269 + intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, 1270 + u32 flags, 1271 + const struct iommu_user_data *user_data); 1270 1272 struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid); 1271 1273 1272 1274 enum cache_tag_type {
+9 -2
drivers/iommu/intel/nested.c
··· 186 186 .cache_invalidate_user = intel_nested_cache_invalidate_user, 187 187 }; 188 188 189 - struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent, 190 - const struct iommu_user_data *user_data) 189 + struct iommu_domain * 190 + intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, 191 + u32 flags, 192 + const struct iommu_user_data *user_data) 191 193 { 194 + struct device_domain_info *info = dev_iommu_priv_get(dev); 192 195 struct dmar_domain *s2_domain = to_dmar_domain(parent); 196 + struct intel_iommu *iommu = info->iommu; 193 197 struct iommu_hwpt_vtd_s1 vtd; 194 198 struct dmar_domain *domain; 195 199 int ret; 200 + 201 + if (!nested_supported(iommu) || flags) 202 + return ERR_PTR(-EOPNOTSUPP); 196 203 197 204 /* Must be nested domain */ 198 205 if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
+2 -2
drivers/iommu/iommu.c
··· 1987 1987 1988 1988 if (ops->domain_alloc_paging && !flags) 1989 1989 domain = ops->domain_alloc_paging(dev); 1990 - else if (ops->domain_alloc_user) 1991 - domain = ops->domain_alloc_user(dev, flags, NULL, NULL); 1990 + else if (ops->domain_alloc_paging_flags) 1991 + domain = ops->domain_alloc_paging_flags(dev, flags, NULL); 1992 1992 else if (ops->domain_alloc && !flags) 1993 1993 domain = ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); 1994 1994 else
+8 -8
drivers/iommu/iommufd/hw_pagetable.c
··· 119 119 120 120 lockdep_assert_held(&ioas->mutex); 121 121 122 - if ((flags || user_data) && !ops->domain_alloc_user) 122 + if ((flags || user_data) && !ops->domain_alloc_paging_flags) 123 123 return ERR_PTR(-EOPNOTSUPP); 124 124 if (flags & ~valid_flags) 125 125 return ERR_PTR(-EOPNOTSUPP); ··· 139 139 hwpt_paging->ioas = ioas; 140 140 hwpt_paging->nest_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT; 141 141 142 - if (ops->domain_alloc_user) { 143 - hwpt->domain = ops->domain_alloc_user(idev->dev, flags, NULL, 144 - user_data); 142 + if (ops->domain_alloc_paging_flags) { 143 + hwpt->domain = ops->domain_alloc_paging_flags(idev->dev, flags, 144 + user_data); 145 145 if (IS_ERR(hwpt->domain)) { 146 146 rc = PTR_ERR(hwpt->domain); 147 147 hwpt->domain = NULL; ··· 227 227 int rc; 228 228 229 229 if ((flags & ~IOMMU_HWPT_FAULT_ID_VALID) || 230 - !user_data->len || !ops->domain_alloc_user) 230 + !user_data->len || !ops->domain_alloc_nested) 231 231 return ERR_PTR(-EOPNOTSUPP); 232 232 if (parent->auto_domain || !parent->nest_parent || 233 233 parent->common.domain->owner != ops) ··· 242 242 refcount_inc(&parent->common.obj.users); 243 243 hwpt_nested->parent = parent; 244 244 245 - hwpt->domain = ops->domain_alloc_user(idev->dev, 246 - flags & ~IOMMU_HWPT_FAULT_ID_VALID, 247 - parent->common.domain, user_data); 245 + hwpt->domain = ops->domain_alloc_nested( 246 + idev->dev, parent->common.domain, 247 + flags & ~IOMMU_HWPT_FAULT_ID_VALID, user_data); 248 248 if (IS_ERR(hwpt->domain)) { 249 249 rc = PTR_ERR(hwpt->domain); 250 250 hwpt->domain = NULL;
+6 -9
drivers/iommu/iommufd/selftest.c
··· 356 356 } 357 357 358 358 static struct iommu_domain * 359 - mock_domain_alloc_nested(struct iommu_domain *parent, u32 flags, 360 - const struct iommu_user_data *user_data) 359 + mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent, 360 + u32 flags, const struct iommu_user_data *user_data) 361 361 { 362 362 struct mock_iommu_domain_nested *mock_nested; 363 363 struct mock_iommu_domain *mock_parent; ··· 379 379 } 380 380 381 381 static struct iommu_domain * 382 - mock_domain_alloc_user(struct device *dev, u32 flags, 383 - struct iommu_domain *parent, 384 - const struct iommu_user_data *user_data) 382 + mock_domain_alloc_paging_flags(struct device *dev, u32 flags, 383 + const struct iommu_user_data *user_data) 385 384 { 386 385 bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; 387 386 const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING | ··· 388 389 bool no_dirty_ops = to_mock_dev(dev)->flags & 389 390 MOCK_FLAGS_DEVICE_NO_DIRTY; 390 391 struct iommu_domain *domain; 391 - 392 - if (parent) 393 - return mock_domain_alloc_nested(parent, flags, user_data); 394 392 395 393 if (user_data) 396 394 return ERR_PTR(-EOPNOTSUPP); ··· 714 718 .pgsize_bitmap = MOCK_IO_PAGE_SIZE, 715 719 .hw_info = mock_domain_hw_info, 716 720 .domain_alloc_paging = mock_domain_alloc_paging, 717 - .domain_alloc_user = mock_domain_alloc_user, 721 + .domain_alloc_paging_flags = mock_domain_alloc_paging_flags, 722 + .domain_alloc_nested = mock_domain_alloc_nested, 718 723 .capable = mock_domain_capable, 719 724 .device_group = generic_device_group, 720 725 .probe_device = mock_probe_device,
+16 -11
include/linux/iommu.h
··· 557 557 * @domain_alloc: allocate and return an iommu domain if success. Otherwise 558 558 * NULL is returned. The domain is not fully initialized until 559 559 * the caller iommu_domain_alloc() returns. 560 - * @domain_alloc_user: Allocate an iommu domain corresponding to the input 561 - * parameters as defined in include/uapi/linux/iommufd.h. 562 - * Upon success, if the @user_data is valid and the @parent 563 - * points to a kernel-managed domain, the new domain must be 564 - * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be 565 - * NULL while the @user_data can be optionally provided, the 566 - * new domain must support __IOMMU_DOMAIN_PAGING. 567 - * Upon failure, ERR_PTR must be returned. 560 + * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the 561 + * input parameters as defined in 562 + * include/uapi/linux/iommufd.h. The @user_data can be 563 + * optionally provided, the new domain must support 564 + * __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be 565 + * returned. 568 566 * @domain_alloc_paging: Allocate an iommu_domain that can be used for 569 - * UNMANAGED, DMA, and DMA_FQ domain types. 567 + * UNMANAGED, DMA, and DMA_FQ domain types. This is the 568 + * same as invoking domain_alloc_paging_flags() with 569 + * @flags=0, @user_data=NULL. A driver should implement 570 + * only one of the two ops. 570 571 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. 572 + * @domain_alloc_nested: Allocate an iommu_domain for nested translation. 
571 573 * @probe_device: Add device to iommu driver handling 572 574 * @release_device: Remove device from iommu driver handling 573 575 * @probe_finalize: Do final setup work after the device is added to an IOMMU ··· 618 616 619 617 /* Domain allocation and freeing by the iommu driver */ 620 618 struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); 621 - struct iommu_domain *(*domain_alloc_user)( 622 - struct device *dev, u32 flags, struct iommu_domain *parent, 619 + struct iommu_domain *(*domain_alloc_paging_flags)( 620 + struct device *dev, u32 flags, 623 621 const struct iommu_user_data *user_data); 624 622 struct iommu_domain *(*domain_alloc_paging)(struct device *dev); 625 623 struct iommu_domain *(*domain_alloc_sva)(struct device *dev, 626 624 struct mm_struct *mm); 625 + struct iommu_domain *(*domain_alloc_nested)( 626 + struct device *dev, struct iommu_domain *parent, u32 flags, 627 + const struct iommu_user_data *user_data); 627 628 628 629 struct iommu_device *(*probe_device)(struct device *dev); 629 630 void (*release_device)(struct device *dev);