Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu/vt-d: Replace intel SVM APIs with generic SVA APIs

This patch is an initial step to replace Intel SVM code with the
following IOMMU SVA ops:
intel_svm_bind_mm() => iommu_sva_bind_device()
intel_svm_unbind_mm() => iommu_sva_unbind_device()
intel_svm_is_pasid_valid() => iommu_sva_get_pasid()

The features below will continue to work but are not included in this patch,
because they are handled mostly within the IOMMU subsystem.
- IO page fault
- mmu notifier

Consolidation of the above will come after merging the generic IOMMU SVA
code[1]. There should not be any changes needed for SVA users such as
accelerator device drivers during this time.

[1] http://jpbrucker.net/sva/

Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20200516062101.29541-12-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>

authored by

Jacob Pan and committed by
Joerg Roedel
064a57d7 76fdd6c5

+78 -141
+3
drivers/iommu/intel-iommu.c
··· 6071 6071 .cache_invalidate = intel_iommu_sva_invalidate, 6072 6072 .sva_bind_gpasid = intel_svm_bind_gpasid, 6073 6073 .sva_unbind_gpasid = intel_svm_unbind_gpasid, 6074 + .sva_bind = intel_svm_bind, 6075 + .sva_unbind = intel_svm_unbind, 6076 + .sva_get_pasid = intel_svm_get_pasid, 6074 6077 #endif 6075 6078 }; 6076 6079
+69 -55
drivers/iommu/intel-svm.c
··· 426 426 return ret; 427 427 } 428 428 429 - int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops) 429 + /* Caller must hold pasid_mutex, mm reference */ 430 + static int 431 + intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops, 432 + struct mm_struct *mm, struct intel_svm_dev **sd) 430 433 { 431 434 struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); 432 435 struct device_domain_info *info; 433 436 struct intel_svm_dev *sdev; 434 437 struct intel_svm *svm = NULL; 435 - struct mm_struct *mm = NULL; 436 438 int pasid_max; 437 439 int ret; 438 440 ··· 451 449 } else 452 450 pasid_max = 1 << 20; 453 451 452 + /* Bind supervisor PASID shuld have mm = NULL */ 454 453 if (flags & SVM_FLAG_SUPERVISOR_MODE) { 455 - if (!ecap_srs(iommu->ecap)) 454 + if (!ecap_srs(iommu->ecap) || mm) { 455 + pr_err("Supervisor PASID with user provided mm.\n"); 456 456 return -EINVAL; 457 - } else if (pasid) { 458 - mm = get_task_mm(current); 459 - BUG_ON(!mm); 457 + } 460 458 } 461 459 462 - mutex_lock(&pasid_mutex); 463 - if (pasid && !(flags & SVM_FLAG_PRIVATE_PASID)) { 460 + if (!(flags & SVM_FLAG_PRIVATE_PASID)) { 464 461 struct intel_svm *t; 465 462 466 463 list_for_each_entry(t, &global_svm_list, list) { ··· 497 496 sdev->dev = dev; 498 497 499 498 ret = intel_iommu_enable_pasid(iommu, dev); 500 - if (ret || !pasid) { 501 - /* If they don't actually want to assign a PASID, this is 502 - * just an enabling check/preparation. 
*/ 499 + if (ret) { 503 500 kfree(sdev); 504 501 goto out; 505 502 } ··· 596 597 } 597 598 } 598 599 list_add_rcu(&sdev->list, &svm->devs); 599 - 600 - success: 601 - *pasid = svm->pasid; 600 + success: 601 + sdev->pasid = svm->pasid; 602 + sdev->sva.dev = dev; 603 + if (sd) 604 + *sd = sdev; 602 605 ret = 0; 603 606 out: 604 - mutex_unlock(&pasid_mutex); 605 - if (mm) 606 - mmput(mm); 607 607 return ret; 608 608 } 609 - EXPORT_SYMBOL_GPL(intel_svm_bind_mm); 610 609 610 + /* Caller must hold pasid_mutex */ 611 611 int intel_svm_unbind_mm(struct device *dev, int pasid) 612 612 { 613 613 struct intel_svm_dev *sdev; ··· 614 616 struct intel_svm *svm; 615 617 int ret = -EINVAL; 616 618 617 - mutex_lock(&pasid_mutex); 618 619 iommu = intel_svm_device_to_iommu(dev); 619 620 if (!iommu) 620 621 goto out; ··· 659 662 break; 660 663 } 661 664 out: 662 - mutex_unlock(&pasid_mutex); 663 665 664 666 return ret; 665 667 } 666 - EXPORT_SYMBOL_GPL(intel_svm_unbind_mm); 667 - 668 - int intel_svm_is_pasid_valid(struct device *dev, int pasid) 669 - { 670 - struct intel_iommu *iommu; 671 - struct intel_svm *svm; 672 - int ret = -EINVAL; 673 - 674 - mutex_lock(&pasid_mutex); 675 - iommu = intel_svm_device_to_iommu(dev); 676 - if (!iommu) 677 - goto out; 678 - 679 - svm = ioasid_find(NULL, pasid, NULL); 680 - if (!svm) 681 - goto out; 682 - 683 - if (IS_ERR(svm)) { 684 - ret = PTR_ERR(svm); 685 - goto out; 686 - } 687 - /* init_mm is used in this case */ 688 - if (!svm->mm) 689 - ret = 1; 690 - else if (atomic_read(&svm->mm->mm_users) > 0) 691 - ret = 1; 692 - else 693 - ret = 0; 694 - 695 - out: 696 - mutex_unlock(&pasid_mutex); 697 - 698 - return ret; 699 - } 700 - EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid); 701 668 702 669 /* Page request queue descriptor */ 703 670 struct page_req_dsc { ··· 854 893 dmar_writeq(iommu->reg + DMAR_PQH_REG, tail); 855 894 856 895 return IRQ_RETVAL(handled); 896 + } 897 + 898 + #define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, 
sva) 899 + struct iommu_sva * 900 + intel_svm_bind(struct device *dev, struct mm_struct *mm, void *drvdata) 901 + { 902 + struct iommu_sva *sva = ERR_PTR(-EINVAL); 903 + struct intel_svm_dev *sdev = NULL; 904 + int flags = 0; 905 + int ret; 906 + 907 + /* 908 + * TODO: Consolidate with generic iommu-sva bind after it is merged. 909 + * It will require shared SVM data structures, i.e. combine io_mm 910 + * and intel_svm etc. 911 + */ 912 + if (drvdata) 913 + flags = *(int *)drvdata; 914 + mutex_lock(&pasid_mutex); 915 + ret = intel_svm_bind_mm(dev, flags, NULL, mm, &sdev); 916 + if (ret) 917 + sva = ERR_PTR(ret); 918 + else if (sdev) 919 + sva = &sdev->sva; 920 + else 921 + WARN(!sdev, "SVM bind succeeded with no sdev!\n"); 922 + 923 + mutex_unlock(&pasid_mutex); 924 + 925 + return sva; 926 + } 927 + 928 + void intel_svm_unbind(struct iommu_sva *sva) 929 + { 930 + struct intel_svm_dev *sdev; 931 + 932 + mutex_lock(&pasid_mutex); 933 + sdev = to_intel_svm_dev(sva); 934 + intel_svm_unbind_mm(sdev->dev, sdev->pasid); 935 + mutex_unlock(&pasid_mutex); 936 + } 937 + 938 + int intel_svm_get_pasid(struct iommu_sva *sva) 939 + { 940 + struct intel_svm_dev *sdev; 941 + int pasid; 942 + 943 + mutex_lock(&pasid_mutex); 944 + sdev = to_intel_svm_dev(sva); 945 + pasid = sdev->pasid; 946 + mutex_unlock(&pasid_mutex); 947 + 948 + return pasid; 857 949 }
+6
include/linux/intel-iommu.h
··· 723 723 int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, 724 724 struct iommu_gpasid_bind_data *data); 725 725 int intel_svm_unbind_gpasid(struct device *dev, int pasid); 726 + struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, 727 + void *drvdata); 728 + void intel_svm_unbind(struct iommu_sva *handle); 729 + int intel_svm_get_pasid(struct iommu_sva *handle); 726 730 struct svm_dev_ops; 727 731 728 732 struct intel_svm_dev { ··· 734 730 struct rcu_head rcu; 735 731 struct device *dev; 736 732 struct svm_dev_ops *ops; 733 + struct iommu_sva sva; 734 + int pasid; 737 735 int users; 738 736 u16 did; 739 737 u16 dev_iotlb:1;
-86
include/linux/intel-svm.h
··· 21 21 #define SVM_REQ_EXEC (1<<1) 22 22 #define SVM_REQ_PRIV (1<<0) 23 23 24 - 25 24 /* 26 25 * The SVM_FLAG_PRIVATE_PASID flag requests a PASID which is *not* the "main" 27 26 * PASID for the current process. Even if a PASID already exists, a new one ··· 55 56 * which requires guest and host PASID translation at both directions. 56 57 */ 57 58 #define SVM_FLAG_GUEST_PASID (1<<3) 58 - 59 - #ifdef CONFIG_INTEL_IOMMU_SVM 60 - 61 - /** 62 - * intel_svm_bind_mm() - Bind the current process to a PASID 63 - * @dev: Device to be granted access 64 - * @pasid: Address for allocated PASID 65 - * @flags: Flags. Later for requesting supervisor mode, etc. 66 - * @ops: Callbacks to device driver 67 - * 68 - * This function attempts to enable PASID support for the given device. 69 - * If the @pasid argument is non-%NULL, a PASID is allocated for access 70 - * to the MM of the current process. 71 - * 72 - * By using a %NULL value for the @pasid argument, this function can 73 - * be used to simply validate that PASID support is available for the 74 - * given device — i.e. that it is behind an IOMMU which has the 75 - * requisite support, and is enabled. 76 - * 77 - * Page faults are handled transparently by the IOMMU code, and there 78 - * should be no need for the device driver to be involved. If a page 79 - * fault cannot be handled (i.e. is an invalid address rather than 80 - * just needs paging in), then the page request will be completed by 81 - * the core IOMMU code with appropriate status, and the device itself 82 - * can then report the resulting fault to its driver via whatever 83 - * mechanism is appropriate. 84 - * 85 - * Multiple calls from the same process may result in the same PASID 86 - * being re-used. A reference count is kept. 
87 - */ 88 - extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, 89 - struct svm_dev_ops *ops); 90 - 91 - /** 92 - * intel_svm_unbind_mm() - Unbind a specified PASID 93 - * @dev: Device for which PASID was allocated 94 - * @pasid: PASID value to be unbound 95 - * 96 - * This function allows a PASID to be retired when the device no 97 - * longer requires access to the address space of a given process. 98 - * 99 - * If the use count for the PASID in question reaches zero, the 100 - * PASID is revoked and may no longer be used by hardware. 101 - * 102 - * Device drivers are required to ensure that no access (including 103 - * page requests) is currently outstanding for the PASID in question, 104 - * before calling this function. 105 - */ 106 - extern int intel_svm_unbind_mm(struct device *dev, int pasid); 107 - 108 - /** 109 - * intel_svm_is_pasid_valid() - check if pasid is valid 110 - * @dev: Device for which PASID was allocated 111 - * @pasid: PASID value to be checked 112 - * 113 - * This function checks if the specified pasid is still valid. A 114 - * valid pasid means the backing mm is still having a valid user. 115 - * For kernel callers init_mm is always valid. for other mm, if mm->mm_users 116 - * is non-zero, it is valid. 117 - * 118 - * returns -EINVAL if invalid pasid, 0 if pasid ref count is invalid 119 - * 1 if pasid is valid. 
120 - */ 121 - extern int intel_svm_is_pasid_valid(struct device *dev, int pasid); 122 - 123 - #else /* CONFIG_INTEL_IOMMU_SVM */ 124 - 125 - static inline int intel_svm_bind_mm(struct device *dev, int *pasid, 126 - int flags, struct svm_dev_ops *ops) 127 - { 128 - return -ENOSYS; 129 - } 130 - 131 - static inline int intel_svm_unbind_mm(struct device *dev, int pasid) 132 - { 133 - BUG(); 134 - } 135 - 136 - static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid) 137 - { 138 - return -EINVAL; 139 - } 140 - #endif /* CONFIG_INTEL_IOMMU_SVM */ 141 - 142 - #define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL)) 143 59 144 60 #endif /* __INTEL_SVM_H__ */