Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'iommu-fixes-v6.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux

Pull iommu fixes from Joerg Roedel:

- Fix a device-stall problem in bad io-page-fault setups (faults
received from devices with no supporting domain attached).

- Context flush fix for Intel VT-d.

- Do not allow non-read+non-write mapping through iommufd as most
implementations cannot handle that.

- Fix a possible infinite-loop issue in map_pages() path.

- Add Jean-Philippe as reviewer for SMMUv3 SVA support.

* tag 'iommu-fixes-v6.11-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux:
MAINTAINERS: Add Jean-Philippe as SMMUv3 SVA reviewer
iommu: Do not return 0 from map_pages if it doesn't do anything
iommufd: Do not allow creating areas without READ or WRITE
iommu/vt-d: Fix incorrect domain ID in context flush helper
iommu: Handle iommu faults for a bad iopf setup

+116 -56
+4
MAINTAINERS
··· 1880 1880 F: drivers/iommu/arm/ 1881 1881 F: drivers/iommu/io-pgtable-arm* 1882 1882 1883 + ARM SMMU SVA SUPPORT 1884 + R: Jean-Philippe Brucker <jean-philippe@linaro.org> 1885 + F: drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c 1886 + 1883 1887 ARM SUB-ARCHITECTURES 1884 1888 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1885 1889 S: Maintained
+1 -1
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
··· 1777 1777 goto out_unlock; 1778 1778 } 1779 1779 1780 - iommu_report_device_fault(master->dev, &fault_evt); 1780 + ret = iommu_report_device_fault(master->dev, &fault_evt); 1781 1781 out_unlock: 1782 1782 mutex_unlock(&smmu->streams_mutex); 1783 1783 return ret;
+6 -2
drivers/iommu/intel/iommu.c
··· 1944 1944 { 1945 1945 struct intel_iommu *iommu = info->iommu; 1946 1946 struct context_entry *context; 1947 + u16 did; 1947 1948 1948 1949 spin_lock(&iommu->lock); 1949 1950 context = iommu_context_addr(iommu, bus, devfn, 0); ··· 1953 1952 return; 1954 1953 } 1955 1954 1955 + did = context_domain_id(context); 1956 1956 context_clear_entry(context); 1957 1957 __iommu_flush_cache(iommu, context, sizeof(*context)); 1958 1958 spin_unlock(&iommu->lock); 1959 - intel_context_flush_present(info, context, true); 1959 + intel_context_flush_present(info, context, did, true); 1960 1960 } 1961 1961 1962 1962 static int domain_setup_first_level(struct intel_iommu *iommu, ··· 4251 4249 struct intel_iommu *iommu = info->iommu; 4252 4250 u8 bus = info->bus, devfn = info->devfn; 4253 4251 struct context_entry *context; 4252 + u16 did; 4254 4253 4255 4254 spin_lock(&iommu->lock); 4256 4255 if (context_copied(iommu, bus, devfn)) { ··· 4264 4261 spin_unlock(&iommu->lock); 4265 4262 return -ENODEV; 4266 4263 } 4264 + did = context_domain_id(context); 4267 4265 4268 4266 if (enable) 4269 4267 context_set_sm_pre(context); ··· 4273 4269 4274 4270 if (!ecap_coherent(iommu->ecap)) 4275 4271 clflush_cache_range(context, sizeof(*context)); 4276 - intel_context_flush_present(info, context, true); 4272 + intel_context_flush_present(info, context, did, true); 4277 4273 spin_unlock(&iommu->lock); 4278 4274 4279 4275 return 0;
+1 -1
drivers/iommu/intel/iommu.h
··· 1154 1154 1155 1155 void intel_context_flush_present(struct device_domain_info *info, 1156 1156 struct context_entry *context, 1157 - bool affect_domains); 1157 + u16 did, bool affect_domains); 1158 1158 1159 1159 #ifdef CONFIG_INTEL_IOMMU_SVM 1160 1160 void intel_svm_check(struct intel_iommu *iommu);
+4 -3
drivers/iommu/intel/pasid.c
··· 683 683 struct device_domain_info *info = dev_iommu_priv_get(dev); 684 684 struct intel_iommu *iommu = info->iommu; 685 685 struct context_entry *context; 686 + u16 did; 686 687 687 688 spin_lock(&iommu->lock); 688 689 context = iommu_context_addr(iommu, bus, devfn, false); ··· 692 691 return; 693 692 } 694 693 694 + did = context_domain_id(context); 695 695 context_clear_entry(context); 696 696 __iommu_flush_cache(iommu, context, sizeof(*context)); 697 697 spin_unlock(&iommu->lock); 698 - intel_context_flush_present(info, context, false); 698 + intel_context_flush_present(info, context, did, false); 699 699 } 700 700 701 701 static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data) ··· 887 885 */ 888 886 void intel_context_flush_present(struct device_domain_info *info, 889 887 struct context_entry *context, 890 - bool flush_domains) 888 + u16 did, bool flush_domains) 891 889 { 892 890 struct intel_iommu *iommu = info->iommu; 893 - u16 did = context_domain_id(context); 894 891 struct pasid_entry *pte; 895 892 int i; 896 893
+83 -38
drivers/iommu/io-pgfault.c
··· 115 115 return group; 116 116 } 117 117 118 + static struct iommu_attach_handle *find_fault_handler(struct device *dev, 119 + struct iopf_fault *evt) 120 + { 121 + struct iommu_fault *fault = &evt->fault; 122 + struct iommu_attach_handle *attach_handle; 123 + 124 + if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { 125 + attach_handle = iommu_attach_handle_get(dev->iommu_group, 126 + fault->prm.pasid, 0); 127 + if (IS_ERR(attach_handle)) { 128 + const struct iommu_ops *ops = dev_iommu_ops(dev); 129 + 130 + if (!ops->user_pasid_table) 131 + return NULL; 132 + /* 133 + * The iommu driver for this device supports user- 134 + * managed PASID table. Therefore page faults for 135 + * any PASID should go through the NESTING domain 136 + * attached to the device RID. 137 + */ 138 + attach_handle = iommu_attach_handle_get( 139 + dev->iommu_group, IOMMU_NO_PASID, 140 + IOMMU_DOMAIN_NESTED); 141 + if (IS_ERR(attach_handle)) 142 + return NULL; 143 + } 144 + } else { 145 + attach_handle = iommu_attach_handle_get(dev->iommu_group, 146 + IOMMU_NO_PASID, 0); 147 + 148 + if (IS_ERR(attach_handle)) 149 + return NULL; 150 + } 151 + 152 + if (!attach_handle->domain->iopf_handler) 153 + return NULL; 154 + 155 + return attach_handle; 156 + } 157 + 158 + static void iopf_error_response(struct device *dev, struct iopf_fault *evt) 159 + { 160 + const struct iommu_ops *ops = dev_iommu_ops(dev); 161 + struct iommu_fault *fault = &evt->fault; 162 + struct iommu_page_response resp = { 163 + .pasid = fault->prm.pasid, 164 + .grpid = fault->prm.grpid, 165 + .code = IOMMU_PAGE_RESP_INVALID 166 + }; 167 + 168 + ops->page_response(dev, evt, &resp); 169 + } 170 + 118 171 /** 119 172 * iommu_report_device_fault() - Report fault event to device driver 120 173 * @dev: the device ··· 206 153 * handling framework should guarantee that the iommu domain could only be 207 154 * freed after the device has stopped generating page faults (or the iommu 208 155 * hardware has been set to block 
the page faults) and the pending page faults 209 - * have been flushed. 156 + * have been flushed. In case no page fault handler is attached or no iopf params 157 + * are setup, then the ops->page_response() is called to complete the evt. 158 + * 159 + * Returns 0 on success, or an error in case of a bad/failed iopf setup. 210 160 */ 211 - void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) 161 + int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) 212 162 { 163 + struct iommu_attach_handle *attach_handle; 213 164 struct iommu_fault *fault = &evt->fault; 214 165 struct iommu_fault_param *iopf_param; 215 166 struct iopf_group abort_group = {}; 216 167 struct iopf_group *group; 217 168 169 + attach_handle = find_fault_handler(dev, evt); 170 + if (!attach_handle) 171 + goto err_bad_iopf; 172 + 173 + /* 174 + * Something has gone wrong if a fault capable domain is attached but no 175 + * iopf_param is setup 176 + */ 218 177 iopf_param = iopf_get_dev_fault_param(dev); 219 178 if (WARN_ON(!iopf_param)) 220 - return; 179 + goto err_bad_iopf; 221 180 222 181 if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) { 223 - report_partial_fault(iopf_param, fault); 182 + int ret; 183 + 184 + ret = report_partial_fault(iopf_param, fault); 224 185 iopf_put_dev_fault_param(iopf_param); 225 186 /* A request that is not the last does not need to be ack'd */ 226 - return; 187 + 188 + return ret; 227 189 } 228 190 229 191 /* ··· 253 185 if (group == &abort_group) 254 186 goto err_abort; 255 187 256 - if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { 257 - group->attach_handle = iommu_attach_handle_get(dev->iommu_group, 258 - fault->prm.pasid, 259 - 0); 260 - if (IS_ERR(group->attach_handle)) { 261 - const struct iommu_ops *ops = dev_iommu_ops(dev); 262 - 263 - if (!ops->user_pasid_table) 264 - goto err_abort; 265 - 266 - /* 267 - * The iommu driver for this device supports user- 268 - * managed PASID table. 
Therefore page faults for 269 - * any PASID should go through the NESTING domain 270 - * attached to the device RID. 271 - */ 272 - group->attach_handle = 273 - iommu_attach_handle_get(dev->iommu_group, 274 - IOMMU_NO_PASID, 275 - IOMMU_DOMAIN_NESTED); 276 - if (IS_ERR(group->attach_handle)) 277 - goto err_abort; 278 - } 279 - } else { 280 - group->attach_handle = 281 - iommu_attach_handle_get(dev->iommu_group, IOMMU_NO_PASID, 0); 282 - if (IS_ERR(group->attach_handle)) 283 - goto err_abort; 284 - } 285 - 286 - if (!group->attach_handle->domain->iopf_handler) 287 - goto err_abort; 188 + group->attach_handle = attach_handle; 288 189 289 190 /* 290 191 * On success iopf_handler must call iopf_group_response() and ··· 262 225 if (group->attach_handle->domain->iopf_handler(group)) 263 226 goto err_abort; 264 227 265 - return; 228 + return 0; 266 229 267 230 err_abort: 268 231 dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n", ··· 272 235 __iopf_free_group(group); 273 236 else 274 237 iopf_free_group(group); 238 + 239 + return 0; 240 + 241 + err_bad_iopf: 242 + if (fault->type == IOMMU_FAULT_PAGE_REQ) 243 + iopf_error_response(dev, evt); 244 + 245 + return -EINVAL; 275 246 } 276 247 EXPORT_SYMBOL_GPL(iommu_report_device_fault); 277 248
+1 -2
drivers/iommu/io-pgtable-arm-v7s.c
··· 552 552 paddr >= (1ULL << data->iop.cfg.oas))) 553 553 return -ERANGE; 554 554 555 - /* If no access, then nothing to do */ 556 555 if (!(prot & (IOMMU_READ | IOMMU_WRITE))) 557 - return 0; 556 + return -EINVAL; 558 557 559 558 while (pgcount--) { 560 559 ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
+1 -2
drivers/iommu/io-pgtable-arm.c
··· 515 515 if (WARN_ON(iaext || paddr >> cfg->oas)) 516 516 return -ERANGE; 517 517 518 - /* If no access, then nothing to do */ 519 518 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) 520 - return 0; 519 + return -EINVAL; 521 520 522 521 prot = arm_lpae_prot_to_pte(data, iommu_prot); 523 522 ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
+1 -2
drivers/iommu/io-pgtable-dart.c
··· 245 245 if (WARN_ON(paddr >> cfg->oas)) 246 246 return -ERANGE; 247 247 248 - /* If no access, then nothing to do */ 249 248 if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) 250 - return 0; 249 + return -EINVAL; 251 250 252 251 tbl = dart_get_table(data, iova); 253 252
+8
drivers/iommu/iommufd/ioas.c
··· 213 213 if (cmd->iova >= ULONG_MAX || cmd->length >= ULONG_MAX) 214 214 return -EOVERFLOW; 215 215 216 + if (!(cmd->flags & 217 + (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))) 218 + return -EINVAL; 219 + 216 220 ioas = iommufd_get_ioas(ucmd->ictx, cmd->ioas_id); 217 221 if (IS_ERR(ioas)) 218 222 return PTR_ERR(ioas); ··· 256 252 if (cmd->length >= ULONG_MAX || cmd->src_iova >= ULONG_MAX || 257 253 cmd->dst_iova >= ULONG_MAX) 258 254 return -EOVERFLOW; 255 + 256 + if (!(cmd->flags & 257 + (IOMMU_IOAS_MAP_WRITEABLE | IOMMU_IOAS_MAP_READABLE))) 258 + return -EINVAL; 259 259 260 260 src_ioas = iommufd_get_ioas(ucmd->ictx, cmd->src_ioas_id); 261 261 if (IS_ERR(src_ioas))
+3 -2
include/linux/iommu.h
··· 1563 1563 void iopf_queue_free(struct iopf_queue *queue); 1564 1564 int iopf_queue_discard_partial(struct iopf_queue *queue); 1565 1565 void iopf_free_group(struct iopf_group *group); 1566 - void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt); 1566 + int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt); 1567 1567 void iopf_group_response(struct iopf_group *group, 1568 1568 enum iommu_page_response_code status); 1569 1569 #else ··· 1601 1601 { 1602 1602 } 1603 1603 1604 - static inline void 1604 + static inline int 1605 1605 iommu_report_device_fault(struct device *dev, struct iopf_fault *evt) 1606 1606 { 1607 + return -ENODEV; 1607 1608 } 1608 1609 1609 1610 static inline void iopf_group_response(struct iopf_group *group,
+3 -3
tools/testing/selftests/iommu/iommufd.c
··· 825 825 { 826 826 struct iommu_ioas_copy copy_cmd = { 827 827 .size = sizeof(copy_cmd), 828 - .flags = IOMMU_IOAS_MAP_FIXED_IOVA, 828 + .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE, 829 829 .dst_ioas_id = self->ioas_id, 830 830 .src_ioas_id = self->ioas_id, 831 831 .length = PAGE_SIZE, ··· 1318 1318 { 1319 1319 struct iommu_ioas_copy copy_cmd = { 1320 1320 .size = sizeof(copy_cmd), 1321 - .flags = IOMMU_IOAS_MAP_FIXED_IOVA, 1321 + .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE, 1322 1322 .src_ioas_id = self->ioas_id, 1323 1323 .dst_iova = MOCK_APERTURE_START, 1324 1324 .length = MOCK_PAGE_SIZE, ··· 1608 1608 }; 1609 1609 struct iommu_ioas_copy copy_cmd = { 1610 1610 .size = sizeof(copy_cmd), 1611 - .flags = IOMMU_IOAS_MAP_FIXED_IOVA, 1611 + .flags = IOMMU_IOAS_MAP_FIXED_IOVA | IOMMU_IOAS_MAP_WRITEABLE, 1612 1612 .dst_ioas_id = self->ioas_id, 1613 1613 .dst_iova = MOCK_APERTURE_START, 1614 1614 .length = BUFFER_SIZE,