Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'iommu-fixes-v6.7-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

- Fix race conditions in device probe path

- Handle ERR_PTR() returns in __iommu_domain_alloc() path

- Update MAINTAINERS entry for Qualcomm IOMMUs

- Printk argument fix in device tree specific code

- Several Intel VT-d fixes from Lu Baolu:
- Do not support enforcing cache coherency for non-empty domains
- Avoid devTLB invalidation if iommu is off
- Disable PCI ATS in legacy passthrough mode
- Support non-PCI devices when clearing context
- Fix incorrect cache invalidation for mm notification
- Add MTL to quirk list to skip TE disabling
- Set variable intel_dirty_ops to static

* tag 'iommu-fixes-v6.7-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu: Fix printk arg in of_iommu_get_resv_regions()
iommu/vt-d: Set variable intel_dirty_ops to static
iommu/vt-d: Fix incorrect cache invalidation for mm notification
iommu/vt-d: Add MTL to quirk list to skip TE disabling
iommu/vt-d: Make context clearing consistent with context mapping
iommu/vt-d: Disable PCI ATS in legacy passthrough mode
iommu/vt-d: Omit devTLB invalidation requests when TES=0
iommu/vt-d: Support enforce_cache_coherency only for empty domains
iommu: Avoid more races around device probe
MAINTAINERS: list all Qualcomm IOMMU drivers in the QUALCOMM IOMMU entry
iommu: Flow ERR_PTR out from __iommu_domain_alloc()

+126 -42
+2
MAINTAINERS
··· 17946 17946 L: linux-arm-msm@vger.kernel.org 17947 17947 S: Maintained 17948 17948 F: drivers/iommu/arm/arm-smmu/qcom_iommu.c 17949 + F: drivers/iommu/arm/arm-smmu/arm-smmu-qcom* 17950 + F: drivers/iommu/msm_iommu* 17949 17951 17950 17952 QUALCOMM IPC ROUTER (QRTR) DRIVER 17951 17953 M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+6 -1
drivers/acpi/scan.c
··· 1568 1568 int err; 1569 1569 const struct iommu_ops *ops; 1570 1570 1571 + /* Serialise to make dev->iommu stable under our potential fwspec */ 1572 + mutex_lock(&iommu_probe_device_lock); 1571 1573 /* 1572 1574 * If we already translated the fwspec there is nothing left to do, 1573 1575 * return the iommu_ops. 1574 1576 */ 1575 1577 ops = acpi_iommu_fwspec_ops(dev); 1576 - if (ops) 1578 + if (ops) { 1579 + mutex_unlock(&iommu_probe_device_lock); 1577 1580 return ops; 1581 + } 1578 1582 1579 1583 err = iort_iommu_configure_id(dev, id_in); 1580 1584 if (err && err != -EPROBE_DEFER) 1581 1585 err = viot_iommu_configure(dev); 1586 + mutex_unlock(&iommu_probe_device_lock); 1582 1587 1583 1588 /* 1584 1589 * If we have reason to believe the IOMMU driver missed the initial
+18
drivers/iommu/intel/dmar.c
··· 1522 1522 { 1523 1523 struct qi_desc desc; 1524 1524 1525 + /* 1526 + * VT-d spec, section 4.3: 1527 + * 1528 + * Software is recommended to not submit any Device-TLB invalidation 1529 + * requests while address remapping hardware is disabled. 1530 + */ 1531 + if (!(iommu->gcmd & DMA_GCMD_TE)) 1532 + return; 1533 + 1525 1534 if (mask) { 1526 1535 addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1; 1527 1536 desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE; ··· 1595 1586 { 1596 1587 unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1); 1597 1588 struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0}; 1589 + 1590 + /* 1591 + * VT-d spec, section 4.3: 1592 + * 1593 + * Software is recommended to not submit any Device-TLB invalidation 1594 + * requests while address remapping hardware is disabled. 1595 + */ 1596 + if (!(iommu->gcmd & DMA_GCMD_TE)) 1597 + return; 1598 1598 1599 1599 desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) | 1600 1600 QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
+11 -7
drivers/iommu/intel/iommu.c
··· 299 299 #define IDENTMAP_AZALIA 4 300 300 301 301 const struct iommu_ops intel_iommu_ops; 302 - const struct iommu_dirty_ops intel_dirty_ops; 302 + static const struct iommu_dirty_ops intel_dirty_ops; 303 303 304 304 static bool translation_pre_enabled(struct intel_iommu *iommu) 305 305 { ··· 2207 2207 attr |= DMA_FL_PTE_DIRTY; 2208 2208 } 2209 2209 2210 + domain->has_mappings = true; 2211 + 2210 2212 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr; 2211 2213 2212 2214 while (nr_pages > 0) { ··· 2492 2490 return ret; 2493 2491 } 2494 2492 2495 - iommu_enable_pci_caps(info); 2493 + if (sm_supported(info->iommu) || !domain_type_is_si(info->domain)) 2494 + iommu_enable_pci_caps(info); 2496 2495 2497 2496 return 0; 2498 2497 } ··· 3928 3925 */ 3929 3926 static void domain_context_clear(struct device_domain_info *info) 3930 3927 { 3931 - if (!info->iommu || !info->dev || !dev_is_pci(info->dev)) 3932 - return; 3928 + if (!dev_is_pci(info->dev)) 3929 + domain_context_clear_one(info, info->bus, info->devfn); 3933 3930 3934 3931 pci_for_each_dma_alias(to_pci_dev(info->dev), 3935 3932 &domain_context_clear_one_cb, info); ··· 4363 4360 return true; 4364 4361 4365 4362 spin_lock_irqsave(&dmar_domain->lock, flags); 4366 - if (!domain_support_force_snooping(dmar_domain)) { 4363 + if (!domain_support_force_snooping(dmar_domain) || 4364 + (!dmar_domain->use_first_level && dmar_domain->has_mappings)) { 4367 4365 spin_unlock_irqrestore(&dmar_domain->lock, flags); 4368 4366 return false; 4369 4367 } ··· 4929 4925 return 0; 4930 4926 } 4931 4927 4932 - const struct iommu_dirty_ops intel_dirty_ops = { 4928 + static const struct iommu_dirty_ops intel_dirty_ops = { 4933 4929 .set_dirty_tracking = intel_iommu_set_dirty_tracking, 4934 4930 .read_and_clear_dirty = intel_iommu_read_and_clear_dirty, 4935 4931 }; ··· 5077 5073 ver = (dev->device >> 8) & 0xff; 5078 5074 if (ver != 0x45 && ver != 0x46 && ver != 0x4c && 5079 5075 ver != 0x4e && ver != 0x8a && ver != 0x98 && 5080 - 
ver != 0x9a && ver != 0xa7) 5076 + ver != 0x9a && ver != 0xa7 && ver != 0x7d) 5081 5077 return; 5082 5078 5083 5079 if (risky_device(dev))
+3
drivers/iommu/intel/iommu.h
··· 602 602 */ 603 603 u8 dirty_tracking:1; /* Dirty tracking is enabled */ 604 604 u8 nested_parent:1; /* Has other domains nested on it */ 605 + u8 has_mappings:1; /* Has mappings configured through 606 + * iommu_map() interface. 607 + */ 605 608 606 609 spinlock_t lock; /* Protect device tracking lists */ 607 610 struct list_head devices; /* all devices' list */
+26
drivers/iommu/intel/svm.c
··· 216 216 rcu_read_unlock(); 217 217 } 218 218 219 + static void intel_flush_svm_all(struct intel_svm *svm) 220 + { 221 + struct device_domain_info *info; 222 + struct intel_svm_dev *sdev; 223 + 224 + rcu_read_lock(); 225 + list_for_each_entry_rcu(sdev, &svm->devs, list) { 226 + info = dev_iommu_priv_get(sdev->dev); 227 + 228 + qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0); 229 + if (info->ats_enabled) { 230 + qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid, 231 + svm->pasid, sdev->qdep, 232 + 0, 64 - VTD_PAGE_SHIFT); 233 + quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT, 234 + svm->pasid, sdev->qdep); 235 + } 236 + } 237 + rcu_read_unlock(); 238 + } 239 + 219 240 /* Pages have been freed at this point */ 220 241 static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn, 221 242 struct mm_struct *mm, 222 243 unsigned long start, unsigned long end) 223 244 { 224 245 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); 246 + 247 + if (start == 0 && end == -1UL) { 248 + intel_flush_svm_all(svm); 249 + return; 250 + } 225 251 226 252 intel_flush_svm_range(svm, start, 227 253 (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
+49 -30
drivers/iommu/iommu.c
··· 485 485 dev_iommu_free(dev); 486 486 } 487 487 488 + DEFINE_MUTEX(iommu_probe_device_lock); 489 + 488 490 static int __iommu_probe_device(struct device *dev, struct list_head *group_list) 489 491 { 490 492 const struct iommu_ops *ops = dev->bus->iommu_ops; 491 493 struct iommu_group *group; 492 - static DEFINE_MUTEX(iommu_probe_device_lock); 493 494 struct group_device *gdev; 494 495 int ret; 495 496 ··· 503 502 * probably be able to use device_lock() here to minimise the scope, 504 503 * but for now enforcing a simple global ordering is fine. 505 504 */ 506 - mutex_lock(&iommu_probe_device_lock); 505 + lockdep_assert_held(&iommu_probe_device_lock); 507 506 508 507 /* Device is probed already if in a group */ 509 - if (dev->iommu_group) { 510 - ret = 0; 511 - goto out_unlock; 512 - } 508 + if (dev->iommu_group) 509 + return 0; 513 510 514 511 ret = iommu_init_device(dev, ops); 515 512 if (ret) 516 - goto out_unlock; 513 + return ret; 517 514 518 515 group = dev->iommu_group; 519 516 gdev = iommu_group_alloc_device(group, dev); ··· 547 548 list_add_tail(&group->entry, group_list); 548 549 } 549 550 mutex_unlock(&group->mutex); 550 - mutex_unlock(&iommu_probe_device_lock); 551 551 552 552 if (dev_is_pci(dev)) 553 553 iommu_dma_set_pci_32bit_workaround(dev); ··· 560 562 iommu_deinit_device(dev); 561 563 mutex_unlock(&group->mutex); 562 564 iommu_group_put(group); 563 - out_unlock: 564 - mutex_unlock(&iommu_probe_device_lock); 565 565 566 566 return ret; 567 567 } ··· 569 573 const struct iommu_ops *ops; 570 574 int ret; 571 575 576 + mutex_lock(&iommu_probe_device_lock); 572 577 ret = __iommu_probe_device(dev, NULL); 578 + mutex_unlock(&iommu_probe_device_lock); 573 579 if (ret) 574 580 return ret; 575 581 ··· 1786 1788 */ 1787 1789 if (ops->default_domain) { 1788 1790 if (req_type) 1789 - return NULL; 1791 + return ERR_PTR(-EINVAL); 1790 1792 return ops->default_domain; 1791 1793 } 1792 1794 ··· 1795 1797 1796 1798 /* The driver gave no guidance on what type to 
use, try the default */ 1797 1799 dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type); 1798 - if (dom) 1800 + if (!IS_ERR(dom)) 1799 1801 return dom; 1800 1802 1801 1803 /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */ 1802 1804 if (iommu_def_domain_type == IOMMU_DOMAIN_DMA) 1803 - return NULL; 1805 + return ERR_PTR(-EINVAL); 1804 1806 dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA); 1805 - if (!dom) 1806 - return NULL; 1807 + if (IS_ERR(dom)) 1808 + return dom; 1807 1809 1808 1810 pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA", 1809 1811 iommu_def_domain_type, group->name); ··· 1820 1822 struct list_head *group_list = data; 1821 1823 int ret; 1822 1824 1825 + mutex_lock(&iommu_probe_device_lock); 1823 1826 ret = __iommu_probe_device(dev, group_list); 1827 + mutex_unlock(&iommu_probe_device_lock); 1824 1828 if (ret == -ENODEV) 1825 1829 ret = 0; 1826 1830 ··· 2094 2094 else if (ops->domain_alloc) 2095 2095 domain = ops->domain_alloc(alloc_type); 2096 2096 else 2097 - return NULL; 2097 + return ERR_PTR(-EOPNOTSUPP); 2098 2098 2099 + /* 2100 + * Many domain_alloc ops now return ERR_PTR, make things easier for the 2101 + * driver by accepting ERR_PTR from all domain_alloc ops instead of 2102 + * having two rules. 
2103 + */ 2104 + if (IS_ERR(domain)) 2105 + return domain; 2099 2106 if (!domain) 2100 - return NULL; 2107 + return ERR_PTR(-ENOMEM); 2101 2108 2102 2109 domain->type = type; 2103 2110 /* ··· 2117 2110 if (!domain->ops) 2118 2111 domain->ops = ops->default_domain_ops; 2119 2112 2120 - if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { 2121 - iommu_domain_free(domain); 2122 - domain = NULL; 2113 + if (iommu_is_dma_domain(domain)) { 2114 + int rc; 2115 + 2116 + rc = iommu_get_dma_cookie(domain); 2117 + if (rc) { 2118 + iommu_domain_free(domain); 2119 + return ERR_PTR(rc); 2120 + } 2123 2121 } 2124 2122 return domain; 2125 2123 } ··· 2141 2129 2142 2130 struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) 2143 2131 { 2132 + struct iommu_domain *domain; 2133 + 2144 2134 if (bus == NULL || bus->iommu_ops == NULL) 2145 2135 return NULL; 2146 - return __iommu_domain_alloc(bus->iommu_ops, NULL, 2136 + domain = __iommu_domain_alloc(bus->iommu_ops, NULL, 2147 2137 IOMMU_DOMAIN_UNMANAGED); 2138 + if (IS_ERR(domain)) 2139 + return NULL; 2140 + return domain; 2148 2141 } 2149 2142 EXPORT_SYMBOL_GPL(iommu_domain_alloc); 2150 2143 ··· 3058 3041 return -EINVAL; 3059 3042 3060 3043 dom = iommu_group_alloc_default_domain(group, req_type); 3061 - if (!dom) 3062 - return -ENODEV; 3044 + if (IS_ERR(dom)) 3045 + return PTR_ERR(dom); 3063 3046 3064 3047 if (group->default_domain == dom) 3065 3048 return 0; ··· 3260 3243 3261 3244 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group) 3262 3245 { 3246 + struct iommu_domain *domain; 3247 + 3263 3248 if (group->blocking_domain) 3264 3249 return 0; 3265 3250 3266 - group->blocking_domain = 3267 - __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED); 3268 - if (!group->blocking_domain) { 3251 + domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED); 3252 + if (IS_ERR(domain)) { 3269 3253 /* 3270 3254 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED 3271 3255 * create 
an empty domain instead. 3272 3256 */ 3273 - group->blocking_domain = __iommu_group_domain_alloc( 3274 - group, IOMMU_DOMAIN_UNMANAGED); 3275 - if (!group->blocking_domain) 3276 - return -EINVAL; 3257 + domain = __iommu_group_domain_alloc(group, 3258 + IOMMU_DOMAIN_UNMANAGED); 3259 + if (IS_ERR(domain)) 3260 + return PTR_ERR(domain); 3277 3261 } 3262 + group->blocking_domain = domain; 3278 3263 return 0; 3279 3264 } 3280 3265
+10 -4
drivers/iommu/of_iommu.c
··· 112 112 const u32 *id) 113 113 { 114 114 const struct iommu_ops *ops = NULL; 115 - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); 115 + struct iommu_fwspec *fwspec; 116 116 int err = NO_IOMMU; 117 117 118 118 if (!master_np) 119 119 return NULL; 120 120 121 + /* Serialise to make dev->iommu stable under our potential fwspec */ 122 + mutex_lock(&iommu_probe_device_lock); 123 + fwspec = dev_iommu_fwspec_get(dev); 121 124 if (fwspec) { 122 - if (fwspec->ops) 125 + if (fwspec->ops) { 126 + mutex_unlock(&iommu_probe_device_lock); 123 127 return fwspec->ops; 124 - 128 + } 125 129 /* In the deferred case, start again from scratch */ 126 130 iommu_fwspec_free(dev); 127 131 } ··· 159 155 fwspec = dev_iommu_fwspec_get(dev); 160 156 ops = fwspec->ops; 161 157 } 158 + mutex_unlock(&iommu_probe_device_lock); 159 + 162 160 /* 163 161 * If we have reason to believe the IOMMU driver missed the initial 164 162 * probe for dev, replay it to get things in order. ··· 197 191 if (start == phys->start && end == phys->end) 198 192 return IOMMU_RESV_DIRECT; 199 193 200 - dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys, 194 + dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys, 201 195 &start, &end); 202 196 return IOMMU_RESV_RESERVED; 203 197 }
+1
include/linux/iommu.h
··· 845 845 dev->iommu->priv = priv; 846 846 } 847 847 848 + extern struct mutex iommu_probe_device_lock; 848 849 int iommu_probe_device(struct device *dev); 849 850 850 851 int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);