Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu: Remove deferred attach check from __iommu_detach_device()

At the current moment, __iommu_detach_device() is only called via call
chains that are after the device driver is attached - eg via explicit
attach APIs called by the device driver.

Commit bd421264ed30 ("iommu: Fix deferred domain attachment") has removed
deferred domain attachment check from __iommu_attach_device() path, so it
should just unconditionally work in the __iommu_detach_device() path.

It actually looks like a bug that we were blocking detach on these paths
since the attach was unconditional and the caller is going to free the
(probably) UNMANAGED domain once this returns.

The only place we should be testing for deferred attach is during the
initial point the dma device is linked to the group, and then again
during the dma api calls.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20230110025408.667767-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>

authored by

Jason Gunthorpe and committed by
Joerg Roedel
dd8a25c5 c1fe9119

+38 -34
+36 -34
drivers/iommu/iommu.c
··· 371 371 return ret; 372 372 } 373 373 374 + static bool iommu_is_attach_deferred(struct device *dev) 375 + { 376 + const struct iommu_ops *ops = dev_iommu_ops(dev); 377 + 378 + if (ops->is_attach_deferred) 379 + return ops->is_attach_deferred(dev); 380 + 381 + return false; 382 + } 383 + 384 + static int iommu_group_do_dma_first_attach(struct device *dev, void *data) 385 + { 386 + struct iommu_domain *domain = data; 387 + 388 + lockdep_assert_held(&dev->iommu_group->mutex); 389 + 390 + if (iommu_is_attach_deferred(dev)) { 391 + dev->iommu->attach_deferred = 1; 392 + return 0; 393 + } 394 + 395 + return __iommu_attach_device(domain, dev); 396 + } 397 + 374 398 int iommu_probe_device(struct device *dev) 375 399 { 376 400 const struct iommu_ops *ops; ··· 425 401 * attach the default domain. 426 402 */ 427 403 if (group->default_domain && !group->owner) { 428 - ret = __iommu_attach_device(group->default_domain, dev); 404 + ret = iommu_group_do_dma_first_attach(dev, group->default_domain); 429 405 if (ret) { 430 406 mutex_unlock(&group->mutex); 431 407 iommu_group_put(group); ··· 971 947 return ret; 972 948 } 973 949 974 - static bool iommu_is_attach_deferred(struct device *dev) 975 - { 976 - const struct iommu_ops *ops = dev_iommu_ops(dev); 977 - 978 - if (ops->is_attach_deferred) 979 - return ops->is_attach_deferred(dev); 980 - 981 - return false; 982 - } 983 - 984 950 /** 985 951 * iommu_group_add_device - add a device to an iommu group 986 952 * @group: the group into which to add the device (reference should be held) ··· 1023 1009 1024 1010 mutex_lock(&group->mutex); 1025 1011 list_add_tail(&device->list, &group->devices); 1026 - if (group->domain && !iommu_is_attach_deferred(dev)) 1027 - ret = __iommu_attach_device(group->domain, dev); 1012 + if (group->domain) 1013 + ret = iommu_group_do_dma_first_attach(dev, group->domain); 1028 1014 mutex_unlock(&group->mutex); 1029 1015 if (ret) 1030 1016 goto err_put_group; ··· 1790 1776 1791 1777 } 1792 1778 1793 - 
static int iommu_group_do_dma_attach(struct device *dev, void *data) 1794 - { 1795 - struct iommu_domain *domain = data; 1796 - int ret = 0; 1797 - 1798 - if (!iommu_is_attach_deferred(dev)) 1799 - ret = __iommu_attach_device(domain, dev); 1800 - 1801 - return ret; 1802 - } 1803 - 1804 - static int __iommu_group_dma_attach(struct iommu_group *group) 1779 + static int __iommu_group_dma_first_attach(struct iommu_group *group) 1805 1780 { 1806 1781 return __iommu_group_for_each_dev(group, group->default_domain, 1807 - iommu_group_do_dma_attach); 1782 + iommu_group_do_dma_first_attach); 1808 1783 } 1809 1784 1810 1785 static int iommu_group_do_probe_finalize(struct device *dev, void *data) ··· 1858 1855 1859 1856 iommu_group_create_direct_mappings(group); 1860 1857 1861 - ret = __iommu_group_dma_attach(group); 1858 + ret = __iommu_group_dma_first_attach(group); 1862 1859 1863 1860 mutex_unlock(&group->mutex); 1864 1861 ··· 1990 1987 return -ENODEV; 1991 1988 1992 1989 ret = domain->ops->attach_dev(domain, dev); 1993 - if (!ret) 1994 - trace_attach_device_to_domain(dev); 1995 - return ret; 1990 + if (ret) 1991 + return ret; 1992 + dev->iommu->attach_deferred = 0; 1993 + trace_attach_device_to_domain(dev); 1994 + return 0; 1996 1995 } 1997 1996 1998 1997 /** ··· 2039 2034 2040 2035 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) 2041 2036 { 2042 - if (iommu_is_attach_deferred(dev)) 2037 + if (dev->iommu && dev->iommu->attach_deferred) 2043 2038 return __iommu_attach_device(domain, dev); 2044 2039 2045 2040 return 0; ··· 2048 2043 static void __iommu_detach_device(struct iommu_domain *domain, 2049 2044 struct device *dev) 2050 2045 { 2051 - if (iommu_is_attach_deferred(dev)) 2052 - return; 2053 - 2054 2046 domain->ops->detach_dev(domain, dev); 2055 2047 trace_detach_device_from_domain(dev); 2056 2048 }
+2
include/linux/iommu.h
··· 405 405 * @iommu_dev: IOMMU device this device is linked to 406 406 * @priv: IOMMU Driver private data 407 407 * @max_pasids: number of PASIDs this device can consume 408 + * @attach_deferred: the dma domain attachment is deferred 408 409 * 409 410 * TODO: migrate other per device data pointers under iommu_dev_data, e.g. 410 411 * struct iommu_group *iommu_group; ··· 418 417 struct iommu_device *iommu_dev; 419 418 void *priv; 420 419 u32 max_pasids; 420 + u32 attach_deferred:1; 421 421 }; 422 422 423 423 int iommu_device_register(struct iommu_device *iommu,