Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu/arm-smmu: Wire up generic configuration support

With everything else now in place, fill in an of_xlate callback and the
appropriate registration to plumb into the generic configuration
machinery, and watch everything just work.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>

Authored by Robin Murphy and committed by Will Deacon.
021bb842 d0acbb75

+108 -60
drivers/iommu/arm-smmu.c
··· 418 418 419 419 static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0); 420 420 421 + static bool using_legacy_binding, using_generic_binding; 422 + 421 423 static struct arm_smmu_option_prop arm_smmu_options[] = { 422 424 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, 423 425 { 0, NULL}, ··· 819 817 if (smmu_domain->smmu) 820 818 goto out_unlock; 821 819 822 - /* We're bypassing these SIDs, so don't allocate an actual context */ 823 - if (domain->type == IOMMU_DOMAIN_DMA) { 824 - smmu_domain->smmu = smmu; 825 - goto out_unlock; 826 - } 827 - 828 820 /* 829 821 * Mapping the requested stage onto what we support is surprisingly 830 822 * complicated, mainly because the spec allows S1+S2 SMMUs without ··· 977 981 void __iomem *cb_base; 978 982 int irq; 979 983 980 - if (!smmu || domain->type == IOMMU_DOMAIN_DMA) 984 + if (!smmu) 981 985 return; 982 986 983 987 /* ··· 1011 1015 if (!smmu_domain) 1012 1016 return NULL; 1013 1017 1014 - if (type == IOMMU_DOMAIN_DMA && 1015 - iommu_get_dma_cookie(&smmu_domain->domain)) { 1018 + if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding || 1019 + iommu_get_dma_cookie(&smmu_domain->domain))) { 1016 1020 kfree(smmu_domain); 1017 1021 return NULL; 1018 1022 } ··· 1129 1133 mutex_lock(&smmu->stream_map_mutex); 1130 1134 /* Figure out a viable stream map entry allocation */ 1131 1135 for_each_cfg_sme(fwspec, i, idx) { 1136 + u16 sid = fwspec->ids[i]; 1137 + u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; 1138 + 1132 1139 if (idx != INVALID_SMENDX) { 1133 1140 ret = -EEXIST; 1134 1141 goto out_err; 1135 1142 } 1136 1143 1137 - ret = arm_smmu_find_sme(smmu, fwspec->ids[i], 0); 1144 + ret = arm_smmu_find_sme(smmu, sid, mask); 1138 1145 if (ret < 0) 1139 1146 goto out_err; 1140 1147 1141 1148 idx = ret; 1142 1149 if (smrs && smmu->s2crs[idx].count == 0) { 1143 - smrs[idx].id = fwspec->ids[i]; 1144 - smrs[idx].mask = 0; /* We don't currently share SMRs */ 1150 + smrs[idx].id = sid; 1151 + smrs[idx].mask = 
mask; 1145 1152 smrs[idx].valid = true; 1146 1153 } 1147 1154 smmu->s2crs[idx].count++; ··· 1201 1202 enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS; 1202 1203 u8 cbndx = smmu_domain->cfg.cbndx; 1203 1204 int i, idx; 1204 - 1205 - /* 1206 - * FIXME: This won't be needed once we have IOMMU-backed DMA ops 1207 - * for all devices behind the SMMU. Note that we need to take 1208 - * care configuring SMRs for devices both a platform_device and 1209 - * and a PCI device (i.e. a PCI host controller) 1210 - */ 1211 - if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA) 1212 - type = S2CR_TYPE_BYPASS; 1213 1205 1214 1206 for_each_cfg_sme(fwspec, i, idx) { 1215 1207 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) ··· 1363 1373 } 1364 1374 } 1365 1375 1376 + static int arm_smmu_match_node(struct device *dev, void *data) 1377 + { 1378 + return dev->of_node == data; 1379 + } 1380 + 1381 + static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np) 1382 + { 1383 + struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL, 1384 + np, arm_smmu_match_node); 1385 + put_device(dev); 1386 + return dev ? 
dev_get_drvdata(dev) : NULL; 1387 + } 1388 + 1366 1389 static int arm_smmu_add_device(struct device *dev) 1367 1390 { 1368 1391 struct arm_smmu_device *smmu; 1369 1392 struct arm_smmu_master_cfg *cfg; 1370 - struct iommu_fwspec *fwspec; 1393 + struct iommu_fwspec *fwspec = dev->iommu_fwspec; 1371 1394 int i, ret; 1372 1395 1373 - ret = arm_smmu_register_legacy_master(dev, &smmu); 1374 - fwspec = dev->iommu_fwspec; 1375 - if (ret) 1376 - goto out_free; 1396 + if (using_legacy_binding) { 1397 + ret = arm_smmu_register_legacy_master(dev, &smmu); 1398 + fwspec = dev->iommu_fwspec; 1399 + if (ret) 1400 + goto out_free; 1401 + } else if (fwspec) { 1402 + smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode)); 1403 + } else { 1404 + return -ENODEV; 1405 + } 1377 1406 1378 1407 ret = -EINVAL; 1379 1408 for (i = 0; i < fwspec->num_ids; i++) { 1380 1409 u16 sid = fwspec->ids[i]; 1410 + u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT; 1381 1411 1382 1412 if (sid & ~smmu->streamid_mask) { 1383 1413 dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n", 1384 - sid, cfg->smmu->streamid_mask); 1414 + sid, smmu->streamid_mask); 1415 + goto out_free; 1416 + } 1417 + if (mask & ~smmu->smr_mask_mask) { 1418 + dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n", 1419 + sid, smmu->smr_mask_mask); 1385 1420 goto out_free; 1386 1421 } 1387 1422 } ··· 1518 1503 return ret; 1519 1504 } 1520 1505 1506 + static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) 1507 + { 1508 + u32 fwid = 0; 1509 + 1510 + if (args->args_count > 0) 1511 + fwid |= (u16)args->args[0]; 1512 + 1513 + if (args->args_count > 1) 1514 + fwid |= (u16)args->args[1] << SMR_MASK_SHIFT; 1515 + 1516 + return iommu_fwspec_add_ids(dev, &fwid, 1); 1517 + } 1518 + 1521 1519 static struct iommu_ops arm_smmu_ops = { 1522 1520 .capable = arm_smmu_capable, 1523 1521 .domain_alloc = arm_smmu_domain_alloc, ··· 1545 1517 .device_group = arm_smmu_device_group, 1546 1518 .domain_get_attr = 
arm_smmu_domain_get_attr, 1547 1519 .domain_set_attr = arm_smmu_domain_set_attr, 1520 + .of_xlate = arm_smmu_of_xlate, 1548 1521 .pgsize_bitmap = -1UL, /* Restricted during device attach */ 1549 1522 }; 1550 1523 ··· 1899 1870 struct arm_smmu_device *smmu; 1900 1871 struct device *dev = &pdev->dev; 1901 1872 int num_irqs, i, err; 1873 + bool legacy_binding; 1874 + 1875 + legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); 1876 + if (legacy_binding && !using_generic_binding) { 1877 + if (!using_legacy_binding) 1878 + pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n"); 1879 + using_legacy_binding = true; 1880 + } else if (!legacy_binding && !using_legacy_binding) { 1881 + using_generic_binding = true; 1882 + } else { 1883 + dev_err(dev, "not probing due to mismatched DT properties\n"); 1884 + return -ENODEV; 1885 + } 1902 1886 1903 1887 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); 1904 1888 if (!smmu) { ··· 1996 1954 of_iommu_set_ops(dev->of_node, &arm_smmu_ops); 1997 1955 platform_set_drvdata(pdev, smmu); 1998 1956 arm_smmu_device_reset(smmu); 1957 + 1958 + /* Oh, for a proper bus abstraction */ 1959 + if (!iommu_present(&platform_bus_type)) 1960 + bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 1961 + #ifdef CONFIG_ARM_AMBA 1962 + if (!iommu_present(&amba_bustype)) 1963 + bus_set_iommu(&amba_bustype, &arm_smmu_ops); 1964 + #endif 1965 + #ifdef CONFIG_PCI 1966 + if (!iommu_present(&pci_bus_type)) { 1967 + pci_request_acs(); 1968 + bus_set_iommu(&pci_bus_type, &arm_smmu_ops); 1969 + } 1970 + #endif 1999 1971 return 0; 2000 1972 } 2001 1973 ··· 2039 1983 2040 1984 static int __init arm_smmu_init(void) 2041 1985 { 2042 - struct device_node *np; 2043 - int ret; 1986 + static bool registered; 1987 + int ret = 0; 2044 1988 2045 - /* 2046 - * Play nice with systems that don't have an ARM SMMU by checking that 2047 - * an ARM SMMU exists in the system before proceeding with the driver 2048 - * and IOMMU bus 
operation registration. 2049 - */ 2050 - np = of_find_matching_node(NULL, arm_smmu_of_match); 2051 - if (!np) 2052 - return 0; 2053 - 2054 - of_node_put(np); 2055 - 2056 - ret = platform_driver_register(&arm_smmu_driver); 2057 - if (ret) 2058 - return ret; 2059 - 2060 - /* Oh, for a proper bus abstraction */ 2061 - if (!iommu_present(&platform_bus_type)) 2062 - bus_set_iommu(&platform_bus_type, &arm_smmu_ops); 2063 - 2064 - #ifdef CONFIG_ARM_AMBA 2065 - if (!iommu_present(&amba_bustype)) 2066 - bus_set_iommu(&amba_bustype, &arm_smmu_ops); 2067 - #endif 2068 - 2069 - #ifdef CONFIG_PCI 2070 - if (!iommu_present(&pci_bus_type)) { 2071 - pci_request_acs(); 2072 - bus_set_iommu(&pci_bus_type, &arm_smmu_ops); 1989 + if (!registered) { 1990 + ret = platform_driver_register(&arm_smmu_driver); 1991 + registered = !ret; 2073 1992 } 2074 - #endif 2075 - 2076 - return 0; 1993 + return ret; 2077 1994 } 2078 1995 2079 1996 static void __exit arm_smmu_exit(void) ··· 2056 2027 2057 2028 subsys_initcall(arm_smmu_init); 2058 2029 module_exit(arm_smmu_exit); 2030 + 2031 + static int __init arm_smmu_of_init(struct device_node *np) 2032 + { 2033 + int ret = arm_smmu_init(); 2034 + 2035 + if (ret) 2036 + return ret; 2037 + 2038 + if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root)) 2039 + return -ENODEV; 2040 + 2041 + return 0; 2042 + } 2043 + IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init); 2044 + IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init); 2045 + IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init); 2046 + IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init); 2047 + IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init); 2048 + IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init); 2059 2049 2060 2050 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations"); 2061 2051 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");