Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'iommu-updates-v3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
"This time a few more updates queued up.

- Rework VT-d code to support ACPI devices

- Improvements for memory and PCI hotplug support in the VT-d driver

- Device-tree support for OMAP IOMMU

- Convert OMAP IOMMU to use devm_* interfaces

- Fixed PASID support for AMD IOMMU

- Other random cleanups and fixes for OMAP, ARM-SMMU and SHMOBILE
IOMMU

Most of the changes are in the VT-d driver because some rework was
necessary for better hotplug and ACPI device support"

* tag 'iommu-updates-v3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (75 commits)
iommu/vt-d: Fix error handling in ANDD processing
iommu/vt-d: returning free pointer in get_domain_for_dev()
iommu/vt-d: Only call dmar_acpi_dev_scope_init() if DRHD units present
iommu/vt-d: Check for NULL pointer in dmar_acpi_dev_scope_init()
iommu/amd: Fix logic to determine and checking max PASID
iommu/vt-d: Include ACPI devices in iommu=pt
iommu/vt-d: Finally enable translation for non-PCI devices
iommu/vt-d: Remove to_pci_dev() in intel_map_page()
iommu/vt-d: Remove pdev from intel_iommu_attach_device()
iommu/vt-d: Remove pdev from iommu_no_mapping()
iommu/vt-d: Make domain_add_dev_info() take struct device
iommu/vt-d: Make domain_remove_one_dev_info() take struct device
iommu/vt-d: Rename 'hwdev' variables to 'dev' now that that's the norm
iommu/vt-d: Remove some pointless to_pci_dev() calls
iommu/vt-d: Make get_valid_domain_for_dev() take struct device
iommu/vt-d: Make iommu_should_identity_map() take struct device
iommu/vt-d: Handle RMRRs for non-PCI devices
iommu/vt-d: Make get_domain_for_dev() take struct device
iommu/vt-d: Make domain_context_mapp{ed,ing}() take struct device
iommu/vt-d: Make device_to_iommu() cope with non-PCI devices
...

+1769 -1005
+6
Documentation/devicetree/bindings/iommu/arm,smmu.txt
··· 48 48 from the mmu-masters towards memory) node for this 49 49 SMMU. 50 50 51 + - calxeda,smmu-secure-config-access : Enable proper handling of buggy 52 + implementations that always use secure access to 53 + SMMU configuration registers. In this case non-secure 54 + aliases of secure registers have to be used during 55 + SMMU configuration. 56 + 51 57 Example: 52 58 53 59 smmu {
+26
Documentation/devicetree/bindings/iommu/ti,omap-iommu.txt
··· 1 + OMAP2+ IOMMU 2 + 3 + Required properties: 4 + - compatible : Should be one of, 5 + "ti,omap2-iommu" for OMAP2/OMAP3 IOMMU instances 6 + "ti,omap4-iommu" for OMAP4/OMAP5 IOMMU instances 7 + "ti,dra7-iommu" for DRA7xx IOMMU instances 8 + - ti,hwmods : Name of the hwmod associated with the IOMMU instance 9 + - reg : Address space for the configuration registers 10 + - interrupts : Interrupt specifier for the IOMMU instance 11 + 12 + Optional properties: 13 + - ti,#tlb-entries : Number of entries in the translation look-aside buffer. 14 + Should be either 8 or 32 (default: 32) 15 + - ti,iommu-bus-err-back : Indicates the IOMMU instance supports throwing 16 + back a bus error response on MMU faults. 17 + 18 + Example: 19 + /* OMAP3 ISP MMU */ 20 + mmu_isp: mmu@480bd400 { 21 + compatible = "ti,omap2-iommu"; 22 + reg = <0x480bd400 0x80>; 23 + interrupts = <24>; 24 + ti,hwmods = "mmu_isp"; 25 + ti,#tlb-entries = <8>; 26 + };
+5
arch/arm/mach-omap2/omap-iommu.c
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #include <linux/of.h> 13 14 #include <linux/module.h> 14 15 #include <linux/platform_device.h> 15 16 #include <linux/err.h> ··· 59 58 60 59 static int __init omap_iommu_init(void) 61 60 { 61 + /* If dtb is there, the devices will be created dynamically */ 62 + if (of_have_populated_dt()) 63 + return -ENODEV; 64 + 62 65 return omap_hwmod_for_each_by_class("mmu", omap_iommu_dev_init, NULL); 63 66 } 64 67 /* must be ready before omap3isp is probed */
+1 -1
drivers/iommu/Kconfig
··· 207 207 bool "IOMMU for Renesas IPMMU/IPMMUI" 208 208 default n 209 209 depends on ARM 210 - depends on SH_MOBILE || COMPILE_TEST 210 + depends on ARCH_SHMOBILE || COMPILE_TEST 211 211 select IOMMU_API 212 212 select ARM_DMA_USE_IOMMU 213 213 select SHMOBILE_IPMMU
+4 -4
drivers/iommu/amd_iommu.c
··· 963 963 964 964 address &= ~(0xfffULL); 965 965 966 - cmd->data[0] = pasid & PASID_MASK; 966 + cmd->data[0] = pasid; 967 967 cmd->data[1] = domid; 968 968 cmd->data[2] = lower_32_bits(address); 969 969 cmd->data[3] = upper_32_bits(address); ··· 982 982 address &= ~(0xfffULL); 983 983 984 984 cmd->data[0] = devid; 985 - cmd->data[0] |= (pasid & 0xff) << 16; 985 + cmd->data[0] |= ((pasid >> 8) & 0xff) << 16; 986 986 cmd->data[0] |= (qdep & 0xff) << 24; 987 987 cmd->data[1] = devid; 988 - cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16; 988 + cmd->data[1] |= (pasid & 0xff) << 16; 989 989 cmd->data[2] = lower_32_bits(address); 990 990 cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK; 991 991 cmd->data[3] = upper_32_bits(address); ··· 1001 1001 1002 1002 cmd->data[0] = devid; 1003 1003 if (gn) { 1004 - cmd->data[1] = pasid & PASID_MASK; 1004 + cmd->data[1] = pasid; 1005 1005 cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK; 1006 1006 } 1007 1007 cmd->data[3] = tag & 0x1ff;
+9 -7
drivers/iommu/amd_iommu_init.c
··· 150 150 bool amd_iommu_np_cache __read_mostly; 151 151 bool amd_iommu_iotlb_sup __read_mostly = true; 152 152 153 - u32 amd_iommu_max_pasids __read_mostly = ~0; 153 + u32 amd_iommu_max_pasid __read_mostly = ~0; 154 154 155 155 bool amd_iommu_v2_present __read_mostly; 156 156 bool amd_iommu_pc_present __read_mostly; ··· 1231 1231 1232 1232 if (iommu_feature(iommu, FEATURE_GT)) { 1233 1233 int glxval; 1234 - u32 pasids; 1235 - u64 shift; 1234 + u32 max_pasid; 1235 + u64 pasmax; 1236 1236 1237 - shift = iommu->features & FEATURE_PASID_MASK; 1238 - shift >>= FEATURE_PASID_SHIFT; 1239 - pasids = (1 << shift); 1237 + pasmax = iommu->features & FEATURE_PASID_MASK; 1238 + pasmax >>= FEATURE_PASID_SHIFT; 1239 + max_pasid = (1 << (pasmax + 1)) - 1; 1240 1240 1241 - amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids); 1241 + amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid); 1242 + 1243 + BUG_ON(amd_iommu_max_pasid & ~PASID_MASK); 1242 1244 1243 1245 glxval = iommu->features & FEATURE_GLXVAL_MASK; 1244 1246 glxval >>= FEATURE_GLXVAL_SHIFT;
+8 -3
drivers/iommu/amd_iommu_types.h
··· 99 99 #define FEATURE_GLXVAL_SHIFT 14 100 100 #define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT) 101 101 102 - #define PASID_MASK 0x000fffff 102 + /* Note: 103 + * The current driver only support 16-bit PASID. 104 + * Currently, hardware only implement upto 16-bit PASID 105 + * even though the spec says it could have upto 20 bits. 106 + */ 107 + #define PASID_MASK 0x0000ffff 103 108 104 109 /* MMIO status bits */ 105 110 #define MMIO_STATUS_EVT_INT_MASK (1 << 1) ··· 702 697 */ 703 698 extern u32 amd_iommu_unmap_flush; 704 699 705 - /* Smallest number of PASIDs supported by any IOMMU in the system */ 706 - extern u32 amd_iommu_max_pasids; 700 + /* Smallest max PASID supported by any IOMMU in the system */ 701 + extern u32 amd_iommu_max_pasid; 707 702 708 703 extern bool amd_iommu_v2_present; 709 704
+71 -34
drivers/iommu/arm-smmu.c
··· 48 48 #include <asm/pgalloc.h> 49 49 50 50 /* Maximum number of stream IDs assigned to a single device */ 51 - #define MAX_MASTER_STREAMIDS 8 51 + #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS 52 52 53 53 /* Maximum number of context banks per SMMU */ 54 54 #define ARM_SMMU_MAX_CBS 128 ··· 59 59 /* SMMU global address space */ 60 60 #define ARM_SMMU_GR0(smmu) ((smmu)->base) 61 61 #define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize) 62 + 63 + /* 64 + * SMMU global address space with conditional offset to access secure 65 + * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448, 66 + * nsGFSYNR0: 0x450) 67 + */ 68 + #define ARM_SMMU_GR0_NS(smmu) \ 69 + ((smmu)->base + \ 70 + ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ 71 + ? 0x400 : 0)) 62 72 63 73 /* Page table bits */ 64 74 #define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) ··· 361 351 #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) 362 352 #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) 363 353 u32 features; 354 + 355 + #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) 356 + u32 options; 364 357 int version; 365 358 366 359 u32 num_context_banks; ··· 413 400 414 401 static DEFINE_SPINLOCK(arm_smmu_devices_lock); 415 402 static LIST_HEAD(arm_smmu_devices); 403 + 404 + struct arm_smmu_option_prop { 405 + u32 opt; 406 + const char *prop; 407 + }; 408 + 409 + static struct arm_smmu_option_prop arm_smmu_options [] = { 410 + { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, 411 + { 0, NULL}, 412 + }; 413 + 414 + static void parse_driver_options(struct arm_smmu_device *smmu) 415 + { 416 + int i = 0; 417 + do { 418 + if (of_property_read_bool(smmu->dev->of_node, 419 + arm_smmu_options[i].prop)) { 420 + smmu->options |= arm_smmu_options[i].opt; 421 + dev_notice(smmu->dev, "option %s\n", 422 + arm_smmu_options[i].prop); 423 + } 424 + } while (arm_smmu_options[++i].opt); 425 + } 416 426 417 427 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu, 418 428 struct 
device_node *dev_node) ··· 650 614 { 651 615 u32 gfsr, gfsynr0, gfsynr1, gfsynr2; 652 616 struct arm_smmu_device *smmu = dev; 653 - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); 617 + void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu); 654 618 655 619 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); 656 - if (!gfsr) 657 - return IRQ_NONE; 658 - 659 620 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); 660 621 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); 661 622 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); 623 + 624 + if (!gfsr) 625 + return IRQ_NONE; 662 626 663 627 dev_err_ratelimited(smmu->dev, 664 628 "Unexpected global fault, this could be serious\n"); ··· 678 642 679 643 /* Ensure new page tables are visible to the hardware walker */ 680 644 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { 681 - dsb(); 645 + dsb(ishst); 682 646 } else { 683 647 /* 684 648 * If the SMMU can't walk tables in the CPU caches, treat them ··· 1026 990 1027 991 /* 1028 992 * Recursively free the page tables for this domain. We don't 1029 - * care about speculative TLB filling, because the TLB will be 1030 - * nuked next time this context bank is re-allocated and no devices 1031 - * currently map to these tables. 993 + * care about speculative TLB filling because the tables should 994 + * not be active in any context bank at this point (SCTLR.M is 0). 
1032 995 */ 1033 996 pgd = pgd_base; 1034 997 for (i = 0; i < PTRS_PER_PGD; ++i) { ··· 1253 1218 1254 1219 static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, 1255 1220 unsigned long addr, unsigned long end, 1256 - unsigned long pfn, int flags, int stage) 1221 + unsigned long pfn, int prot, int stage) 1257 1222 { 1258 1223 pte_t *pte, *start; 1259 1224 pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; ··· 1275 1240 1276 1241 if (stage == 1) { 1277 1242 pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG; 1278 - if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ)) 1243 + if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) 1279 1244 pteval |= ARM_SMMU_PTE_AP_RDONLY; 1280 1245 1281 - if (flags & IOMMU_CACHE) 1246 + if (prot & IOMMU_CACHE) 1282 1247 pteval |= (MAIR_ATTR_IDX_CACHE << 1283 1248 ARM_SMMU_PTE_ATTRINDX_SHIFT); 1284 1249 } else { 1285 1250 pteval |= ARM_SMMU_PTE_HAP_FAULT; 1286 - if (flags & IOMMU_READ) 1251 + if (prot & IOMMU_READ) 1287 1252 pteval |= ARM_SMMU_PTE_HAP_READ; 1288 - if (flags & IOMMU_WRITE) 1253 + if (prot & IOMMU_WRITE) 1289 1254 pteval |= ARM_SMMU_PTE_HAP_WRITE; 1290 - if (flags & IOMMU_CACHE) 1255 + if (prot & IOMMU_CACHE) 1291 1256 pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; 1292 1257 else 1293 1258 pteval |= ARM_SMMU_PTE_MEMATTR_NC; 1294 1259 } 1295 1260 1296 1261 /* If no access, create a faulting entry to avoid TLB fills */ 1297 - if (flags & IOMMU_EXEC) 1262 + if (prot & IOMMU_EXEC) 1298 1263 pteval &= ~ARM_SMMU_PTE_XN; 1299 - else if (!(flags & (IOMMU_READ | IOMMU_WRITE))) 1264 + else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) 1300 1265 pteval &= ~ARM_SMMU_PTE_PAGE; 1301 1266 1302 1267 pteval |= ARM_SMMU_PTE_SH_IS; ··· 1358 1323 1359 1324 static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, 1360 1325 unsigned long addr, unsigned long end, 1361 - phys_addr_t phys, int flags, int stage) 1326 + phys_addr_t phys, int prot, int stage) 1362 1327 { 1363 1328 int ret; 1364 1329 
pmd_t *pmd; ··· 1382 1347 do { 1383 1348 next = pmd_addr_end(addr, end); 1384 1349 ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn, 1385 - flags, stage); 1350 + prot, stage); 1386 1351 phys += next - addr; 1387 1352 } while (pmd++, addr = next, addr < end); 1388 1353 ··· 1391 1356 1392 1357 static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, 1393 1358 unsigned long addr, unsigned long end, 1394 - phys_addr_t phys, int flags, int stage) 1359 + phys_addr_t phys, int prot, int stage) 1395 1360 { 1396 1361 int ret = 0; 1397 1362 pud_t *pud; ··· 1415 1380 do { 1416 1381 next = pud_addr_end(addr, end); 1417 1382 ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, 1418 - flags, stage); 1383 + prot, stage); 1419 1384 phys += next - addr; 1420 1385 } while (pud++, addr = next, addr < end); 1421 1386 ··· 1424 1389 1425 1390 static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, 1426 1391 unsigned long iova, phys_addr_t paddr, 1427 - size_t size, int flags) 1392 + size_t size, int prot) 1428 1393 { 1429 1394 int ret, stage; 1430 1395 unsigned long end; ··· 1432 1397 struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg; 1433 1398 pgd_t *pgd = root_cfg->pgd; 1434 1399 struct arm_smmu_device *smmu = root_cfg->smmu; 1435 - unsigned long irqflags; 1400 + unsigned long flags; 1436 1401 1437 1402 if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) { 1438 1403 stage = 2; ··· 1455 1420 if (paddr & ~output_mask) 1456 1421 return -ERANGE; 1457 1422 1458 - spin_lock_irqsave(&smmu_domain->lock, irqflags); 1423 + spin_lock_irqsave(&smmu_domain->lock, flags); 1459 1424 pgd += pgd_index(iova); 1460 1425 end = iova + size; 1461 1426 do { 1462 1427 unsigned long next = pgd_addr_end(iova, end); 1463 1428 1464 1429 ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, 1465 - flags, stage); 1430 + prot, stage); 1466 1431 if (ret) 1467 1432 goto out_unlock; 1468 1433 ··· 1471 1436 } while (pgd++, iova != end); 1472 1437 1473 1438 out_unlock: 
1474 - spin_unlock_irqrestore(&smmu_domain->lock, irqflags); 1439 + spin_unlock_irqrestore(&smmu_domain->lock, flags); 1475 1440 1476 1441 return ret; 1477 1442 } 1478 1443 1479 1444 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, 1480 - phys_addr_t paddr, size_t size, int flags) 1445 + phys_addr_t paddr, size_t size, int prot) 1481 1446 { 1482 1447 struct arm_smmu_domain *smmu_domain = domain->priv; 1483 1448 ··· 1488 1453 if ((phys_addr_t)iova & ~smmu_domain->output_mask) 1489 1454 return -ERANGE; 1490 1455 1491 - return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags); 1456 + return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); 1492 1457 } 1493 1458 1494 1459 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, ··· 1632 1597 int i = 0; 1633 1598 u32 reg; 1634 1599 1635 - /* Clear Global FSR */ 1636 - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); 1637 - writel(reg, gr0_base + ARM_SMMU_GR0_sGFSR); 1600 + /* clear global FSR */ 1601 + reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); 1602 + writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR); 1638 1603 1639 1604 /* Mark all SMRn as invalid and all S2CRn as bypass */ 1640 1605 for (i = 0; i < smmu->num_mapping_groups; ++i) { ··· 1654 1619 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); 1655 1620 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); 1656 1621 1657 - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0); 1622 + reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); 1658 1623 1659 1624 /* Enable fault reporting */ 1660 1625 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE); ··· 1673 1638 1674 1639 /* Push the button */ 1675 1640 arm_smmu_tlb_sync(smmu); 1676 - writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sCR0); 1641 + writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); 1677 1642 } 1678 1643 1679 1644 static int arm_smmu_id_size_to_bits(int size) ··· 1920 1885 if (err) 1921 
1886 goto out_put_parent; 1922 1887 1888 + parse_driver_options(smmu); 1889 + 1923 1890 if (smmu->version > 1 && 1924 1891 smmu->num_context_banks != smmu->num_context_irqs) { 1925 1892 dev_err(dev, ··· 2006 1969 free_irq(smmu->irqs[i], smmu); 2007 1970 2008 1971 /* Turn the thing off */ 2009 - writel_relaxed(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0); 1972 + writel(sCR0_CLIENTPD,ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); 2010 1973 return 0; 2011 1974 } 2012 1975
+390 -125
drivers/iommu/dmar.c
··· 43 43 44 44 #include "irq_remapping.h" 45 45 46 - /* No locks are needed as DMA remapping hardware unit 47 - * list is constructed at boot time and hotplug of 48 - * these units are not supported by the architecture. 46 + /* 47 + * Assumptions: 48 + * 1) The hotplug framework guarentees that DMAR unit will be hot-added 49 + * before IO devices managed by that unit. 50 + * 2) The hotplug framework guarantees that DMAR unit will be hot-removed 51 + * after IO devices managed by that unit. 52 + * 3) Hotplug events are rare. 53 + * 54 + * Locking rules for DMA and interrupt remapping related global data structures: 55 + * 1) Use dmar_global_lock in process context 56 + * 2) Use RCU in interrupt context 49 57 */ 58 + DECLARE_RWSEM(dmar_global_lock); 50 59 LIST_HEAD(dmar_drhd_units); 51 60 52 61 struct acpi_table_header * __initdata dmar_tbl; 53 62 static acpi_size dmar_tbl_size; 63 + static int dmar_dev_scope_status = 1; 54 64 55 65 static int alloc_iommu(struct dmar_drhd_unit *drhd); 56 66 static void free_iommu(struct intel_iommu *iommu); ··· 72 62 * the very end. 
73 63 */ 74 64 if (drhd->include_all) 75 - list_add_tail(&drhd->list, &dmar_drhd_units); 65 + list_add_tail_rcu(&drhd->list, &dmar_drhd_units); 76 66 else 77 - list_add(&drhd->list, &dmar_drhd_units); 67 + list_add_rcu(&drhd->list, &dmar_drhd_units); 78 68 } 79 69 80 - static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, 81 - struct pci_dev **dev, u16 segment) 82 - { 83 - struct pci_bus *bus; 84 - struct pci_dev *pdev = NULL; 85 - struct acpi_dmar_pci_path *path; 86 - int count; 87 - 88 - bus = pci_find_bus(segment, scope->bus); 89 - path = (struct acpi_dmar_pci_path *)(scope + 1); 90 - count = (scope->length - sizeof(struct acpi_dmar_device_scope)) 91 - / sizeof(struct acpi_dmar_pci_path); 92 - 93 - while (count) { 94 - if (pdev) 95 - pci_dev_put(pdev); 96 - /* 97 - * Some BIOSes list non-exist devices in DMAR table, just 98 - * ignore it 99 - */ 100 - if (!bus) { 101 - pr_warn("Device scope bus [%d] not found\n", scope->bus); 102 - break; 103 - } 104 - pdev = pci_get_slot(bus, PCI_DEVFN(path->device, path->function)); 105 - if (!pdev) { 106 - /* warning will be printed below */ 107 - break; 108 - } 109 - path ++; 110 - count --; 111 - bus = pdev->subordinate; 112 - } 113 - if (!pdev) { 114 - pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n", 115 - segment, scope->bus, path->device, path->function); 116 - return 0; 117 - } 118 - if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \ 119 - pdev->subordinate) || (scope->entry_type == \ 120 - ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) { 121 - pci_dev_put(pdev); 122 - pr_warn("Device scope type does not match for %s\n", 123 - pci_name(pdev)); 124 - return -EINVAL; 125 - } 126 - *dev = pdev; 127 - return 0; 128 - } 129 - 130 - int __init dmar_parse_dev_scope(void *start, void *end, int *cnt, 131 - struct pci_dev ***devices, u16 segment) 70 + void *dmar_alloc_dev_scope(void *start, void *end, int *cnt) 132 71 { 133 72 struct acpi_dmar_device_scope *scope; 134 - 
void * tmp = start; 135 - int index; 136 - int ret; 137 73 138 74 *cnt = 0; 139 75 while (start < end) { 140 76 scope = start; 141 - if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || 77 + if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ACPI || 78 + scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || 142 79 scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) 143 80 (*cnt)++; 144 81 else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC && ··· 95 138 start += scope->length; 96 139 } 97 140 if (*cnt == 0) 141 + return NULL; 142 + 143 + return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL); 144 + } 145 + 146 + void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt) 147 + { 148 + int i; 149 + struct device *tmp_dev; 150 + 151 + if (*devices && *cnt) { 152 + for_each_active_dev_scope(*devices, *cnt, i, tmp_dev) 153 + put_device(tmp_dev); 154 + kfree(*devices); 155 + } 156 + 157 + *devices = NULL; 158 + *cnt = 0; 159 + } 160 + 161 + /* Optimize out kzalloc()/kfree() for normal cases */ 162 + static char dmar_pci_notify_info_buf[64]; 163 + 164 + static struct dmar_pci_notify_info * 165 + dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event) 166 + { 167 + int level = 0; 168 + size_t size; 169 + struct pci_dev *tmp; 170 + struct dmar_pci_notify_info *info; 171 + 172 + BUG_ON(dev->is_virtfn); 173 + 174 + /* Only generate path[] for device addition event */ 175 + if (event == BUS_NOTIFY_ADD_DEVICE) 176 + for (tmp = dev; tmp; tmp = tmp->bus->self) 177 + level++; 178 + 179 + size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path); 180 + if (size <= sizeof(dmar_pci_notify_info_buf)) { 181 + info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf; 182 + } else { 183 + info = kzalloc(size, GFP_KERNEL); 184 + if (!info) { 185 + pr_warn("Out of memory when allocating notify_info " 186 + "for %s.\n", pci_name(dev)); 187 + if (dmar_dev_scope_status == 0) 188 + dmar_dev_scope_status = -ENOMEM; 189 + return NULL; 190 + } 191 + 
} 192 + 193 + info->event = event; 194 + info->dev = dev; 195 + info->seg = pci_domain_nr(dev->bus); 196 + info->level = level; 197 + if (event == BUS_NOTIFY_ADD_DEVICE) { 198 + for (tmp = dev, level--; tmp; tmp = tmp->bus->self) { 199 + info->path[level].device = PCI_SLOT(tmp->devfn); 200 + info->path[level].function = PCI_FUNC(tmp->devfn); 201 + if (pci_is_root_bus(tmp->bus)) 202 + info->bus = tmp->bus->number; 203 + } 204 + } 205 + 206 + return info; 207 + } 208 + 209 + static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info) 210 + { 211 + if ((void *)info != dmar_pci_notify_info_buf) 212 + kfree(info); 213 + } 214 + 215 + static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus, 216 + struct acpi_dmar_pci_path *path, int count) 217 + { 218 + int i; 219 + 220 + if (info->bus != bus) 221 + return false; 222 + if (info->level != count) 223 + return false; 224 + 225 + for (i = 0; i < count; i++) { 226 + if (path[i].device != info->path[i].device || 227 + path[i].function != info->path[i].function) 228 + return false; 229 + } 230 + 231 + return true; 232 + } 233 + 234 + /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */ 235 + int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, 236 + void *start, void*end, u16 segment, 237 + struct dmar_dev_scope *devices, 238 + int devices_cnt) 239 + { 240 + int i, level; 241 + struct device *tmp, *dev = &info->dev->dev; 242 + struct acpi_dmar_device_scope *scope; 243 + struct acpi_dmar_pci_path *path; 244 + 245 + if (segment != info->seg) 98 246 return 0; 99 247 100 - *devices = kcalloc(*cnt, sizeof(struct pci_dev *), GFP_KERNEL); 101 - if (!*devices) 102 - return -ENOMEM; 103 - 104 - start = tmp; 105 - index = 0; 106 - while (start < end) { 248 + for (; start < end; start += scope->length) { 107 249 scope = start; 108 - if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT || 109 - scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE) { 110 - ret = 
dmar_parse_one_dev_scope(scope, 111 - &(*devices)[index], segment); 112 - if (ret) { 113 - dmar_free_dev_scope(devices, cnt); 114 - return ret; 115 - } 116 - index ++; 250 + if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT && 251 + scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE) 252 + continue; 253 + 254 + path = (struct acpi_dmar_pci_path *)(scope + 1); 255 + level = (scope->length - sizeof(*scope)) / sizeof(*path); 256 + if (!dmar_match_pci_path(info, scope->bus, path, level)) 257 + continue; 258 + 259 + if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^ 260 + (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) { 261 + pr_warn("Device scope type does not match for %s\n", 262 + pci_name(info->dev)); 263 + return -EINVAL; 117 264 } 118 - start += scope->length; 265 + 266 + for_each_dev_scope(devices, devices_cnt, i, tmp) 267 + if (tmp == NULL) { 268 + devices[i].bus = info->dev->bus->number; 269 + devices[i].devfn = info->dev->devfn; 270 + rcu_assign_pointer(devices[i].dev, 271 + get_device(dev)); 272 + return 1; 273 + } 274 + BUG_ON(i >= devices_cnt); 119 275 } 120 276 121 277 return 0; 122 278 } 123 279 124 - void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt) 280 + int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment, 281 + struct dmar_dev_scope *devices, int count) 125 282 { 126 - if (*devices && *cnt) { 127 - while (--*cnt >= 0) 128 - pci_dev_put((*devices)[*cnt]); 129 - kfree(*devices); 130 - *devices = NULL; 131 - *cnt = 0; 132 - } 283 + int index; 284 + struct device *tmp; 285 + 286 + if (info->seg != segment) 287 + return 0; 288 + 289 + for_each_active_dev_scope(devices, count, index, tmp) 290 + if (tmp == &info->dev->dev) { 291 + rcu_assign_pointer(devices[index].dev, NULL); 292 + synchronize_rcu(); 293 + put_device(tmp); 294 + return 1; 295 + } 296 + 297 + return 0; 133 298 } 299 + 300 + static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info) 301 + { 302 + int ret = 0; 303 + struct dmar_drhd_unit 
*dmaru; 304 + struct acpi_dmar_hardware_unit *drhd; 305 + 306 + for_each_drhd_unit(dmaru) { 307 + if (dmaru->include_all) 308 + continue; 309 + 310 + drhd = container_of(dmaru->hdr, 311 + struct acpi_dmar_hardware_unit, header); 312 + ret = dmar_insert_dev_scope(info, (void *)(drhd + 1), 313 + ((void *)drhd) + drhd->header.length, 314 + dmaru->segment, 315 + dmaru->devices, dmaru->devices_cnt); 316 + if (ret != 0) 317 + break; 318 + } 319 + if (ret >= 0) 320 + ret = dmar_iommu_notify_scope_dev(info); 321 + if (ret < 0 && dmar_dev_scope_status == 0) 322 + dmar_dev_scope_status = ret; 323 + 324 + return ret; 325 + } 326 + 327 + static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info) 328 + { 329 + struct dmar_drhd_unit *dmaru; 330 + 331 + for_each_drhd_unit(dmaru) 332 + if (dmar_remove_dev_scope(info, dmaru->segment, 333 + dmaru->devices, dmaru->devices_cnt)) 334 + break; 335 + dmar_iommu_notify_scope_dev(info); 336 + } 337 + 338 + static int dmar_pci_bus_notifier(struct notifier_block *nb, 339 + unsigned long action, void *data) 340 + { 341 + struct pci_dev *pdev = to_pci_dev(data); 342 + struct dmar_pci_notify_info *info; 343 + 344 + /* Only care about add/remove events for physical functions */ 345 + if (pdev->is_virtfn) 346 + return NOTIFY_DONE; 347 + if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE) 348 + return NOTIFY_DONE; 349 + 350 + info = dmar_alloc_pci_notify_info(pdev, action); 351 + if (!info) 352 + return NOTIFY_DONE; 353 + 354 + down_write(&dmar_global_lock); 355 + if (action == BUS_NOTIFY_ADD_DEVICE) 356 + dmar_pci_bus_add_dev(info); 357 + else if (action == BUS_NOTIFY_DEL_DEVICE) 358 + dmar_pci_bus_del_dev(info); 359 + up_write(&dmar_global_lock); 360 + 361 + dmar_free_pci_notify_info(info); 362 + 363 + return NOTIFY_OK; 364 + } 365 + 366 + static struct notifier_block dmar_pci_bus_nb = { 367 + .notifier_call = dmar_pci_bus_notifier, 368 + .priority = INT_MIN, 369 + }; 134 370 135 371 /** 136 372 * dmar_parse_one_drhd 
- parses exactly one DMA remapping hardware definition ··· 346 196 dmaru->reg_base_addr = drhd->address; 347 197 dmaru->segment = drhd->segment; 348 198 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ 199 + dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1), 200 + ((void *)drhd) + drhd->header.length, 201 + &dmaru->devices_cnt); 202 + if (dmaru->devices_cnt && dmaru->devices == NULL) { 203 + kfree(dmaru); 204 + return -ENOMEM; 205 + } 349 206 350 207 ret = alloc_iommu(dmaru); 351 208 if (ret) { 209 + dmar_free_dev_scope(&dmaru->devices, 210 + &dmaru->devices_cnt); 352 211 kfree(dmaru); 353 212 return ret; 354 213 } ··· 374 215 kfree(dmaru); 375 216 } 376 217 377 - static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) 218 + static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) 378 219 { 379 - struct acpi_dmar_hardware_unit *drhd; 220 + struct acpi_dmar_andd *andd = (void *)header; 380 221 381 - drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; 222 + /* Check for NUL termination within the designated length */ 223 + if (strnlen(andd->object_name, header->length - 8) == header->length - 8) { 224 + WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND, 225 + "Your BIOS is broken; ANDD object name is not NUL-terminated\n" 226 + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", 227 + dmi_get_system_info(DMI_BIOS_VENDOR), 228 + dmi_get_system_info(DMI_BIOS_VERSION), 229 + dmi_get_system_info(DMI_PRODUCT_VERSION)); 230 + return -EINVAL; 231 + } 232 + pr_info("ANDD device: %x name: %s\n", andd->device_number, 233 + andd->object_name); 382 234 383 - if (dmaru->include_all) 384 - return 0; 385 - 386 - return dmar_parse_dev_scope((void *)(drhd + 1), 387 - ((void *)drhd) + drhd->header.length, 388 - &dmaru->devices_cnt, &dmaru->devices, 389 - drhd->segment); 235 + return 0; 390 236 } 391 237 392 238 #ifdef CONFIG_ACPI_NUMA ··· 456 292 pr_info("RHSA base: %#016Lx proximity domain: %#x\n", 457 293 (unsigned long long)rhsa->base_address, 458 
294 rhsa->proximity_domain); 295 + break; 296 + case ACPI_DMAR_TYPE_ANDD: 297 + /* We don't print this here because we need to sanity-check 298 + it first. So print it in dmar_parse_one_andd() instead. */ 459 299 break; 460 300 } 461 301 } ··· 546 378 ret = dmar_parse_one_rhsa(entry_header); 547 379 #endif 548 380 break; 381 + case ACPI_DMAR_TYPE_ANDD: 382 + ret = dmar_parse_one_andd(entry_header); 383 + break; 549 384 default: 550 385 pr_warn("Unknown DMAR structure type %d\n", 551 386 entry_header->type); ··· 565 394 return ret; 566 395 } 567 396 568 - static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, 569 - struct pci_dev *dev) 397 + static int dmar_pci_device_match(struct dmar_dev_scope devices[], 398 + int cnt, struct pci_dev *dev) 570 399 { 571 400 int index; 401 + struct device *tmp; 572 402 573 403 while (dev) { 574 - for (index = 0; index < cnt; index++) 575 - if (dev == devices[index]) 404 + for_each_active_dev_scope(devices, cnt, index, tmp) 405 + if (dev_is_pci(tmp) && dev == to_pci_dev(tmp)) 576 406 return 1; 577 407 578 408 /* Check our parent */ ··· 586 414 struct dmar_drhd_unit * 587 415 dmar_find_matched_drhd_unit(struct pci_dev *dev) 588 416 { 589 - struct dmar_drhd_unit *dmaru = NULL; 417 + struct dmar_drhd_unit *dmaru; 590 418 struct acpi_dmar_hardware_unit *drhd; 591 419 592 420 dev = pci_physfn(dev); 593 421 422 + rcu_read_lock(); 594 423 for_each_drhd_unit(dmaru) { 595 424 drhd = container_of(dmaru->hdr, 596 425 struct acpi_dmar_hardware_unit, ··· 599 426 600 427 if (dmaru->include_all && 601 428 drhd->segment == pci_domain_nr(dev->bus)) 602 - return dmaru; 429 + goto out; 603 430 604 431 if (dmar_pci_device_match(dmaru->devices, 605 432 dmaru->devices_cnt, dev)) 606 - return dmaru; 433 + goto out; 607 434 } 435 + dmaru = NULL; 436 + out: 437 + rcu_read_unlock(); 608 438 609 - return NULL; 439 + return dmaru; 440 + } 441 + 442 + static void __init dmar_acpi_insert_dev_scope(u8 device_number, 443 + struct acpi_device *adev) 
444 + { 445 + struct dmar_drhd_unit *dmaru; 446 + struct acpi_dmar_hardware_unit *drhd; 447 + struct acpi_dmar_device_scope *scope; 448 + struct device *tmp; 449 + int i; 450 + struct acpi_dmar_pci_path *path; 451 + 452 + for_each_drhd_unit(dmaru) { 453 + drhd = container_of(dmaru->hdr, 454 + struct acpi_dmar_hardware_unit, 455 + header); 456 + 457 + for (scope = (void *)(drhd + 1); 458 + (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length; 459 + scope = ((void *)scope) + scope->length) { 460 + if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ACPI) 461 + continue; 462 + if (scope->enumeration_id != device_number) 463 + continue; 464 + 465 + path = (void *)(scope + 1); 466 + pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n", 467 + dev_name(&adev->dev), dmaru->reg_base_addr, 468 + scope->bus, path->device, path->function); 469 + for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp) 470 + if (tmp == NULL) { 471 + dmaru->devices[i].bus = scope->bus; 472 + dmaru->devices[i].devfn = PCI_DEVFN(path->device, 473 + path->function); 474 + rcu_assign_pointer(dmaru->devices[i].dev, 475 + get_device(&adev->dev)); 476 + return; 477 + } 478 + BUG_ON(i >= dmaru->devices_cnt); 479 + } 480 + } 481 + pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n", 482 + device_number, dev_name(&adev->dev)); 483 + } 484 + 485 + static int __init dmar_acpi_dev_scope_init(void) 486 + { 487 + struct acpi_dmar_andd *andd; 488 + 489 + if (dmar_tbl == NULL) 490 + return -ENODEV; 491 + 492 + for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar); 493 + ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length; 494 + andd = ((void *)andd) + andd->header.length) { 495 + if (andd->header.type == ACPI_DMAR_TYPE_ANDD) { 496 + acpi_handle h; 497 + struct acpi_device *adev; 498 + 499 + if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, 500 + andd->object_name, 501 + &h))) { 502 + pr_err("Failed to find handle for ACPI object %s\n", 503 + 
andd->object_name); 504 + continue; 505 + } 506 + acpi_bus_get_device(h, &adev); 507 + if (!adev) { 508 + pr_err("Failed to get device for ACPI object %s\n", 509 + andd->object_name); 510 + continue; 511 + } 512 + dmar_acpi_insert_dev_scope(andd->device_number, adev); 513 + } 514 + } 515 + return 0; 610 516 } 611 517 612 518 int __init dmar_dev_scope_init(void) 613 519 { 614 - static int dmar_dev_scope_initialized; 615 - struct dmar_drhd_unit *drhd; 616 - int ret = -ENODEV; 520 + struct pci_dev *dev = NULL; 521 + struct dmar_pci_notify_info *info; 617 522 618 - if (dmar_dev_scope_initialized) 619 - return dmar_dev_scope_initialized; 523 + if (dmar_dev_scope_status != 1) 524 + return dmar_dev_scope_status; 620 525 621 - if (list_empty(&dmar_drhd_units)) 622 - goto fail; 526 + if (list_empty(&dmar_drhd_units)) { 527 + dmar_dev_scope_status = -ENODEV; 528 + } else { 529 + dmar_dev_scope_status = 0; 623 530 624 - list_for_each_entry(drhd, &dmar_drhd_units, list) { 625 - ret = dmar_parse_dev(drhd); 626 - if (ret) 627 - goto fail; 531 + dmar_acpi_dev_scope_init(); 532 + 533 + for_each_pci_dev(dev) { 534 + if (dev->is_virtfn) 535 + continue; 536 + 537 + info = dmar_alloc_pci_notify_info(dev, 538 + BUS_NOTIFY_ADD_DEVICE); 539 + if (!info) { 540 + return dmar_dev_scope_status; 541 + } else { 542 + dmar_pci_bus_add_dev(info); 543 + dmar_free_pci_notify_info(info); 544 + } 545 + } 546 + 547 + bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb); 628 548 } 629 549 630 - ret = dmar_parse_rmrr_atsr_dev(); 631 - if (ret) 632 - goto fail; 633 - 634 - dmar_dev_scope_initialized = 1; 635 - return 0; 636 - 637 - fail: 638 - dmar_dev_scope_initialized = ret; 639 - return ret; 550 + return dmar_dev_scope_status; 640 551 } 641 552 642 553 ··· 814 557 { 815 558 int ret; 816 559 560 + down_write(&dmar_global_lock); 817 561 ret = dmar_table_detect(); 818 562 if (ret) 819 563 ret = check_zero_address(); ··· 832 574 } 833 575 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, 
dmar_tbl_size); 834 576 dmar_tbl = NULL; 577 + up_write(&dmar_global_lock); 835 578 836 579 return ret ? 1 : -ENODEV; 837 580 } ··· 955 696 } 956 697 iommu->agaw = agaw; 957 698 iommu->msagaw = msagaw; 699 + iommu->segment = drhd->segment; 958 700 959 701 iommu->node = -1; 960 702 ··· 1646 1386 if (irq_remapping_enabled || intel_iommu_enabled) 1647 1387 return 0; 1648 1388 1389 + if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) 1390 + bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb); 1391 + 1392 + down_write(&dmar_global_lock); 1649 1393 list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) { 1650 1394 list_del(&dmaru->list); 1651 1395 dmar_free_drhd(dmaru); 1652 1396 } 1397 + up_write(&dmar_global_lock); 1653 1398 1654 1399 return 0; 1655 1400 }
+940 -698
drivers/iommu/intel-iommu.c
··· 1 1 /* 2 - * Copyright (c) 2006, Intel Corporation. 2 + * Copyright © 2006-2014 Intel Corporation. 3 3 * 4 4 * This program is free software; you can redistribute it and/or modify it 5 5 * under the terms and conditions of the GNU General Public License, ··· 10 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 11 11 * more details. 12 12 * 13 - * You should have received a copy of the GNU General Public License along with 14 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple 15 - * Place - Suite 330, Boston, MA 02111-1307 USA. 16 - * 17 - * Copyright (C) 2006-2008 Intel Corporation 18 - * Author: Ashok Raj <ashok.raj@intel.com> 19 - * Author: Shaohua Li <shaohua.li@intel.com> 20 - * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 21 - * Author: Fenghua Yu <fenghua.yu@intel.com> 13 + * Authors: David Woodhouse <dwmw2@infradead.org>, 14 + * Ashok Raj <ashok.raj@intel.com>, 15 + * Shaohua Li <shaohua.li@intel.com>, 16 + * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>, 17 + * Fenghua Yu <fenghua.yu@intel.com> 22 18 */ 23 19 24 20 #include <linux/init.h> ··· 29 33 #include <linux/dmar.h> 30 34 #include <linux/dma-mapping.h> 31 35 #include <linux/mempool.h> 36 + #include <linux/memory.h> 32 37 #include <linux/timer.h> 33 38 #include <linux/iova.h> 34 39 #include <linux/iommu.h> ··· 369 372 struct device_domain_info { 370 373 struct list_head link; /* link to domain siblings */ 371 374 struct list_head global; /* link to global list */ 372 - int segment; /* PCI domain */ 373 375 u8 bus; /* PCI bus number */ 374 376 u8 devfn; /* PCI devfn number */ 375 - struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */ 377 + struct device *dev; /* it's NULL for PCIe-to-PCI bridge */ 376 378 struct intel_iommu *iommu; /* IOMMU used by this device */ 377 379 struct dmar_domain *domain; /* pointer to domain */ 378 380 }; 381 + 382 + struct dmar_rmrr_unit { 383 + struct list_head list; /* list of 
rmrr units */ 384 + struct acpi_dmar_header *hdr; /* ACPI header */ 385 + u64 base_address; /* reserved base address*/ 386 + u64 end_address; /* reserved end address */ 387 + struct dmar_dev_scope *devices; /* target devices */ 388 + int devices_cnt; /* target device count */ 389 + }; 390 + 391 + struct dmar_atsr_unit { 392 + struct list_head list; /* list of ATSR units */ 393 + struct acpi_dmar_header *hdr; /* ACPI header */ 394 + struct dmar_dev_scope *devices; /* target devices */ 395 + int devices_cnt; /* target device count */ 396 + u8 include_all:1; /* include all ports */ 397 + }; 398 + 399 + static LIST_HEAD(dmar_atsr_units); 400 + static LIST_HEAD(dmar_rmrr_units); 401 + 402 + #define for_each_rmrr_units(rmrr) \ 403 + list_for_each_entry(rmrr, &dmar_rmrr_units, list) 379 404 380 405 static void flush_unmaps_timeout(unsigned long data); 381 406 ··· 408 389 int next; 409 390 struct iova *iova[HIGH_WATER_MARK]; 410 391 struct dmar_domain *domain[HIGH_WATER_MARK]; 392 + struct page *freelist[HIGH_WATER_MARK]; 411 393 }; 412 394 413 395 static struct deferred_flush_tables *deferred_flush; ··· 422 402 static int timer_on; 423 403 static long list_size; 424 404 405 + static void domain_exit(struct dmar_domain *domain); 425 406 static void domain_remove_dev_info(struct dmar_domain *domain); 407 + static void domain_remove_one_dev_info(struct dmar_domain *domain, 408 + struct device *dev); 409 + static void iommu_detach_dependent_devices(struct intel_iommu *iommu, 410 + struct device *dev); 426 411 427 412 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON 428 413 int dmar_disabled = 0; ··· 591 566 592 567 static void domain_update_iommu_coherency(struct dmar_domain *domain) 593 568 { 594 - int i; 569 + struct dmar_drhd_unit *drhd; 570 + struct intel_iommu *iommu; 571 + int i, found = 0; 595 572 596 - i = find_first_bit(domain->iommu_bmp, g_num_of_iommus); 597 - 598 - domain->iommu_coherency = i < g_num_of_iommus ? 
1 : 0; 573 + domain->iommu_coherency = 1; 599 574 600 575 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { 576 + found = 1; 601 577 if (!ecap_coherent(g_iommus[i]->ecap)) { 602 578 domain->iommu_coherency = 0; 603 579 break; 604 580 } 605 581 } 582 + if (found) 583 + return; 584 + 585 + /* No hardware attached; use lowest common denominator */ 586 + rcu_read_lock(); 587 + for_each_active_iommu(iommu, drhd) { 588 + if (!ecap_coherent(iommu->ecap)) { 589 + domain->iommu_coherency = 0; 590 + break; 591 + } 592 + } 593 + rcu_read_unlock(); 606 594 } 607 595 608 596 static void domain_update_iommu_snooping(struct dmar_domain *domain) ··· 644 606 } 645 607 646 608 /* set iommu_superpage to the smallest common denominator */ 609 + rcu_read_lock(); 647 610 for_each_active_iommu(iommu, drhd) { 648 611 mask &= cap_super_page_val(iommu->cap); 649 612 if (!mask) { 650 613 break; 651 614 } 652 615 } 616 + rcu_read_unlock(); 617 + 653 618 domain->iommu_superpage = fls(mask); 654 619 } 655 620 ··· 664 623 domain_update_iommu_superpage(domain); 665 624 } 666 625 667 - static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn) 626 + static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) 668 627 { 669 628 struct dmar_drhd_unit *drhd = NULL; 629 + struct intel_iommu *iommu; 630 + struct device *tmp; 631 + struct pci_dev *ptmp, *pdev = NULL; 632 + u16 segment; 670 633 int i; 671 634 672 - for_each_active_drhd_unit(drhd) { 673 - if (segment != drhd->segment) 635 + if (dev_is_pci(dev)) { 636 + pdev = to_pci_dev(dev); 637 + segment = pci_domain_nr(pdev->bus); 638 + } else if (ACPI_COMPANION(dev)) 639 + dev = &ACPI_COMPANION(dev)->dev; 640 + 641 + rcu_read_lock(); 642 + for_each_active_iommu(iommu, drhd) { 643 + if (pdev && segment != drhd->segment) 674 644 continue; 675 645 676 - for (i = 0; i < drhd->devices_cnt; i++) { 677 - if (drhd->devices[i] && 678 - drhd->devices[i]->bus->number == bus && 679 - drhd->devices[i]->devfn == devfn) 
680 - return drhd->iommu; 681 - if (drhd->devices[i] && 682 - drhd->devices[i]->subordinate && 683 - drhd->devices[i]->subordinate->number <= bus && 684 - drhd->devices[i]->subordinate->busn_res.end >= bus) 685 - return drhd->iommu; 646 + for_each_active_dev_scope(drhd->devices, 647 + drhd->devices_cnt, i, tmp) { 648 + if (tmp == dev) { 649 + *bus = drhd->devices[i].bus; 650 + *devfn = drhd->devices[i].devfn; 651 + goto out; 652 + } 653 + 654 + if (!pdev || !dev_is_pci(tmp)) 655 + continue; 656 + 657 + ptmp = to_pci_dev(tmp); 658 + if (ptmp->subordinate && 659 + ptmp->subordinate->number <= pdev->bus->number && 660 + ptmp->subordinate->busn_res.end >= pdev->bus->number) 661 + goto got_pdev; 686 662 } 687 663 688 - if (drhd->include_all) 689 - return drhd->iommu; 664 + if (pdev && drhd->include_all) { 665 + got_pdev: 666 + *bus = pdev->bus->number; 667 + *devfn = pdev->devfn; 668 + goto out; 669 + } 690 670 } 671 + iommu = NULL; 672 + out: 673 + rcu_read_unlock(); 691 674 692 - return NULL; 675 + return iommu; 693 676 } 694 677 695 678 static void domain_flush_cache(struct dmar_domain *domain, ··· 813 748 } 814 749 815 750 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, 816 - unsigned long pfn, int target_level) 751 + unsigned long pfn, int *target_level) 817 752 { 818 753 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 819 754 struct dma_pte *parent, *pte = NULL; ··· 828 763 829 764 parent = domain->pgd; 830 765 831 - while (level > 0) { 766 + while (1) { 832 767 void *tmp_page; 833 768 834 769 offset = pfn_level_offset(pfn, level); 835 770 pte = &parent[offset]; 836 - if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) 771 + if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) 837 772 break; 838 - if (level == target_level) 773 + if (level == *target_level) 839 774 break; 840 775 841 776 if (!dma_pte_present(pte)) { ··· 856 791 domain_flush_cache(domain, pte, sizeof(*pte)); 857 792 } 858 793 
} 794 + if (level == 1) 795 + break; 796 + 859 797 parent = phys_to_virt(dma_pte_addr(pte)); 860 798 level--; 861 799 } 800 + 801 + if (!*target_level) 802 + *target_level = level; 862 803 863 804 return pte; 864 805 } ··· 903 832 } 904 833 905 834 /* clear last level pte, a tlb flush should be followed */ 906 - static int dma_pte_clear_range(struct dmar_domain *domain, 835 + static void dma_pte_clear_range(struct dmar_domain *domain, 907 836 unsigned long start_pfn, 908 837 unsigned long last_pfn) 909 838 { ··· 933 862 (void *)pte - (void *)first_pte); 934 863 935 864 } while (start_pfn && start_pfn <= last_pfn); 936 - 937 - return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH); 938 865 } 939 866 940 867 static void dma_pte_free_level(struct dmar_domain *domain, int level, ··· 987 918 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { 988 919 free_pgtable_page(domain->pgd); 989 920 domain->pgd = NULL; 921 + } 922 + } 923 + 924 + /* When a page at a given level is being unlinked from its parent, we don't 925 + need to *modify* it at all. All we need to do is make a list of all the 926 + pages which can be freed just as soon as we've flushed the IOTLB and we 927 + know the hardware page-walk will no longer touch them. 928 + The 'pte' argument is the *parent* PTE, pointing to the page that is to 929 + be freed. 
*/ 930 + static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, 931 + int level, struct dma_pte *pte, 932 + struct page *freelist) 933 + { 934 + struct page *pg; 935 + 936 + pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT); 937 + pg->freelist = freelist; 938 + freelist = pg; 939 + 940 + if (level == 1) 941 + return freelist; 942 + 943 + for (pte = page_address(pg); !first_pte_in_page(pte); pte++) { 944 + if (dma_pte_present(pte) && !dma_pte_superpage(pte)) 945 + freelist = dma_pte_list_pagetables(domain, level - 1, 946 + pte, freelist); 947 + } 948 + 949 + return freelist; 950 + } 951 + 952 + static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, 953 + struct dma_pte *pte, unsigned long pfn, 954 + unsigned long start_pfn, 955 + unsigned long last_pfn, 956 + struct page *freelist) 957 + { 958 + struct dma_pte *first_pte = NULL, *last_pte = NULL; 959 + 960 + pfn = max(start_pfn, pfn); 961 + pte = &pte[pfn_level_offset(pfn, level)]; 962 + 963 + do { 964 + unsigned long level_pfn; 965 + 966 + if (!dma_pte_present(pte)) 967 + goto next; 968 + 969 + level_pfn = pfn & level_mask(level); 970 + 971 + /* If range covers entire pagetable, free it */ 972 + if (start_pfn <= level_pfn && 973 + last_pfn >= level_pfn + level_size(level) - 1) { 974 + /* These suborbinate page tables are going away entirely. Don't 975 + bother to clear them; we're just going to *free* them. 
*/ 976 + if (level > 1 && !dma_pte_superpage(pte)) 977 + freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); 978 + 979 + dma_clear_pte(pte); 980 + if (!first_pte) 981 + first_pte = pte; 982 + last_pte = pte; 983 + } else if (level > 1) { 984 + /* Recurse down into a level that isn't *entirely* obsolete */ 985 + freelist = dma_pte_clear_level(domain, level - 1, 986 + phys_to_virt(dma_pte_addr(pte)), 987 + level_pfn, start_pfn, last_pfn, 988 + freelist); 989 + } 990 + next: 991 + pfn += level_size(level); 992 + } while (!first_pte_in_page(++pte) && pfn <= last_pfn); 993 + 994 + if (first_pte) 995 + domain_flush_cache(domain, first_pte, 996 + (void *)++last_pte - (void *)first_pte); 997 + 998 + return freelist; 999 + } 1000 + 1001 + /* We can't just free the pages because the IOMMU may still be walking 1002 + the page tables, and may have cached the intermediate levels. The 1003 + pages can only be freed after the IOTLB flush has been done. */ 1004 + struct page *domain_unmap(struct dmar_domain *domain, 1005 + unsigned long start_pfn, 1006 + unsigned long last_pfn) 1007 + { 1008 + int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 1009 + struct page *freelist = NULL; 1010 + 1011 + BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); 1012 + BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); 1013 + BUG_ON(start_pfn > last_pfn); 1014 + 1015 + /* we don't need lock here; nobody else touches the iova range */ 1016 + freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), 1017 + domain->pgd, 0, start_pfn, last_pfn, NULL); 1018 + 1019 + /* free pgd */ 1020 + if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { 1021 + struct page *pgd_page = virt_to_page(domain->pgd); 1022 + pgd_page->freelist = freelist; 1023 + freelist = pgd_page; 1024 + 1025 + domain->pgd = NULL; 1026 + } 1027 + 1028 + return freelist; 1029 + } 1030 + 1031 + void dma_free_pagelist(struct page *freelist) 1032 + { 1033 + struct page 
*pg; 1034 + 1035 + while ((pg = freelist)) { 1036 + freelist = pg->freelist; 1037 + free_pgtable_page(page_address(pg)); 990 1038 } 991 1039 } 992 1040 ··· 1216 1030 break; 1217 1031 case DMA_TLB_PSI_FLUSH: 1218 1032 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did); 1219 - /* Note: always flush non-leaf currently */ 1033 + /* IH bit is passed in as part of address */ 1220 1034 val_iva = size_order | addr; 1221 1035 break; 1222 1036 default: ··· 1255 1069 (unsigned long long)DMA_TLB_IAIG(val)); 1256 1070 } 1257 1071 1258 - static struct device_domain_info *iommu_support_dev_iotlb( 1259 - struct dmar_domain *domain, int segment, u8 bus, u8 devfn) 1072 + static struct device_domain_info * 1073 + iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, 1074 + u8 bus, u8 devfn) 1260 1075 { 1261 1076 int found = 0; 1262 1077 unsigned long flags; 1263 1078 struct device_domain_info *info; 1264 - struct intel_iommu *iommu = device_to_iommu(segment, bus, devfn); 1079 + struct pci_dev *pdev; 1265 1080 1266 1081 if (!ecap_dev_iotlb_support(iommu->ecap)) 1267 1082 return NULL; ··· 1278 1091 } 1279 1092 spin_unlock_irqrestore(&device_domain_lock, flags); 1280 1093 1281 - if (!found || !info->dev) 1094 + if (!found || !info->dev || !dev_is_pci(info->dev)) 1282 1095 return NULL; 1283 1096 1284 - if (!pci_find_ext_capability(info->dev, PCI_EXT_CAP_ID_ATS)) 1097 + pdev = to_pci_dev(info->dev); 1098 + 1099 + if (!pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS)) 1285 1100 return NULL; 1286 1101 1287 - if (!dmar_find_matched_atsr_unit(info->dev)) 1102 + if (!dmar_find_matched_atsr_unit(pdev)) 1288 1103 return NULL; 1289 - 1290 - info->iommu = iommu; 1291 1104 1292 1105 return info; 1293 1106 } 1294 1107 1295 1108 static void iommu_enable_dev_iotlb(struct device_domain_info *info) 1296 1109 { 1297 - if (!info) 1110 + if (!info || !dev_is_pci(info->dev)) 1298 1111 return; 1299 1112 1300 - pci_enable_ats(info->dev, VTD_PAGE_SHIFT); 1113 + 
pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT); 1301 1114 } 1302 1115 1303 1116 static void iommu_disable_dev_iotlb(struct device_domain_info *info) 1304 1117 { 1305 - if (!info->dev || !pci_ats_enabled(info->dev)) 1118 + if (!info->dev || !dev_is_pci(info->dev) || 1119 + !pci_ats_enabled(to_pci_dev(info->dev))) 1306 1120 return; 1307 1121 1308 - pci_disable_ats(info->dev); 1122 + pci_disable_ats(to_pci_dev(info->dev)); 1309 1123 } 1310 1124 1311 1125 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, ··· 1318 1130 1319 1131 spin_lock_irqsave(&device_domain_lock, flags); 1320 1132 list_for_each_entry(info, &domain->devices, link) { 1321 - if (!info->dev || !pci_ats_enabled(info->dev)) 1133 + struct pci_dev *pdev; 1134 + if (!info->dev || !dev_is_pci(info->dev)) 1135 + continue; 1136 + 1137 + pdev = to_pci_dev(info->dev); 1138 + if (!pci_ats_enabled(pdev)) 1322 1139 continue; 1323 1140 1324 1141 sid = info->bus << 8 | info->devfn; 1325 - qdep = pci_ats_queue_depth(info->dev); 1142 + qdep = pci_ats_queue_depth(pdev); 1326 1143 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask); 1327 1144 } 1328 1145 spin_unlock_irqrestore(&device_domain_lock, flags); 1329 1146 } 1330 1147 1331 1148 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, 1332 - unsigned long pfn, unsigned int pages, int map) 1149 + unsigned long pfn, unsigned int pages, int ih, int map) 1333 1150 { 1334 1151 unsigned int mask = ilog2(__roundup_pow_of_two(pages)); 1335 1152 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT; 1336 1153 1337 1154 BUG_ON(pages == 0); 1338 1155 1156 + if (ih) 1157 + ih = 1 << 6; 1339 1158 /* 1340 1159 * Fallback to domain selective flush if no PSI support or the size is 1341 1160 * too big. 
··· 1353 1158 iommu->flush.flush_iotlb(iommu, did, 0, 0, 1354 1159 DMA_TLB_DSI_FLUSH); 1355 1160 else 1356 - iommu->flush.flush_iotlb(iommu, did, addr, mask, 1161 + iommu->flush.flush_iotlb(iommu, did, addr | ih, mask, 1357 1162 DMA_TLB_PSI_FLUSH); 1358 1163 1359 1164 /* ··· 1456 1261 return 0; 1457 1262 } 1458 1263 1459 - 1460 - static void domain_exit(struct dmar_domain *domain); 1461 - static void vm_domain_exit(struct dmar_domain *domain); 1462 - 1463 1264 static void free_dmar_iommu(struct intel_iommu *iommu) 1464 1265 { 1465 1266 struct dmar_domain *domain; ··· 1464 1273 1465 1274 if ((iommu->domains) && (iommu->domain_ids)) { 1466 1275 for_each_set_bit(i, iommu->domain_ids, cap_ndoms(iommu->cap)) { 1276 + /* 1277 + * Domain id 0 is reserved for invalid translation 1278 + * if hardware supports caching mode. 1279 + */ 1280 + if (cap_caching_mode(iommu->cap) && i == 0) 1281 + continue; 1282 + 1467 1283 domain = iommu->domains[i]; 1468 1284 clear_bit(i, iommu->domain_ids); 1469 1285 1470 1286 spin_lock_irqsave(&domain->iommu_lock, flags); 1471 1287 count = --domain->iommu_count; 1472 1288 spin_unlock_irqrestore(&domain->iommu_lock, flags); 1473 - if (count == 0) { 1474 - if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) 1475 - vm_domain_exit(domain); 1476 - else 1477 - domain_exit(domain); 1478 - } 1289 + if (count == 0) 1290 + domain_exit(domain); 1479 1291 } 1480 1292 } 1481 1293 ··· 1492 1298 1493 1299 g_iommus[iommu->seq_id] = NULL; 1494 1300 1495 - /* if all iommus are freed, free g_iommus */ 1496 - for (i = 0; i < g_num_of_iommus; i++) { 1497 - if (g_iommus[i]) 1498 - break; 1499 - } 1500 - 1501 - if (i == g_num_of_iommus) 1502 - kfree(g_iommus); 1503 - 1504 1301 /* free context mapping */ 1505 1302 free_context_table(iommu); 1506 1303 } 1507 1304 1508 - static struct dmar_domain *alloc_domain(void) 1305 + static struct dmar_domain *alloc_domain(bool vm) 1509 1306 { 1307 + /* domain id for virtual machine, it won't be set in context */ 1308 + static 
atomic_t vm_domid = ATOMIC_INIT(0); 1510 1309 struct dmar_domain *domain; 1511 1310 1512 1311 domain = alloc_domain_mem(); ··· 1507 1320 return NULL; 1508 1321 1509 1322 domain->nid = -1; 1323 + domain->iommu_count = 0; 1510 1324 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); 1511 1325 domain->flags = 0; 1326 + spin_lock_init(&domain->iommu_lock); 1327 + INIT_LIST_HEAD(&domain->devices); 1328 + if (vm) { 1329 + domain->id = atomic_inc_return(&vm_domid); 1330 + domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; 1331 + } 1512 1332 1513 1333 return domain; 1514 1334 } ··· 1539 1345 } 1540 1346 1541 1347 domain->id = num; 1348 + domain->iommu_count++; 1542 1349 set_bit(num, iommu->domain_ids); 1543 1350 set_bit(iommu->seq_id, domain->iommu_bmp); 1544 1351 iommu->domains[num] = domain; ··· 1553 1358 { 1554 1359 unsigned long flags; 1555 1360 int num, ndomains; 1556 - int found = 0; 1557 1361 1558 1362 spin_lock_irqsave(&iommu->lock, flags); 1559 1363 ndomains = cap_ndoms(iommu->cap); 1560 1364 for_each_set_bit(num, iommu->domain_ids, ndomains) { 1561 1365 if (iommu->domains[num] == domain) { 1562 - found = 1; 1366 + clear_bit(num, iommu->domain_ids); 1367 + iommu->domains[num] = NULL; 1563 1368 break; 1564 1369 } 1565 - } 1566 - 1567 - if (found) { 1568 - clear_bit(num, iommu->domain_ids); 1569 - clear_bit(iommu->seq_id, domain->iommu_bmp); 1570 - iommu->domains[num] = NULL; 1571 1370 } 1572 1371 spin_unlock_irqrestore(&iommu->lock, flags); 1573 1372 } ··· 1634 1445 unsigned long sagaw; 1635 1446 1636 1447 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); 1637 - spin_lock_init(&domain->iommu_lock); 1638 - 1639 1448 domain_reserve_special_ranges(domain); 1640 1449 1641 1450 /* calculate AGAW */ ··· 1652 1465 return -ENODEV; 1653 1466 } 1654 1467 domain->agaw = agaw; 1655 - INIT_LIST_HEAD(&domain->devices); 1656 1468 1657 1469 if (ecap_coherent(iommu->ecap)) 1658 1470 domain->iommu_coherency = 1; ··· 1663 1477 else 1664 1478 domain->iommu_snooping = 0; 1665 1479 1666 - 
domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); 1667 - domain->iommu_count = 1; 1480 + if (intel_iommu_superpage) 1481 + domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); 1482 + else 1483 + domain->iommu_superpage = 0; 1484 + 1668 1485 domain->nid = iommu->node; 1669 1486 1670 1487 /* always allocate the top pgd */ ··· 1682 1493 { 1683 1494 struct dmar_drhd_unit *drhd; 1684 1495 struct intel_iommu *iommu; 1496 + struct page *freelist = NULL; 1685 1497 1686 1498 /* Domain 0 is reserved, so dont process it */ 1687 1499 if (!domain) ··· 1692 1502 if (!intel_iommu_strict) 1693 1503 flush_unmaps_timeout(0); 1694 1504 1505 + /* remove associated devices */ 1695 1506 domain_remove_dev_info(domain); 1507 + 1696 1508 /* destroy iovas */ 1697 1509 put_iova_domain(&domain->iovad); 1698 1510 1699 - /* clear ptes */ 1700 - dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); 1511 + freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); 1701 1512 1702 - /* free page tables */ 1703 - dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); 1704 - 1513 + /* clear attached or cached domains */ 1514 + rcu_read_lock(); 1705 1515 for_each_active_iommu(iommu, drhd) 1706 - if (test_bit(iommu->seq_id, domain->iommu_bmp)) 1516 + if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || 1517 + test_bit(iommu->seq_id, domain->iommu_bmp)) 1707 1518 iommu_detach_domain(domain, iommu); 1519 + rcu_read_unlock(); 1520 + 1521 + dma_free_pagelist(freelist); 1708 1522 1709 1523 free_domain_mem(domain); 1710 1524 } 1711 1525 1712 - static int domain_context_mapping_one(struct dmar_domain *domain, int segment, 1713 - u8 bus, u8 devfn, int translation) 1526 + static int domain_context_mapping_one(struct dmar_domain *domain, 1527 + struct intel_iommu *iommu, 1528 + u8 bus, u8 devfn, int translation) 1714 1529 { 1715 1530 struct context_entry *context; 1716 1531 unsigned long flags; 1717 - struct intel_iommu *iommu; 1718 1532 struct dma_pte *pgd; 1719 1533 
unsigned long num; 1720 1534 unsigned long ndomains; ··· 1732 1538 BUG_ON(!domain->pgd); 1733 1539 BUG_ON(translation != CONTEXT_TT_PASS_THROUGH && 1734 1540 translation != CONTEXT_TT_MULTI_LEVEL); 1735 - 1736 - iommu = device_to_iommu(segment, bus, devfn); 1737 - if (!iommu) 1738 - return -ENODEV; 1739 1541 1740 1542 context = device_to_context_entry(iommu, bus, devfn); 1741 1543 if (!context) ··· 1790 1600 context_set_domain_id(context, id); 1791 1601 1792 1602 if (translation != CONTEXT_TT_PASS_THROUGH) { 1793 - info = iommu_support_dev_iotlb(domain, segment, bus, devfn); 1603 + info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); 1794 1604 translation = info ? CONTEXT_TT_DEV_IOTLB : 1795 1605 CONTEXT_TT_MULTI_LEVEL; 1796 1606 } ··· 1840 1650 } 1841 1651 1842 1652 static int 1843 - domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev, 1844 - int translation) 1653 + domain_context_mapping(struct dmar_domain *domain, struct device *dev, 1654 + int translation) 1845 1655 { 1846 1656 int ret; 1847 - struct pci_dev *tmp, *parent; 1657 + struct pci_dev *pdev, *tmp, *parent; 1658 + struct intel_iommu *iommu; 1659 + u8 bus, devfn; 1848 1660 1849 - ret = domain_context_mapping_one(domain, pci_domain_nr(pdev->bus), 1850 - pdev->bus->number, pdev->devfn, 1661 + iommu = device_to_iommu(dev, &bus, &devfn); 1662 + if (!iommu) 1663 + return -ENODEV; 1664 + 1665 + ret = domain_context_mapping_one(domain, iommu, bus, devfn, 1851 1666 translation); 1852 - if (ret) 1667 + if (ret || !dev_is_pci(dev)) 1853 1668 return ret; 1854 1669 1855 1670 /* dependent device mapping */ 1671 + pdev = to_pci_dev(dev); 1856 1672 tmp = pci_find_upstream_pcie_bridge(pdev); 1857 1673 if (!tmp) 1858 1674 return 0; 1859 1675 /* Secondary interface's bus number and devfn 0 */ 1860 1676 parent = pdev->bus->self; 1861 1677 while (parent != tmp) { 1862 - ret = domain_context_mapping_one(domain, 1863 - pci_domain_nr(parent->bus), 1678 + ret = domain_context_mapping_one(domain, 
iommu, 1864 1679 parent->bus->number, 1865 1680 parent->devfn, translation); 1866 1681 if (ret) ··· 1873 1678 parent = parent->bus->self; 1874 1679 } 1875 1680 if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */ 1876 - return domain_context_mapping_one(domain, 1877 - pci_domain_nr(tmp->subordinate), 1681 + return domain_context_mapping_one(domain, iommu, 1878 1682 tmp->subordinate->number, 0, 1879 1683 translation); 1880 1684 else /* this is a legacy PCI bridge */ 1881 - return domain_context_mapping_one(domain, 1882 - pci_domain_nr(tmp->bus), 1685 + return domain_context_mapping_one(domain, iommu, 1883 1686 tmp->bus->number, 1884 1687 tmp->devfn, 1885 1688 translation); 1886 1689 } 1887 1690 1888 - static int domain_context_mapped(struct pci_dev *pdev) 1691 + static int domain_context_mapped(struct device *dev) 1889 1692 { 1890 1693 int ret; 1891 - struct pci_dev *tmp, *parent; 1694 + struct pci_dev *pdev, *tmp, *parent; 1892 1695 struct intel_iommu *iommu; 1696 + u8 bus, devfn; 1893 1697 1894 - iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, 1895 - pdev->devfn); 1698 + iommu = device_to_iommu(dev, &bus, &devfn); 1896 1699 if (!iommu) 1897 1700 return -ENODEV; 1898 1701 1899 - ret = device_context_mapped(iommu, pdev->bus->number, pdev->devfn); 1900 - if (!ret) 1702 + ret = device_context_mapped(iommu, bus, devfn); 1703 + if (!ret || !dev_is_pci(dev)) 1901 1704 return ret; 1705 + 1902 1706 /* dependent device mapping */ 1707 + pdev = to_pci_dev(dev); 1903 1708 tmp = pci_find_upstream_pcie_bridge(pdev); 1904 1709 if (!tmp) 1905 1710 return ret; ··· 1995 1800 if (!pte) { 1996 1801 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); 1997 1802 1998 - first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl); 1803 + first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); 1999 1804 if (!pte) 2000 1805 return -ENOMEM; 2001 1806 /* It is large page*/ ··· 2094 1899 list_del(&info->link); 2095 1900 
list_del(&info->global); 2096 1901 if (info->dev) 2097 - info->dev->dev.archdata.iommu = NULL; 1902 + info->dev->archdata.iommu = NULL; 2098 1903 } 2099 1904 2100 1905 static void domain_remove_dev_info(struct dmar_domain *domain) 2101 1906 { 2102 1907 struct device_domain_info *info; 2103 - unsigned long flags; 2104 - struct intel_iommu *iommu; 1908 + unsigned long flags, flags2; 2105 1909 2106 1910 spin_lock_irqsave(&device_domain_lock, flags); 2107 1911 while (!list_empty(&domain->devices)) { ··· 2110 1916 spin_unlock_irqrestore(&device_domain_lock, flags); 2111 1917 2112 1918 iommu_disable_dev_iotlb(info); 2113 - iommu = device_to_iommu(info->segment, info->bus, info->devfn); 2114 - iommu_detach_dev(iommu, info->bus, info->devfn); 2115 - free_devinfo_mem(info); 1919 + iommu_detach_dev(info->iommu, info->bus, info->devfn); 2116 1920 1921 + if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { 1922 + iommu_detach_dependent_devices(info->iommu, info->dev); 1923 + /* clear this iommu in iommu_bmp, update iommu count 1924 + * and capabilities 1925 + */ 1926 + spin_lock_irqsave(&domain->iommu_lock, flags2); 1927 + if (test_and_clear_bit(info->iommu->seq_id, 1928 + domain->iommu_bmp)) { 1929 + domain->iommu_count--; 1930 + domain_update_iommu_cap(domain); 1931 + } 1932 + spin_unlock_irqrestore(&domain->iommu_lock, flags2); 1933 + } 1934 + 1935 + free_devinfo_mem(info); 2117 1936 spin_lock_irqsave(&device_domain_lock, flags); 2118 1937 } 2119 1938 spin_unlock_irqrestore(&device_domain_lock, flags); ··· 2134 1927 2135 1928 /* 2136 1929 * find_domain 2137 - * Note: we use struct pci_dev->dev.archdata.iommu stores the info 1930 + * Note: we use struct device->archdata.iommu stores the info 2138 1931 */ 2139 - static struct dmar_domain * 2140 - find_domain(struct pci_dev *pdev) 1932 + static struct dmar_domain *find_domain(struct device *dev) 2141 1933 { 2142 1934 struct device_domain_info *info; 2143 1935 2144 1936 /* No lock here, assumes no domain exit in normal case */ 
2145 - info = pdev->dev.archdata.iommu; 1937 + info = dev->archdata.iommu; 2146 1938 if (info) 2147 1939 return info->domain; 2148 1940 return NULL; 2149 1941 } 2150 1942 2151 - /* domain is initialized */ 2152 - static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) 1943 + static inline struct device_domain_info * 1944 + dmar_search_domain_by_dev_info(int segment, int bus, int devfn) 2153 1945 { 2154 - struct dmar_domain *domain, *found = NULL; 2155 - struct intel_iommu *iommu; 2156 - struct dmar_drhd_unit *drhd; 2157 - struct device_domain_info *info, *tmp; 2158 - struct pci_dev *dev_tmp; 2159 - unsigned long flags; 2160 - int bus = 0, devfn = 0; 2161 - int segment; 2162 - int ret; 1946 + struct device_domain_info *info; 2163 1947 2164 - domain = find_domain(pdev); 1948 + list_for_each_entry(info, &device_domain_list, global) 1949 + if (info->iommu->segment == segment && info->bus == bus && 1950 + info->devfn == devfn) 1951 + return info; 1952 + 1953 + return NULL; 1954 + } 1955 + 1956 + static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu, 1957 + int bus, int devfn, 1958 + struct device *dev, 1959 + struct dmar_domain *domain) 1960 + { 1961 + struct dmar_domain *found = NULL; 1962 + struct device_domain_info *info; 1963 + unsigned long flags; 1964 + 1965 + info = alloc_devinfo_mem(); 1966 + if (!info) 1967 + return NULL; 1968 + 1969 + info->bus = bus; 1970 + info->devfn = devfn; 1971 + info->dev = dev; 1972 + info->domain = domain; 1973 + info->iommu = iommu; 1974 + if (!dev) 1975 + domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; 1976 + 1977 + spin_lock_irqsave(&device_domain_lock, flags); 1978 + if (dev) 1979 + found = find_domain(dev); 1980 + else { 1981 + struct device_domain_info *info2; 1982 + info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn); 1983 + if (info2) 1984 + found = info2->domain; 1985 + } 1986 + if (found) { 1987 + spin_unlock_irqrestore(&device_domain_lock, flags); 1988 + 
free_devinfo_mem(info); 1989 + /* Caller must free the original domain */ 1990 + return found; 1991 + } 1992 + 1993 + list_add(&info->link, &domain->devices); 1994 + list_add(&info->global, &device_domain_list); 1995 + if (dev) 1996 + dev->archdata.iommu = info; 1997 + spin_unlock_irqrestore(&device_domain_lock, flags); 1998 + 1999 + return domain; 2000 + } 2001 + 2002 + /* domain is initialized */ 2003 + static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) 2004 + { 2005 + struct dmar_domain *domain, *free = NULL; 2006 + struct intel_iommu *iommu = NULL; 2007 + struct device_domain_info *info; 2008 + struct pci_dev *dev_tmp = NULL; 2009 + unsigned long flags; 2010 + u8 bus, devfn, bridge_bus, bridge_devfn; 2011 + 2012 + domain = find_domain(dev); 2165 2013 if (domain) 2166 2014 return domain; 2167 2015 2168 - segment = pci_domain_nr(pdev->bus); 2016 + if (dev_is_pci(dev)) { 2017 + struct pci_dev *pdev = to_pci_dev(dev); 2018 + u16 segment; 2169 2019 2170 - dev_tmp = pci_find_upstream_pcie_bridge(pdev); 2171 - if (dev_tmp) { 2172 - if (pci_is_pcie(dev_tmp)) { 2173 - bus = dev_tmp->subordinate->number; 2174 - devfn = 0; 2175 - } else { 2176 - bus = dev_tmp->bus->number; 2177 - devfn = dev_tmp->devfn; 2178 - } 2179 - spin_lock_irqsave(&device_domain_lock, flags); 2180 - list_for_each_entry(info, &device_domain_list, global) { 2181 - if (info->segment == segment && 2182 - info->bus == bus && info->devfn == devfn) { 2183 - found = info->domain; 2184 - break; 2020 + segment = pci_domain_nr(pdev->bus); 2021 + dev_tmp = pci_find_upstream_pcie_bridge(pdev); 2022 + if (dev_tmp) { 2023 + if (pci_is_pcie(dev_tmp)) { 2024 + bridge_bus = dev_tmp->subordinate->number; 2025 + bridge_devfn = 0; 2026 + } else { 2027 + bridge_bus = dev_tmp->bus->number; 2028 + bridge_devfn = dev_tmp->devfn; 2185 2029 } 2186 - } 2187 - spin_unlock_irqrestore(&device_domain_lock, flags); 2188 - /* pcie-pci bridge already has a domain, uses it */ 2189 - if (found) { 2190 - domain = 
found; 2191 - goto found_domain; 2030 + spin_lock_irqsave(&device_domain_lock, flags); 2031 + info = dmar_search_domain_by_dev_info(segment, bus, devfn); 2032 + if (info) { 2033 + iommu = info->iommu; 2034 + domain = info->domain; 2035 + } 2036 + spin_unlock_irqrestore(&device_domain_lock, flags); 2037 + /* pcie-pci bridge already has a domain, uses it */ 2038 + if (info) 2039 + goto found_domain; 2192 2040 } 2193 2041 } 2194 2042 2195 - domain = alloc_domain(); 2043 + iommu = device_to_iommu(dev, &bus, &devfn); 2044 + if (!iommu) 2045 + goto error; 2046 + 2047 + /* Allocate and initialize new domain for the device */ 2048 + domain = alloc_domain(false); 2196 2049 if (!domain) 2197 2050 goto error; 2198 - 2199 - /* Allocate new domain for the device */ 2200 - drhd = dmar_find_matched_drhd_unit(pdev); 2201 - if (!drhd) { 2202 - printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", 2203 - pci_name(pdev)); 2051 + if (iommu_attach_domain(domain, iommu)) { 2204 2052 free_domain_mem(domain); 2205 - return NULL; 2206 - } 2207 - iommu = drhd->iommu; 2208 - 2209 - ret = iommu_attach_domain(domain, iommu); 2210 - if (ret) { 2211 - free_domain_mem(domain); 2053 + domain = NULL; 2212 2054 goto error; 2213 2055 } 2214 - 2215 - if (domain_init(domain, gaw)) { 2216 - domain_exit(domain); 2056 + free = domain; 2057 + if (domain_init(domain, gaw)) 2217 2058 goto error; 2218 - } 2219 2059 2220 2060 /* register pcie-to-pci device */ 2221 2061 if (dev_tmp) { 2222 - info = alloc_devinfo_mem(); 2223 - if (!info) { 2224 - domain_exit(domain); 2062 + domain = dmar_insert_dev_info(iommu, bridge_bus, bridge_devfn, 2063 + NULL, domain); 2064 + if (!domain) 2225 2065 goto error; 2226 - } 2227 - info->segment = segment; 2228 - info->bus = bus; 2229 - info->devfn = devfn; 2230 - info->dev = NULL; 2231 - info->domain = domain; 2232 - /* This domain is shared by devices under p2p bridge */ 2233 - domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES; 2234 - 2235 - /* pcie-to-pci bridge already 
has a domain, uses it */ 2236 - found = NULL; 2237 - spin_lock_irqsave(&device_domain_lock, flags); 2238 - list_for_each_entry(tmp, &device_domain_list, global) { 2239 - if (tmp->segment == segment && 2240 - tmp->bus == bus && tmp->devfn == devfn) { 2241 - found = tmp->domain; 2242 - break; 2243 - } 2244 - } 2245 - if (found) { 2246 - spin_unlock_irqrestore(&device_domain_lock, flags); 2247 - free_devinfo_mem(info); 2248 - domain_exit(domain); 2249 - domain = found; 2250 - } else { 2251 - list_add(&info->link, &domain->devices); 2252 - list_add(&info->global, &device_domain_list); 2253 - spin_unlock_irqrestore(&device_domain_lock, flags); 2254 - } 2255 2066 } 2256 2067 2257 2068 found_domain: 2258 - info = alloc_devinfo_mem(); 2259 - if (!info) 2260 - goto error; 2261 - info->segment = segment; 2262 - info->bus = pdev->bus->number; 2263 - info->devfn = pdev->devfn; 2264 - info->dev = pdev; 2265 - info->domain = domain; 2266 - spin_lock_irqsave(&device_domain_lock, flags); 2267 - /* somebody is fast */ 2268 - found = find_domain(pdev); 2269 - if (found != NULL) { 2270 - spin_unlock_irqrestore(&device_domain_lock, flags); 2271 - if (found != domain) { 2272 - domain_exit(domain); 2273 - domain = found; 2274 - } 2275 - free_devinfo_mem(info); 2276 - return domain; 2277 - } 2278 - list_add(&info->link, &domain->devices); 2279 - list_add(&info->global, &device_domain_list); 2280 - pdev->dev.archdata.iommu = info; 2281 - spin_unlock_irqrestore(&device_domain_lock, flags); 2282 - return domain; 2069 + domain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); 2283 2070 error: 2284 - /* recheck it here, maybe others set it */ 2285 - return find_domain(pdev); 2071 + if (free != domain) 2072 + domain_exit(free); 2073 + 2074 + return domain; 2286 2075 } 2287 2076 2288 2077 static int iommu_identity_mapping; ··· 2312 2109 DMA_PTE_READ|DMA_PTE_WRITE); 2313 2110 } 2314 2111 2315 - static int iommu_prepare_identity_map(struct pci_dev *pdev, 2112 + static int 
iommu_prepare_identity_map(struct device *dev, 2316 2113 unsigned long long start, 2317 2114 unsigned long long end) 2318 2115 { 2319 2116 struct dmar_domain *domain; 2320 2117 int ret; 2321 2118 2322 - domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); 2119 + domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); 2323 2120 if (!domain) 2324 2121 return -ENOMEM; 2325 2122 ··· 2329 2126 up to start with in si_domain */ 2330 2127 if (domain == si_domain && hw_pass_through) { 2331 2128 printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n", 2332 - pci_name(pdev), start, end); 2129 + dev_name(dev), start, end); 2333 2130 return 0; 2334 2131 } 2335 2132 2336 2133 printk(KERN_INFO 2337 2134 "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", 2338 - pci_name(pdev), start, end); 2135 + dev_name(dev), start, end); 2339 2136 2340 2137 if (end < start) { 2341 2138 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n" ··· 2363 2160 goto error; 2364 2161 2365 2162 /* context entry init */ 2366 - ret = domain_context_mapping(domain, pdev, CONTEXT_TT_MULTI_LEVEL); 2163 + ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); 2367 2164 if (ret) 2368 2165 goto error; 2369 2166 ··· 2375 2172 } 2376 2173 2377 2174 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr, 2378 - struct pci_dev *pdev) 2175 + struct device *dev) 2379 2176 { 2380 - if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2177 + if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) 2381 2178 return 0; 2382 - return iommu_prepare_identity_map(pdev, rmrr->base_address, 2383 - rmrr->end_address); 2179 + return iommu_prepare_identity_map(dev, rmrr->base_address, 2180 + rmrr->end_address); 2384 2181 } 2385 2182 2386 2183 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA ··· 2394 2191 return; 2395 2192 2396 2193 printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n"); 2397 - ret = iommu_prepare_identity_map(pdev, 0, 
16*1024*1024 - 1); 2194 + ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1); 2398 2195 2399 2196 if (ret) 2400 2197 printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; " ··· 2416 2213 struct intel_iommu *iommu; 2417 2214 int nid, ret = 0; 2418 2215 2419 - si_domain = alloc_domain(); 2216 + si_domain = alloc_domain(false); 2420 2217 if (!si_domain) 2421 2218 return -EFAULT; 2219 + 2220 + si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; 2422 2221 2423 2222 for_each_active_iommu(iommu, drhd) { 2424 2223 ret = iommu_attach_domain(si_domain, iommu); ··· 2435 2230 return -EFAULT; 2436 2231 } 2437 2232 2438 - si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; 2439 2233 pr_debug("IOMMU: identity mapping domain is domain %d\n", 2440 2234 si_domain->id); 2441 2235 ··· 2456 2252 return 0; 2457 2253 } 2458 2254 2459 - static void domain_remove_one_dev_info(struct dmar_domain *domain, 2460 - struct pci_dev *pdev); 2461 - static int identity_mapping(struct pci_dev *pdev) 2255 + static int identity_mapping(struct device *dev) 2462 2256 { 2463 2257 struct device_domain_info *info; 2464 2258 2465 2259 if (likely(!iommu_identity_mapping)) 2466 2260 return 0; 2467 2261 2468 - info = pdev->dev.archdata.iommu; 2262 + info = dev->archdata.iommu; 2469 2263 if (info && info != DUMMY_DEVICE_DOMAIN_INFO) 2470 2264 return (info->domain == si_domain); 2471 2265 ··· 2471 2269 } 2472 2270 2473 2271 static int domain_add_dev_info(struct dmar_domain *domain, 2474 - struct pci_dev *pdev, 2475 - int translation) 2272 + struct device *dev, int translation) 2476 2273 { 2477 - struct device_domain_info *info; 2478 - unsigned long flags; 2274 + struct dmar_domain *ndomain; 2275 + struct intel_iommu *iommu; 2276 + u8 bus, devfn; 2479 2277 int ret; 2480 2278 2481 - info = alloc_devinfo_mem(); 2482 - if (!info) 2483 - return -ENOMEM; 2279 + iommu = device_to_iommu(dev, &bus, &devfn); 2280 + if (!iommu) 2281 + return -ENODEV; 2484 2282 2485 - info->segment = pci_domain_nr(pdev->bus); 
2486 - info->bus = pdev->bus->number; 2487 - info->devfn = pdev->devfn; 2488 - info->dev = pdev; 2489 - info->domain = domain; 2283 + ndomain = dmar_insert_dev_info(iommu, bus, devfn, dev, domain); 2284 + if (ndomain != domain) 2285 + return -EBUSY; 2490 2286 2491 - spin_lock_irqsave(&device_domain_lock, flags); 2492 - list_add(&info->link, &domain->devices); 2493 - list_add(&info->global, &device_domain_list); 2494 - pdev->dev.archdata.iommu = info; 2495 - spin_unlock_irqrestore(&device_domain_lock, flags); 2496 - 2497 - ret = domain_context_mapping(domain, pdev, translation); 2287 + ret = domain_context_mapping(domain, dev, translation); 2498 2288 if (ret) { 2499 - spin_lock_irqsave(&device_domain_lock, flags); 2500 - unlink_domain_info(info); 2501 - spin_unlock_irqrestore(&device_domain_lock, flags); 2502 - free_devinfo_mem(info); 2289 + domain_remove_one_dev_info(domain, dev); 2503 2290 return ret; 2504 2291 } 2505 2292 2506 2293 return 0; 2507 2294 } 2508 2295 2509 - static bool device_has_rmrr(struct pci_dev *dev) 2296 + static bool device_has_rmrr(struct device *dev) 2510 2297 { 2511 2298 struct dmar_rmrr_unit *rmrr; 2299 + struct device *tmp; 2512 2300 int i; 2513 2301 2302 + rcu_read_lock(); 2514 2303 for_each_rmrr_units(rmrr) { 2515 - for (i = 0; i < rmrr->devices_cnt; i++) { 2516 - /* 2517 - * Return TRUE if this RMRR contains the device that 2518 - * is passed in. 2519 - */ 2520 - if (rmrr->devices[i] == dev) 2304 + /* 2305 + * Return TRUE if this RMRR contains the device that 2306 + * is passed in. 
2307 + */ 2308 + for_each_active_dev_scope(rmrr->devices, 2309 + rmrr->devices_cnt, i, tmp) 2310 + if (tmp == dev) { 2311 + rcu_read_unlock(); 2521 2312 return true; 2522 - } 2313 + } 2523 2314 } 2315 + rcu_read_unlock(); 2524 2316 return false; 2525 2317 } 2526 2318 2527 - static int iommu_should_identity_map(struct pci_dev *pdev, int startup) 2319 + static int iommu_should_identity_map(struct device *dev, int startup) 2528 2320 { 2529 2321 2530 - /* 2531 - * We want to prevent any device associated with an RMRR from 2532 - * getting placed into the SI Domain. This is done because 2533 - * problems exist when devices are moved in and out of domains 2534 - * and their respective RMRR info is lost. We exempt USB devices 2535 - * from this process due to their usage of RMRRs that are known 2536 - * to not be needed after BIOS hand-off to OS. 2537 - */ 2538 - if (device_has_rmrr(pdev) && 2539 - (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) 2540 - return 0; 2322 + if (dev_is_pci(dev)) { 2323 + struct pci_dev *pdev = to_pci_dev(dev); 2541 2324 2542 - if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) 2543 - return 1; 2544 - 2545 - if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) 2546 - return 1; 2547 - 2548 - if (!(iommu_identity_mapping & IDENTMAP_ALL)) 2549 - return 0; 2550 - 2551 - /* 2552 - * We want to start off with all devices in the 1:1 domain, and 2553 - * take them out later if we find they can't access all of memory. 2554 - * 2555 - * However, we can't do this for PCI devices behind bridges, 2556 - * because all PCI devices behind the same bridge will end up 2557 - * with the same source-id on their transactions. 2558 - * 2559 - * Practically speaking, we can't change things around for these 2560 - * devices at run-time, because we can't be sure there'll be no 2561 - * DMA transactions in flight for any of their siblings. 
2562 - * 2563 - * So PCI devices (unless they're on the root bus) as well as 2564 - * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of 2565 - * the 1:1 domain, just in _case_ one of their siblings turns out 2566 - * not to be able to map all of memory. 2567 - */ 2568 - if (!pci_is_pcie(pdev)) { 2569 - if (!pci_is_root_bus(pdev->bus)) 2325 + /* 2326 + * We want to prevent any device associated with an RMRR from 2327 + * getting placed into the SI Domain. This is done because 2328 + * problems exist when devices are moved in and out of domains 2329 + * and their respective RMRR info is lost. We exempt USB devices 2330 + * from this process due to their usage of RMRRs that are known 2331 + * to not be needed after BIOS hand-off to OS. 2332 + */ 2333 + if (device_has_rmrr(dev) && 2334 + (pdev->class >> 8) != PCI_CLASS_SERIAL_USB) 2570 2335 return 0; 2571 - if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) 2572 - return 0; 2573 - } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) 2574 - return 0; 2575 2336 2576 - /* 2337 + if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) 2338 + return 1; 2339 + 2340 + if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev)) 2341 + return 1; 2342 + 2343 + if (!(iommu_identity_mapping & IDENTMAP_ALL)) 2344 + return 0; 2345 + 2346 + /* 2347 + * We want to start off with all devices in the 1:1 domain, and 2348 + * take them out later if we find they can't access all of memory. 2349 + * 2350 + * However, we can't do this for PCI devices behind bridges, 2351 + * because all PCI devices behind the same bridge will end up 2352 + * with the same source-id on their transactions. 2353 + * 2354 + * Practically speaking, we can't change things around for these 2355 + * devices at run-time, because we can't be sure there'll be no 2356 + * DMA transactions in flight for any of their siblings. 
2357 + * 2358 + * So PCI devices (unless they're on the root bus) as well as 2359 + * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of 2360 + * the 1:1 domain, just in _case_ one of their siblings turns out 2361 + * not to be able to map all of memory. 2362 + */ 2363 + if (!pci_is_pcie(pdev)) { 2364 + if (!pci_is_root_bus(pdev->bus)) 2365 + return 0; 2366 + if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) 2367 + return 0; 2368 + } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE) 2369 + return 0; 2370 + } else { 2371 + if (device_has_rmrr(dev)) 2372 + return 0; 2373 + } 2374 + 2375 + /* 2577 2376 * At boot time, we don't yet know if devices will be 64-bit capable. 2578 - * Assume that they will -- if they turn out not to be, then we can 2377 + * Assume that they will — if they turn out not to be, then we can 2579 2378 * take them out of the 1:1 domain later. 2580 2379 */ 2581 2380 if (!startup) { ··· 2584 2381 * If the device's dma_mask is less than the system's memory 2585 2382 * size then this is not a candidate for identity mapping. 2586 2383 */ 2587 - u64 dma_mask = pdev->dma_mask; 2384 + u64 dma_mask = *dev->dma_mask; 2588 2385 2589 - if (pdev->dev.coherent_dma_mask && 2590 - pdev->dev.coherent_dma_mask < dma_mask) 2591 - dma_mask = pdev->dev.coherent_dma_mask; 2386 + if (dev->coherent_dma_mask && 2387 + dev->coherent_dma_mask < dma_mask) 2388 + dma_mask = dev->coherent_dma_mask; 2592 2389 2593 - return dma_mask >= dma_get_required_mask(&pdev->dev); 2390 + return dma_mask >= dma_get_required_mask(dev); 2594 2391 } 2595 2392 2596 2393 return 1; 2597 2394 } 2598 2395 2396 + static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw) 2397 + { 2398 + int ret; 2399 + 2400 + if (!iommu_should_identity_map(dev, 1)) 2401 + return 0; 2402 + 2403 + ret = domain_add_dev_info(si_domain, dev, 2404 + hw ? 
CONTEXT_TT_PASS_THROUGH : 2405 + CONTEXT_TT_MULTI_LEVEL); 2406 + if (!ret) 2407 + pr_info("IOMMU: %s identity mapping for device %s\n", 2408 + hw ? "hardware" : "software", dev_name(dev)); 2409 + else if (ret == -ENODEV) 2410 + /* device not associated with an iommu */ 2411 + ret = 0; 2412 + 2413 + return ret; 2414 + } 2415 + 2416 + 2599 2417 static int __init iommu_prepare_static_identity_mapping(int hw) 2600 2418 { 2601 2419 struct pci_dev *pdev = NULL; 2602 - int ret; 2420 + struct dmar_drhd_unit *drhd; 2421 + struct intel_iommu *iommu; 2422 + struct device *dev; 2423 + int i; 2424 + int ret = 0; 2603 2425 2604 2426 ret = si_domain_init(hw); 2605 2427 if (ret) 2606 2428 return -EFAULT; 2607 2429 2608 2430 for_each_pci_dev(pdev) { 2609 - if (iommu_should_identity_map(pdev, 1)) { 2610 - ret = domain_add_dev_info(si_domain, pdev, 2611 - hw ? CONTEXT_TT_PASS_THROUGH : 2612 - CONTEXT_TT_MULTI_LEVEL); 2613 - if (ret) { 2614 - /* device not associated with an iommu */ 2615 - if (ret == -ENODEV) 2616 - continue; 2617 - return ret; 2618 - } 2619 - pr_info("IOMMU: %s identity mapping for device %s\n", 2620 - hw ? 
"hardware" : "software", pci_name(pdev)); 2621 - } 2431 + ret = dev_prepare_static_identity_mapping(&pdev->dev, hw); 2432 + if (ret) 2433 + return ret; 2622 2434 } 2435 + 2436 + for_each_active_iommu(iommu, drhd) 2437 + for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) { 2438 + struct acpi_device_physical_node *pn; 2439 + struct acpi_device *adev; 2440 + 2441 + if (dev->bus != &acpi_bus_type) 2442 + continue; 2443 + 2444 + adev= to_acpi_device(dev); 2445 + mutex_lock(&adev->physical_node_lock); 2446 + list_for_each_entry(pn, &adev->physical_node_list, node) { 2447 + ret = dev_prepare_static_identity_mapping(pn->dev, hw); 2448 + if (ret) 2449 + break; 2450 + } 2451 + mutex_unlock(&adev->physical_node_lock); 2452 + if (ret) 2453 + return ret; 2454 + } 2623 2455 2624 2456 return 0; 2625 2457 } ··· 2663 2425 { 2664 2426 struct dmar_drhd_unit *drhd; 2665 2427 struct dmar_rmrr_unit *rmrr; 2666 - struct pci_dev *pdev; 2428 + struct device *dev; 2667 2429 struct intel_iommu *iommu; 2668 2430 int i, ret; 2669 2431 ··· 2699 2461 sizeof(struct deferred_flush_tables), GFP_KERNEL); 2700 2462 if (!deferred_flush) { 2701 2463 ret = -ENOMEM; 2702 - goto error; 2464 + goto free_g_iommus; 2703 2465 } 2704 2466 2705 2467 for_each_active_iommu(iommu, drhd) { ··· 2707 2469 2708 2470 ret = iommu_init_domains(iommu); 2709 2471 if (ret) 2710 - goto error; 2472 + goto free_iommu; 2711 2473 2712 2474 /* 2713 2475 * TBD: ··· 2717 2479 ret = iommu_alloc_root_entry(iommu); 2718 2480 if (ret) { 2719 2481 printk(KERN_ERR "IOMMU: allocate root entry failed\n"); 2720 - goto error; 2482 + goto free_iommu; 2721 2483 } 2722 2484 if (!ecap_pass_through(iommu->ecap)) 2723 2485 hw_pass_through = 0; ··· 2786 2548 ret = iommu_prepare_static_identity_mapping(hw_pass_through); 2787 2549 if (ret) { 2788 2550 printk(KERN_CRIT "Failed to setup IOMMU pass-through\n"); 2789 - goto error; 2551 + goto free_iommu; 2790 2552 } 2791 2553 } 2792 2554 /* ··· 2805 2567 */ 2806 2568 printk(KERN_INFO 
"IOMMU: Setting RMRR:\n"); 2807 2569 for_each_rmrr_units(rmrr) { 2808 - for (i = 0; i < rmrr->devices_cnt; i++) { 2809 - pdev = rmrr->devices[i]; 2810 - /* 2811 - * some BIOS lists non-exist devices in DMAR 2812 - * table. 2813 - */ 2814 - if (!pdev) 2815 - continue; 2816 - ret = iommu_prepare_rmrr_dev(rmrr, pdev); 2570 + /* some BIOS lists non-exist devices in DMAR table. */ 2571 + for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, 2572 + i, dev) { 2573 + ret = iommu_prepare_rmrr_dev(rmrr, dev); 2817 2574 if (ret) 2818 2575 printk(KERN_ERR 2819 2576 "IOMMU: mapping reserved region failed\n"); ··· 2839 2606 2840 2607 ret = dmar_set_interrupt(iommu); 2841 2608 if (ret) 2842 - goto error; 2609 + goto free_iommu; 2843 2610 2844 2611 iommu_set_root_entry(iommu); 2845 2612 ··· 2848 2615 2849 2616 ret = iommu_enable_translation(iommu); 2850 2617 if (ret) 2851 - goto error; 2618 + goto free_iommu; 2852 2619 2853 2620 iommu_disable_protect_mem_regions(iommu); 2854 2621 } 2855 2622 2856 2623 return 0; 2857 - error: 2624 + 2625 + free_iommu: 2858 2626 for_each_active_iommu(iommu, drhd) 2859 2627 free_dmar_iommu(iommu); 2860 2628 kfree(deferred_flush); 2629 + free_g_iommus: 2861 2630 kfree(g_iommus); 2631 + error: 2862 2632 return ret; 2863 2633 } 2864 2634 ··· 2870 2634 struct dmar_domain *domain, 2871 2635 unsigned long nrpages, uint64_t dma_mask) 2872 2636 { 2873 - struct pci_dev *pdev = to_pci_dev(dev); 2874 2637 struct iova *iova = NULL; 2875 2638 2876 2639 /* Restrict dma_mask to the width that the iommu can handle */ ··· 2889 2654 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); 2890 2655 if (unlikely(!iova)) { 2891 2656 printk(KERN_ERR "Allocating %ld-page iova for %s failed", 2892 - nrpages, pci_name(pdev)); 2657 + nrpages, dev_name(dev)); 2893 2658 return NULL; 2894 2659 } 2895 2660 2896 2661 return iova; 2897 2662 } 2898 2663 2899 - static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev) 2664 + static struct 
dmar_domain *__get_valid_domain_for_dev(struct device *dev) 2900 2665 { 2901 2666 struct dmar_domain *domain; 2902 2667 int ret; 2903 2668 2904 - domain = get_domain_for_dev(pdev, 2905 - DEFAULT_DOMAIN_ADDRESS_WIDTH); 2669 + domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); 2906 2670 if (!domain) { 2907 - printk(KERN_ERR 2908 - "Allocating domain for %s failed", pci_name(pdev)); 2671 + printk(KERN_ERR "Allocating domain for %s failed", 2672 + dev_name(dev)); 2909 2673 return NULL; 2910 2674 } 2911 2675 2912 2676 /* make sure context mapping is ok */ 2913 - if (unlikely(!domain_context_mapped(pdev))) { 2914 - ret = domain_context_mapping(domain, pdev, 2915 - CONTEXT_TT_MULTI_LEVEL); 2677 + if (unlikely(!domain_context_mapped(dev))) { 2678 + ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL); 2916 2679 if (ret) { 2917 - printk(KERN_ERR 2918 - "Domain context map for %s failed", 2919 - pci_name(pdev)); 2680 + printk(KERN_ERR "Domain context map for %s failed", 2681 + dev_name(dev)); 2920 2682 return NULL; 2921 2683 } 2922 2684 } ··· 2921 2689 return domain; 2922 2690 } 2923 2691 2924 - static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev) 2692 + static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) 2925 2693 { 2926 2694 struct device_domain_info *info; 2927 2695 2928 2696 /* No lock here, assumes no domain exit in normal case */ 2929 - info = dev->dev.archdata.iommu; 2697 + info = dev->archdata.iommu; 2930 2698 if (likely(info)) 2931 2699 return info->domain; 2932 2700 2933 2701 return __get_valid_domain_for_dev(dev); 2934 2702 } 2935 2703 2936 - static int iommu_dummy(struct pci_dev *pdev) 2704 + static int iommu_dummy(struct device *dev) 2937 2705 { 2938 - return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; 2706 + return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; 2939 2707 } 2940 2708 2941 - /* Check if the pdev needs to go through non-identity map and unmap process.*/ 
2709 + /* Check if the dev needs to go through non-identity map and unmap process.*/ 2942 2710 static int iommu_no_mapping(struct device *dev) 2943 2711 { 2944 - struct pci_dev *pdev; 2945 2712 int found; 2946 2713 2947 - if (unlikely(!dev_is_pci(dev))) 2948 - return 1; 2949 - 2950 - pdev = to_pci_dev(dev); 2951 - if (iommu_dummy(pdev)) 2714 + if (iommu_dummy(dev)) 2952 2715 return 1; 2953 2716 2954 2717 if (!iommu_identity_mapping) 2955 2718 return 0; 2956 2719 2957 - found = identity_mapping(pdev); 2720 + found = identity_mapping(dev); 2958 2721 if (found) { 2959 - if (iommu_should_identity_map(pdev, 0)) 2722 + if (iommu_should_identity_map(dev, 0)) 2960 2723 return 1; 2961 2724 else { 2962 2725 /* 2963 2726 * 32 bit DMA is removed from si_domain and fall back 2964 2727 * to non-identity mapping. 2965 2728 */ 2966 - domain_remove_one_dev_info(si_domain, pdev); 2729 + domain_remove_one_dev_info(si_domain, dev); 2967 2730 printk(KERN_INFO "32bit %s uses non-identity mapping\n", 2968 - pci_name(pdev)); 2731 + dev_name(dev)); 2969 2732 return 0; 2970 2733 } 2971 2734 } else { ··· 2968 2741 * In case of a detached 64 bit DMA device from vm, the device 2969 2742 * is put into si_domain for identity mapping. 2970 2743 */ 2971 - if (iommu_should_identity_map(pdev, 0)) { 2744 + if (iommu_should_identity_map(dev, 0)) { 2972 2745 int ret; 2973 - ret = domain_add_dev_info(si_domain, pdev, 2746 + ret = domain_add_dev_info(si_domain, dev, 2974 2747 hw_pass_through ? 
2975 2748 CONTEXT_TT_PASS_THROUGH : 2976 2749 CONTEXT_TT_MULTI_LEVEL); 2977 2750 if (!ret) { 2978 2751 printk(KERN_INFO "64bit %s uses identity mapping\n", 2979 - pci_name(pdev)); 2752 + dev_name(dev)); 2980 2753 return 1; 2981 2754 } 2982 2755 } ··· 2985 2758 return 0; 2986 2759 } 2987 2760 2988 - static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, 2761 + static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, 2989 2762 size_t size, int dir, u64 dma_mask) 2990 2763 { 2991 - struct pci_dev *pdev = to_pci_dev(hwdev); 2992 2764 struct dmar_domain *domain; 2993 2765 phys_addr_t start_paddr; 2994 2766 struct iova *iova; ··· 2998 2772 2999 2773 BUG_ON(dir == DMA_NONE); 3000 2774 3001 - if (iommu_no_mapping(hwdev)) 2775 + if (iommu_no_mapping(dev)) 3002 2776 return paddr; 3003 2777 3004 - domain = get_valid_domain_for_dev(pdev); 2778 + domain = get_valid_domain_for_dev(dev); 3005 2779 if (!domain) 3006 2780 return 0; 3007 2781 3008 2782 iommu = domain_get_iommu(domain); 3009 2783 size = aligned_nrpages(paddr, size); 3010 2784 3011 - iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask); 2785 + iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); 3012 2786 if (!iova) 3013 2787 goto error; 3014 2788 ··· 3034 2808 3035 2809 /* it's a non-present to present mapping. 
Only flush if caching mode */ 3036 2810 if (cap_caching_mode(iommu->cap)) 3037 - iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 1); 2811 + iommu_flush_iotlb_psi(iommu, domain->id, mm_to_dma_pfn(iova->pfn_lo), size, 0, 1); 3038 2812 else 3039 2813 iommu_flush_write_buffer(iommu); 3040 2814 ··· 3046 2820 if (iova) 3047 2821 __free_iova(&domain->iovad, iova); 3048 2822 printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n", 3049 - pci_name(pdev), size, (unsigned long long)paddr, dir); 2823 + dev_name(dev), size, (unsigned long long)paddr, dir); 3050 2824 return 0; 3051 2825 } 3052 2826 ··· 3056 2830 struct dma_attrs *attrs) 3057 2831 { 3058 2832 return __intel_map_single(dev, page_to_phys(page) + offset, size, 3059 - dir, to_pci_dev(dev)->dma_mask); 2833 + dir, *dev->dma_mask); 3060 2834 } 3061 2835 3062 2836 static void flush_unmaps(void) ··· 3086 2860 /* On real hardware multiple invalidations are expensive */ 3087 2861 if (cap_caching_mode(iommu->cap)) 3088 2862 iommu_flush_iotlb_psi(iommu, domain->id, 3089 - iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 0); 2863 + iova->pfn_lo, iova->pfn_hi - iova->pfn_lo + 1, 2864 + !deferred_flush[i].freelist[j], 0); 3090 2865 else { 3091 2866 mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1)); 3092 2867 iommu_flush_dev_iotlb(deferred_flush[i].domain[j], 3093 2868 (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask); 3094 2869 } 3095 2870 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); 2871 + if (deferred_flush[i].freelist[j]) 2872 + dma_free_pagelist(deferred_flush[i].freelist[j]); 3096 2873 } 3097 2874 deferred_flush[i].next = 0; 3098 2875 } ··· 3112 2883 spin_unlock_irqrestore(&async_umap_flush_lock, flags); 3113 2884 } 3114 2885 3115 - static void add_unmap(struct dmar_domain *dom, struct iova *iova) 2886 + static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist) 3116 2887 { 3117 2888 unsigned long flags; 3118 2889 int next, iommu_id; ··· 3128 
2899 next = deferred_flush[iommu_id].next; 3129 2900 deferred_flush[iommu_id].domain[next] = dom; 3130 2901 deferred_flush[iommu_id].iova[next] = iova; 2902 + deferred_flush[iommu_id].freelist[next] = freelist; 3131 2903 deferred_flush[iommu_id].next++; 3132 2904 3133 2905 if (!timer_on) { ··· 3143 2913 size_t size, enum dma_data_direction dir, 3144 2914 struct dma_attrs *attrs) 3145 2915 { 3146 - struct pci_dev *pdev = to_pci_dev(dev); 3147 2916 struct dmar_domain *domain; 3148 2917 unsigned long start_pfn, last_pfn; 3149 2918 struct iova *iova; 3150 2919 struct intel_iommu *iommu; 2920 + struct page *freelist; 3151 2921 3152 2922 if (iommu_no_mapping(dev)) 3153 2923 return; 3154 2924 3155 - domain = find_domain(pdev); 2925 + domain = find_domain(dev); 3156 2926 BUG_ON(!domain); 3157 2927 3158 2928 iommu = domain_get_iommu(domain); ··· 3166 2936 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; 3167 2937 3168 2938 pr_debug("Device %s unmapping: pfn %lx-%lx\n", 3169 - pci_name(pdev), start_pfn, last_pfn); 2939 + dev_name(dev), start_pfn, last_pfn); 3170 2940 3171 - /* clear the whole page */ 3172 - dma_pte_clear_range(domain, start_pfn, last_pfn); 3173 - 3174 - /* free page tables */ 3175 - dma_pte_free_pagetable(domain, start_pfn, last_pfn); 2941 + freelist = domain_unmap(domain, start_pfn, last_pfn); 3176 2942 3177 2943 if (intel_iommu_strict) { 3178 2944 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, 3179 - last_pfn - start_pfn + 1, 0); 2945 + last_pfn - start_pfn + 1, !freelist, 0); 3180 2946 /* free iova */ 3181 2947 __free_iova(&domain->iovad, iova); 2948 + dma_free_pagelist(freelist); 3182 2949 } else { 3183 - add_unmap(domain, iova); 2950 + add_unmap(domain, iova, freelist); 3184 2951 /* 3185 2952 * queue up the release of the unmap to save the 1/6th of the 3186 2953 * cpu used up by the iotlb flush operation... 
··· 3185 2958 } 3186 2959 } 3187 2960 3188 - static void *intel_alloc_coherent(struct device *hwdev, size_t size, 2961 + static void *intel_alloc_coherent(struct device *dev, size_t size, 3189 2962 dma_addr_t *dma_handle, gfp_t flags, 3190 2963 struct dma_attrs *attrs) 3191 2964 { ··· 3195 2968 size = PAGE_ALIGN(size); 3196 2969 order = get_order(size); 3197 2970 3198 - if (!iommu_no_mapping(hwdev)) 2971 + if (!iommu_no_mapping(dev)) 3199 2972 flags &= ~(GFP_DMA | GFP_DMA32); 3200 - else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) { 3201 - if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32)) 2973 + else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) { 2974 + if (dev->coherent_dma_mask < DMA_BIT_MASK(32)) 3202 2975 flags |= GFP_DMA; 3203 2976 else 3204 2977 flags |= GFP_DMA32; ··· 3209 2982 return NULL; 3210 2983 memset(vaddr, 0, size); 3211 2984 3212 - *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, 2985 + *dma_handle = __intel_map_single(dev, virt_to_bus(vaddr), size, 3213 2986 DMA_BIDIRECTIONAL, 3214 - hwdev->coherent_dma_mask); 2987 + dev->coherent_dma_mask); 3215 2988 if (*dma_handle) 3216 2989 return vaddr; 3217 2990 free_pages((unsigned long)vaddr, order); 3218 2991 return NULL; 3219 2992 } 3220 2993 3221 - static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, 2994 + static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, 3222 2995 dma_addr_t dma_handle, struct dma_attrs *attrs) 3223 2996 { 3224 2997 int order; ··· 3226 2999 size = PAGE_ALIGN(size); 3227 3000 order = get_order(size); 3228 3001 3229 - intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); 3002 + intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); 3230 3003 free_pages((unsigned long)vaddr, order); 3231 3004 } 3232 3005 3233 - static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, 3006 + static void intel_unmap_sg(struct device *dev, struct scatterlist 
*sglist, 3234 3007 int nelems, enum dma_data_direction dir, 3235 3008 struct dma_attrs *attrs) 3236 3009 { 3237 - struct pci_dev *pdev = to_pci_dev(hwdev); 3238 3010 struct dmar_domain *domain; 3239 3011 unsigned long start_pfn, last_pfn; 3240 3012 struct iova *iova; 3241 3013 struct intel_iommu *iommu; 3014 + struct page *freelist; 3242 3015 3243 - if (iommu_no_mapping(hwdev)) 3016 + if (iommu_no_mapping(dev)) 3244 3017 return; 3245 3018 3246 - domain = find_domain(pdev); 3019 + domain = find_domain(dev); 3247 3020 BUG_ON(!domain); 3248 3021 3249 3022 iommu = domain_get_iommu(domain); ··· 3256 3029 start_pfn = mm_to_dma_pfn(iova->pfn_lo); 3257 3030 last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1; 3258 3031 3259 - /* clear the whole page */ 3260 - dma_pte_clear_range(domain, start_pfn, last_pfn); 3261 - 3262 - /* free page tables */ 3263 - dma_pte_free_pagetable(domain, start_pfn, last_pfn); 3032 + freelist = domain_unmap(domain, start_pfn, last_pfn); 3264 3033 3265 3034 if (intel_iommu_strict) { 3266 3035 iommu_flush_iotlb_psi(iommu, domain->id, start_pfn, 3267 - last_pfn - start_pfn + 1, 0); 3036 + last_pfn - start_pfn + 1, !freelist, 0); 3268 3037 /* free iova */ 3269 3038 __free_iova(&domain->iovad, iova); 3039 + dma_free_pagelist(freelist); 3270 3040 } else { 3271 - add_unmap(domain, iova); 3041 + add_unmap(domain, iova, freelist); 3272 3042 /* 3273 3043 * queue up the release of the unmap to save the 1/6th of the 3274 3044 * cpu used up by the iotlb flush operation... 
··· 3287 3063 return nelems; 3288 3064 } 3289 3065 3290 - static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, 3066 + static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, 3291 3067 enum dma_data_direction dir, struct dma_attrs *attrs) 3292 3068 { 3293 3069 int i; 3294 - struct pci_dev *pdev = to_pci_dev(hwdev); 3295 3070 struct dmar_domain *domain; 3296 3071 size_t size = 0; 3297 3072 int prot = 0; ··· 3301 3078 struct intel_iommu *iommu; 3302 3079 3303 3080 BUG_ON(dir == DMA_NONE); 3304 - if (iommu_no_mapping(hwdev)) 3305 - return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); 3081 + if (iommu_no_mapping(dev)) 3082 + return intel_nontranslate_map_sg(dev, sglist, nelems, dir); 3306 3083 3307 - domain = get_valid_domain_for_dev(pdev); 3084 + domain = get_valid_domain_for_dev(dev); 3308 3085 if (!domain) 3309 3086 return 0; 3310 3087 ··· 3313 3090 for_each_sg(sglist, sg, nelems, i) 3314 3091 size += aligned_nrpages(sg->offset, sg->length); 3315 3092 3316 - iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), 3317 - pdev->dma_mask); 3093 + iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), 3094 + *dev->dma_mask); 3318 3095 if (!iova) { 3319 3096 sglist->dma_length = 0; 3320 3097 return 0; ··· 3347 3124 3348 3125 /* it's a non-present to present mapping. 
Only flush if caching mode */ 3349 3126 if (cap_caching_mode(iommu->cap)) 3350 - iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 1); 3127 + iommu_flush_iotlb_psi(iommu, domain->id, start_vpfn, size, 0, 1); 3351 3128 else 3352 3129 iommu_flush_write_buffer(iommu); 3353 3130 ··· 3482 3259 static void __init init_no_remapping_devices(void) 3483 3260 { 3484 3261 struct dmar_drhd_unit *drhd; 3262 + struct device *dev; 3263 + int i; 3485 3264 3486 3265 for_each_drhd_unit(drhd) { 3487 3266 if (!drhd->include_all) { 3488 - int i; 3489 - for (i = 0; i < drhd->devices_cnt; i++) 3490 - if (drhd->devices[i] != NULL) 3491 - break; 3492 - /* ignore DMAR unit if no pci devices exist */ 3267 + for_each_active_dev_scope(drhd->devices, 3268 + drhd->devices_cnt, i, dev) 3269 + break; 3270 + /* ignore DMAR unit if no devices exist */ 3493 3271 if (i == drhd->devices_cnt) 3494 3272 drhd->ignored = 1; 3495 3273 } 3496 3274 } 3497 3275 3498 3276 for_each_active_drhd_unit(drhd) { 3499 - int i; 3500 3277 if (drhd->include_all) 3501 3278 continue; 3502 3279 3503 - for (i = 0; i < drhd->devices_cnt; i++) 3504 - if (drhd->devices[i] && 3505 - !IS_GFX_DEVICE(drhd->devices[i])) 3280 + for_each_active_dev_scope(drhd->devices, 3281 + drhd->devices_cnt, i, dev) 3282 + if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev))) 3506 3283 break; 3507 - 3508 3284 if (i < drhd->devices_cnt) 3509 3285 continue; 3510 3286 ··· 3513 3291 intel_iommu_gfx_mapped = 1; 3514 3292 } else { 3515 3293 drhd->ignored = 1; 3516 - for (i = 0; i < drhd->devices_cnt; i++) { 3517 - if (!drhd->devices[i]) 3518 - continue; 3519 - drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; 3520 - } 3294 + for_each_active_dev_scope(drhd->devices, 3295 + drhd->devices_cnt, i, dev) 3296 + dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; 3521 3297 } 3522 3298 } 3523 3299 } ··· 3658 3438 static inline void init_iommu_pm_ops(void) {} 3659 3439 #endif /* CONFIG_PM */ 3660 3440 3661 - LIST_HEAD(dmar_rmrr_units); 3662 - 
3663 - static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) 3664 - { 3665 - list_add(&rmrr->list, &dmar_rmrr_units); 3666 - } 3667 - 3668 3441 3669 3442 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) 3670 3443 { ··· 3672 3459 rmrr = (struct acpi_dmar_reserved_memory *)header; 3673 3460 rmrru->base_address = rmrr->base_address; 3674 3461 rmrru->end_address = rmrr->end_address; 3462 + rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), 3463 + ((void *)rmrr) + rmrr->header.length, 3464 + &rmrru->devices_cnt); 3465 + if (rmrru->devices_cnt && rmrru->devices == NULL) { 3466 + kfree(rmrru); 3467 + return -ENOMEM; 3468 + } 3675 3469 3676 - dmar_register_rmrr_unit(rmrru); 3470 + list_add(&rmrru->list, &dmar_rmrr_units); 3471 + 3677 3472 return 0; 3678 3473 } 3679 - 3680 - static int __init 3681 - rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) 3682 - { 3683 - struct acpi_dmar_reserved_memory *rmrr; 3684 - 3685 - rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; 3686 - return dmar_parse_dev_scope((void *)(rmrr + 1), 3687 - ((void *)rmrr) + rmrr->header.length, 3688 - &rmrru->devices_cnt, &rmrru->devices, 3689 - rmrr->segment); 3690 - } 3691 - 3692 - static LIST_HEAD(dmar_atsr_units); 3693 3474 3694 3475 int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) 3695 3476 { ··· 3697 3490 3698 3491 atsru->hdr = hdr; 3699 3492 atsru->include_all = atsr->flags & 0x1; 3493 + if (!atsru->include_all) { 3494 + atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), 3495 + (void *)atsr + atsr->header.length, 3496 + &atsru->devices_cnt); 3497 + if (atsru->devices_cnt && atsru->devices == NULL) { 3498 + kfree(atsru); 3499 + return -ENOMEM; 3500 + } 3501 + } 3700 3502 3701 - list_add(&atsru->list, &dmar_atsr_units); 3503 + list_add_rcu(&atsru->list, &dmar_atsr_units); 3702 3504 3703 3505 return 0; 3704 - } 3705 - 3706 - static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru) 3707 - { 3708 - struct acpi_dmar_atsr *atsr; 3709 - 
3710 - if (atsru->include_all) 3711 - return 0; 3712 - 3713 - atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); 3714 - return dmar_parse_dev_scope((void *)(atsr + 1), 3715 - (void *)atsr + atsr->header.length, 3716 - &atsru->devices_cnt, &atsru->devices, 3717 - atsr->segment); 3718 3506 } 3719 3507 3720 3508 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) ··· 3737 3535 3738 3536 int dmar_find_matched_atsr_unit(struct pci_dev *dev) 3739 3537 { 3740 - int i; 3538 + int i, ret = 1; 3741 3539 struct pci_bus *bus; 3540 + struct pci_dev *bridge = NULL; 3541 + struct device *tmp; 3742 3542 struct acpi_dmar_atsr *atsr; 3743 3543 struct dmar_atsr_unit *atsru; 3744 3544 3745 3545 dev = pci_physfn(dev); 3746 - 3747 - list_for_each_entry(atsru, &dmar_atsr_units, list) { 3748 - atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); 3749 - if (atsr->segment == pci_domain_nr(dev->bus)) 3750 - goto found; 3751 - } 3752 - 3753 - return 0; 3754 - 3755 - found: 3756 3546 for (bus = dev->bus; bus; bus = bus->parent) { 3757 - struct pci_dev *bridge = bus->self; 3758 - 3547 + bridge = bus->self; 3759 3548 if (!bridge || !pci_is_pcie(bridge) || 3760 3549 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) 3761 3550 return 0; 3762 - 3763 - if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) { 3764 - for (i = 0; i < atsru->devices_cnt; i++) 3765 - if (atsru->devices[i] == bridge) 3766 - return 1; 3551 + if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) 3767 3552 break; 3553 + } 3554 + if (!bridge) 3555 + return 0; 3556 + 3557 + rcu_read_lock(); 3558 + list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { 3559 + atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); 3560 + if (atsr->segment != pci_domain_nr(dev->bus)) 3561 + continue; 3562 + 3563 + for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp) 3564 + if (tmp == &bridge->dev) 3565 + goto out; 3566 + 3567 + if (atsru->include_all) 3568 + goto out; 3569 + } 3570 + ret = 0; 
3571 + out: 3572 + rcu_read_unlock(); 3573 + 3574 + return ret; 3575 + } 3576 + 3577 + int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) 3578 + { 3579 + int ret = 0; 3580 + struct dmar_rmrr_unit *rmrru; 3581 + struct dmar_atsr_unit *atsru; 3582 + struct acpi_dmar_atsr *atsr; 3583 + struct acpi_dmar_reserved_memory *rmrr; 3584 + 3585 + if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING) 3586 + return 0; 3587 + 3588 + list_for_each_entry(rmrru, &dmar_rmrr_units, list) { 3589 + rmrr = container_of(rmrru->hdr, 3590 + struct acpi_dmar_reserved_memory, header); 3591 + if (info->event == BUS_NOTIFY_ADD_DEVICE) { 3592 + ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1), 3593 + ((void *)rmrr) + rmrr->header.length, 3594 + rmrr->segment, rmrru->devices, 3595 + rmrru->devices_cnt); 3596 + if (ret > 0) 3597 + break; 3598 + else if(ret < 0) 3599 + return ret; 3600 + } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 3601 + if (dmar_remove_dev_scope(info, rmrr->segment, 3602 + rmrru->devices, rmrru->devices_cnt)) 3603 + break; 3768 3604 } 3769 3605 } 3770 3606 3771 - if (atsru->include_all) 3772 - return 1; 3607 + list_for_each_entry(atsru, &dmar_atsr_units, list) { 3608 + if (atsru->include_all) 3609 + continue; 3610 + 3611 + atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); 3612 + if (info->event == BUS_NOTIFY_ADD_DEVICE) { 3613 + ret = dmar_insert_dev_scope(info, (void *)(atsr + 1), 3614 + (void *)atsr + atsr->header.length, 3615 + atsr->segment, atsru->devices, 3616 + atsru->devices_cnt); 3617 + if (ret > 0) 3618 + break; 3619 + else if(ret < 0) 3620 + return ret; 3621 + } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 3622 + if (dmar_remove_dev_scope(info, atsr->segment, 3623 + atsru->devices, atsru->devices_cnt)) 3624 + break; 3625 + } 3626 + } 3773 3627 3774 3628 return 0; 3775 - } 3776 - 3777 - int __init dmar_parse_rmrr_atsr_dev(void) 3778 - { 3779 - struct dmar_rmrr_unit *rmrr; 3780 - struct dmar_atsr_unit *atsr; 3781 - int 
ret = 0; 3782 - 3783 - list_for_each_entry(rmrr, &dmar_rmrr_units, list) { 3784 - ret = rmrr_parse_dev(rmrr); 3785 - if (ret) 3786 - return ret; 3787 - } 3788 - 3789 - list_for_each_entry(atsr, &dmar_atsr_units, list) { 3790 - ret = atsr_parse_dev(atsr); 3791 - if (ret) 3792 - return ret; 3793 - } 3794 - 3795 - return ret; 3796 3629 } 3797 3630 3798 3631 /* ··· 3840 3603 unsigned long action, void *data) 3841 3604 { 3842 3605 struct device *dev = data; 3843 - struct pci_dev *pdev = to_pci_dev(dev); 3844 3606 struct dmar_domain *domain; 3845 3607 3846 - if (iommu_no_mapping(dev)) 3608 + if (iommu_dummy(dev)) 3847 3609 return 0; 3848 3610 3849 - domain = find_domain(pdev); 3611 + if (action != BUS_NOTIFY_UNBOUND_DRIVER && 3612 + action != BUS_NOTIFY_DEL_DEVICE) 3613 + return 0; 3614 + 3615 + domain = find_domain(dev); 3850 3616 if (!domain) 3851 3617 return 0; 3852 3618 3853 - if (action == BUS_NOTIFY_UNBOUND_DRIVER && !iommu_pass_through) { 3854 - domain_remove_one_dev_info(domain, pdev); 3855 - 3856 - if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && 3857 - !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && 3858 - list_empty(&domain->devices)) 3859 - domain_exit(domain); 3860 - } 3619 + down_read(&dmar_global_lock); 3620 + domain_remove_one_dev_info(domain, dev); 3621 + if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) && 3622 + !(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) && 3623 + list_empty(&domain->devices)) 3624 + domain_exit(domain); 3625 + up_read(&dmar_global_lock); 3861 3626 3862 3627 return 0; 3863 3628 } 3864 3629 3865 3630 static struct notifier_block device_nb = { 3866 3631 .notifier_call = device_notifier, 3632 + }; 3633 + 3634 + static int intel_iommu_memory_notifier(struct notifier_block *nb, 3635 + unsigned long val, void *v) 3636 + { 3637 + struct memory_notify *mhp = v; 3638 + unsigned long long start, end; 3639 + unsigned long start_vpfn, last_vpfn; 3640 + 3641 + switch (val) { 3642 + case MEM_GOING_ONLINE: 3643 + start = mhp->start_pfn << 
PAGE_SHIFT; 3644 + end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1; 3645 + if (iommu_domain_identity_map(si_domain, start, end)) { 3646 + pr_warn("dmar: failed to build identity map for [%llx-%llx]\n", 3647 + start, end); 3648 + return NOTIFY_BAD; 3649 + } 3650 + break; 3651 + 3652 + case MEM_OFFLINE: 3653 + case MEM_CANCEL_ONLINE: 3654 + start_vpfn = mm_to_dma_pfn(mhp->start_pfn); 3655 + last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1); 3656 + while (start_vpfn <= last_vpfn) { 3657 + struct iova *iova; 3658 + struct dmar_drhd_unit *drhd; 3659 + struct intel_iommu *iommu; 3660 + struct page *freelist; 3661 + 3662 + iova = find_iova(&si_domain->iovad, start_vpfn); 3663 + if (iova == NULL) { 3664 + pr_debug("dmar: failed get IOVA for PFN %lx\n", 3665 + start_vpfn); 3666 + break; 3667 + } 3668 + 3669 + iova = split_and_remove_iova(&si_domain->iovad, iova, 3670 + start_vpfn, last_vpfn); 3671 + if (iova == NULL) { 3672 + pr_warn("dmar: failed to split IOVA PFN [%lx-%lx]\n", 3673 + start_vpfn, last_vpfn); 3674 + return NOTIFY_BAD; 3675 + } 3676 + 3677 + freelist = domain_unmap(si_domain, iova->pfn_lo, 3678 + iova->pfn_hi); 3679 + 3680 + rcu_read_lock(); 3681 + for_each_active_iommu(iommu, drhd) 3682 + iommu_flush_iotlb_psi(iommu, si_domain->id, 3683 + iova->pfn_lo, 3684 + iova->pfn_hi - iova->pfn_lo + 1, 3685 + !freelist, 0); 3686 + rcu_read_unlock(); 3687 + dma_free_pagelist(freelist); 3688 + 3689 + start_vpfn = iova->pfn_hi + 1; 3690 + free_iova_mem(iova); 3691 + } 3692 + break; 3693 + } 3694 + 3695 + return NOTIFY_OK; 3696 + } 3697 + 3698 + static struct notifier_block intel_iommu_memory_nb = { 3699 + .notifier_call = intel_iommu_memory_notifier, 3700 + .priority = 0 3867 3701 }; 3868 3702 3869 3703 int __init intel_iommu_init(void) ··· 3946 3638 /* VT-d is required for a TXT/tboot launch, so enforce that */ 3947 3639 force_on = tboot_force_iommu(); 3948 3640 3641 + if (iommu_init_mempool()) { 3642 + if (force_on) 3643 + panic("tboot: Failed to 
initialize iommu memory\n"); 3644 + return -ENOMEM; 3645 + } 3646 + 3647 + down_write(&dmar_global_lock); 3949 3648 if (dmar_table_init()) { 3950 3649 if (force_on) 3951 3650 panic("tboot: Failed to initialize DMAR table\n"); ··· 3975 3660 if (no_iommu || dmar_disabled) 3976 3661 goto out_free_dmar; 3977 3662 3978 - if (iommu_init_mempool()) { 3979 - if (force_on) 3980 - panic("tboot: Failed to initialize iommu memory\n"); 3981 - goto out_free_dmar; 3982 - } 3983 - 3984 3663 if (list_empty(&dmar_rmrr_units)) 3985 3664 printk(KERN_INFO "DMAR: No RMRR found\n"); 3986 3665 ··· 3984 3675 if (dmar_init_reserved_ranges()) { 3985 3676 if (force_on) 3986 3677 panic("tboot: Failed to reserve iommu ranges\n"); 3987 - goto out_free_mempool; 3678 + goto out_free_reserved_range; 3988 3679 } 3989 3680 3990 3681 init_no_remapping_devices(); ··· 3996 3687 printk(KERN_ERR "IOMMU: dmar init failed\n"); 3997 3688 goto out_free_reserved_range; 3998 3689 } 3690 + up_write(&dmar_global_lock); 3999 3691 printk(KERN_INFO 4000 3692 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); 4001 3693 ··· 4009 3699 init_iommu_pm_ops(); 4010 3700 4011 3701 bus_set_iommu(&pci_bus_type, &intel_iommu_ops); 4012 - 4013 3702 bus_register_notifier(&pci_bus_type, &device_nb); 3703 + if (si_domain && !hw_pass_through) 3704 + register_memory_notifier(&intel_iommu_memory_nb); 4014 3705 4015 3706 intel_iommu_enabled = 1; 4016 3707 ··· 4019 3708 4020 3709 out_free_reserved_range: 4021 3710 put_iova_domain(&reserved_iova_list); 4022 - out_free_mempool: 4023 - iommu_exit_mempool(); 4024 3711 out_free_dmar: 4025 3712 intel_iommu_free_dmars(); 3713 + up_write(&dmar_global_lock); 3714 + iommu_exit_mempool(); 4026 3715 return ret; 4027 3716 } 4028 3717 4029 3718 static void iommu_detach_dependent_devices(struct intel_iommu *iommu, 4030 - struct pci_dev *pdev) 3719 + struct device *dev) 4031 3720 { 4032 - struct pci_dev *tmp, *parent; 3721 + struct pci_dev *tmp, *parent, *pdev; 4033 3722 4034 - if 
(!iommu || !pdev) 3723 + if (!iommu || !dev || !dev_is_pci(dev)) 4035 3724 return; 3725 + 3726 + pdev = to_pci_dev(dev); 4036 3727 4037 3728 /* dependent device detach */ 4038 3729 tmp = pci_find_upstream_pcie_bridge(pdev); ··· 4056 3743 } 4057 3744 4058 3745 static void domain_remove_one_dev_info(struct dmar_domain *domain, 4059 - struct pci_dev *pdev) 3746 + struct device *dev) 4060 3747 { 4061 3748 struct device_domain_info *info, *tmp; 4062 3749 struct intel_iommu *iommu; 4063 3750 unsigned long flags; 4064 3751 int found = 0; 3752 + u8 bus, devfn; 4065 3753 4066 - iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, 4067 - pdev->devfn); 3754 + iommu = device_to_iommu(dev, &bus, &devfn); 4068 3755 if (!iommu) 4069 3756 return; 4070 3757 4071 3758 spin_lock_irqsave(&device_domain_lock, flags); 4072 3759 list_for_each_entry_safe(info, tmp, &domain->devices, link) { 4073 - if (info->segment == pci_domain_nr(pdev->bus) && 4074 - info->bus == pdev->bus->number && 4075 - info->devfn == pdev->devfn) { 3760 + if (info->iommu == iommu && info->bus == bus && 3761 + info->devfn == devfn) { 4076 3762 unlink_domain_info(info); 4077 3763 spin_unlock_irqrestore(&device_domain_lock, flags); 4078 3764 4079 3765 iommu_disable_dev_iotlb(info); 4080 3766 iommu_detach_dev(iommu, info->bus, info->devfn); 4081 - iommu_detach_dependent_devices(iommu, pdev); 3767 + iommu_detach_dependent_devices(iommu, dev); 4082 3768 free_devinfo_mem(info); 4083 3769 4084 3770 spin_lock_irqsave(&device_domain_lock, flags); ··· 4092 3780 * owned by this domain, clear this iommu in iommu_bmp 4093 3781 * update iommu count and coherency 4094 3782 */ 4095 - if (iommu == device_to_iommu(info->segment, info->bus, 4096 - info->devfn)) 3783 + if (info->iommu == iommu) 4097 3784 found = 1; 4098 3785 } 4099 3786 ··· 4116 3805 } 4117 3806 } 4118 3807 4119 - static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) 4120 - { 4121 - struct device_domain_info *info; 4122 - struct 
intel_iommu *iommu; 4123 - unsigned long flags1, flags2; 4124 - 4125 - spin_lock_irqsave(&device_domain_lock, flags1); 4126 - while (!list_empty(&domain->devices)) { 4127 - info = list_entry(domain->devices.next, 4128 - struct device_domain_info, link); 4129 - unlink_domain_info(info); 4130 - spin_unlock_irqrestore(&device_domain_lock, flags1); 4131 - 4132 - iommu_disable_dev_iotlb(info); 4133 - iommu = device_to_iommu(info->segment, info->bus, info->devfn); 4134 - iommu_detach_dev(iommu, info->bus, info->devfn); 4135 - iommu_detach_dependent_devices(iommu, info->dev); 4136 - 4137 - /* clear this iommu in iommu_bmp, update iommu count 4138 - * and capabilities 4139 - */ 4140 - spin_lock_irqsave(&domain->iommu_lock, flags2); 4141 - if (test_and_clear_bit(iommu->seq_id, 4142 - domain->iommu_bmp)) { 4143 - domain->iommu_count--; 4144 - domain_update_iommu_cap(domain); 4145 - } 4146 - spin_unlock_irqrestore(&domain->iommu_lock, flags2); 4147 - 4148 - free_devinfo_mem(info); 4149 - spin_lock_irqsave(&device_domain_lock, flags1); 4150 - } 4151 - spin_unlock_irqrestore(&device_domain_lock, flags1); 4152 - } 4153 - 4154 - /* domain id for virtual machine, it won't be set in context */ 4155 - static atomic_t vm_domid = ATOMIC_INIT(0); 4156 - 4157 - static struct dmar_domain *iommu_alloc_vm_domain(void) 4158 - { 4159 - struct dmar_domain *domain; 4160 - 4161 - domain = alloc_domain_mem(); 4162 - if (!domain) 4163 - return NULL; 4164 - 4165 - domain->id = atomic_inc_return(&vm_domid); 4166 - domain->nid = -1; 4167 - memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); 4168 - domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; 4169 - 4170 - return domain; 4171 - } 4172 - 4173 3808 static int md_domain_init(struct dmar_domain *domain, int guest_width) 4174 3809 { 4175 3810 int adjust_width; 4176 3811 4177 3812 init_iova_domain(&domain->iovad, DMA_32BIT_PFN); 4178 - spin_lock_init(&domain->iommu_lock); 4179 - 4180 3813 domain_reserve_special_ranges(domain); 4181 3814 4182 3815 /* 
calculate AGAW */ ··· 4128 3873 adjust_width = guestwidth_to_adjustwidth(guest_width); 4129 3874 domain->agaw = width_to_agaw(adjust_width); 4130 3875 4131 - INIT_LIST_HEAD(&domain->devices); 4132 - 4133 - domain->iommu_count = 0; 4134 3876 domain->iommu_coherency = 0; 4135 3877 domain->iommu_snooping = 0; 4136 3878 domain->iommu_superpage = 0; ··· 4142 3890 return 0; 4143 3891 } 4144 3892 4145 - static void iommu_free_vm_domain(struct dmar_domain *domain) 4146 - { 4147 - unsigned long flags; 4148 - struct dmar_drhd_unit *drhd; 4149 - struct intel_iommu *iommu; 4150 - unsigned long i; 4151 - unsigned long ndomains; 4152 - 4153 - for_each_active_iommu(iommu, drhd) { 4154 - ndomains = cap_ndoms(iommu->cap); 4155 - for_each_set_bit(i, iommu->domain_ids, ndomains) { 4156 - if (iommu->domains[i] == domain) { 4157 - spin_lock_irqsave(&iommu->lock, flags); 4158 - clear_bit(i, iommu->domain_ids); 4159 - iommu->domains[i] = NULL; 4160 - spin_unlock_irqrestore(&iommu->lock, flags); 4161 - break; 4162 - } 4163 - } 4164 - } 4165 - } 4166 - 4167 - static void vm_domain_exit(struct dmar_domain *domain) 4168 - { 4169 - /* Domain 0 is reserved, so dont process it */ 4170 - if (!domain) 4171 - return; 4172 - 4173 - vm_domain_remove_all_dev_info(domain); 4174 - /* destroy iovas */ 4175 - put_iova_domain(&domain->iovad); 4176 - 4177 - /* clear ptes */ 4178 - dma_pte_clear_range(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); 4179 - 4180 - /* free page tables */ 4181 - dma_pte_free_pagetable(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); 4182 - 4183 - iommu_free_vm_domain(domain); 4184 - free_domain_mem(domain); 4185 - } 4186 - 4187 3893 static int intel_iommu_domain_init(struct iommu_domain *domain) 4188 3894 { 4189 3895 struct dmar_domain *dmar_domain; 4190 3896 4191 - dmar_domain = iommu_alloc_vm_domain(); 3897 + dmar_domain = alloc_domain(true); 4192 3898 if (!dmar_domain) { 4193 3899 printk(KERN_ERR 4194 3900 "intel_iommu_domain_init: dmar_domain == NULL\n"); ··· 4155 3945 if 
(md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { 4156 3946 printk(KERN_ERR 4157 3947 "intel_iommu_domain_init() failed\n"); 4158 - vm_domain_exit(dmar_domain); 3948 + domain_exit(dmar_domain); 4159 3949 return -ENOMEM; 4160 3950 } 4161 3951 domain_update_iommu_cap(dmar_domain); ··· 4173 3963 struct dmar_domain *dmar_domain = domain->priv; 4174 3964 4175 3965 domain->priv = NULL; 4176 - vm_domain_exit(dmar_domain); 3966 + domain_exit(dmar_domain); 4177 3967 } 4178 3968 4179 3969 static int intel_iommu_attach_device(struct iommu_domain *domain, 4180 3970 struct device *dev) 4181 3971 { 4182 3972 struct dmar_domain *dmar_domain = domain->priv; 4183 - struct pci_dev *pdev = to_pci_dev(dev); 4184 3973 struct intel_iommu *iommu; 4185 3974 int addr_width; 3975 + u8 bus, devfn; 4186 3976 4187 - /* normally pdev is not mapped */ 4188 - if (unlikely(domain_context_mapped(pdev))) { 3977 + /* normally dev is not mapped */ 3978 + if (unlikely(domain_context_mapped(dev))) { 4189 3979 struct dmar_domain *old_domain; 4190 3980 4191 - old_domain = find_domain(pdev); 3981 + old_domain = find_domain(dev); 4192 3982 if (old_domain) { 4193 3983 if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE || 4194 3984 dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) 4195 - domain_remove_one_dev_info(old_domain, pdev); 3985 + domain_remove_one_dev_info(old_domain, dev); 4196 3986 else 4197 3987 domain_remove_dev_info(old_domain); 4198 3988 } 4199 3989 } 4200 3990 4201 - iommu = device_to_iommu(pci_domain_nr(pdev->bus), pdev->bus->number, 4202 - pdev->devfn); 3991 + iommu = device_to_iommu(dev, &bus, &devfn); 4203 3992 if (!iommu) 4204 3993 return -ENODEV; 4205 3994 ··· 4230 4021 dmar_domain->agaw--; 4231 4022 } 4232 4023 4233 - return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL); 4024 + return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL); 4234 4025 } 4235 4026 4236 4027 static void intel_iommu_detach_device(struct iommu_domain *domain, 4237 4028 
struct device *dev) 4238 4029 { 4239 4030 struct dmar_domain *dmar_domain = domain->priv; 4240 - struct pci_dev *pdev = to_pci_dev(dev); 4241 4031 4242 - domain_remove_one_dev_info(dmar_domain, pdev); 4032 + domain_remove_one_dev_info(dmar_domain, dev); 4243 4033 } 4244 4034 4245 4035 static int intel_iommu_map(struct iommu_domain *domain, ··· 4280 4072 } 4281 4073 4282 4074 static size_t intel_iommu_unmap(struct iommu_domain *domain, 4283 - unsigned long iova, size_t size) 4075 + unsigned long iova, size_t size) 4284 4076 { 4285 4077 struct dmar_domain *dmar_domain = domain->priv; 4286 - int order; 4078 + struct page *freelist = NULL; 4079 + struct intel_iommu *iommu; 4080 + unsigned long start_pfn, last_pfn; 4081 + unsigned int npages; 4082 + int iommu_id, num, ndomains, level = 0; 4287 4083 4288 - order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, 4289 - (iova + size - 1) >> VTD_PAGE_SHIFT); 4084 + /* Cope with horrid API which requires us to unmap more than the 4085 + size argument if it happens to be a large-page mapping. 
*/ 4086 + if (!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level)) 4087 + BUG(); 4088 + 4089 + if (size < VTD_PAGE_SIZE << level_to_offset_bits(level)) 4090 + size = VTD_PAGE_SIZE << level_to_offset_bits(level); 4091 + 4092 + start_pfn = iova >> VTD_PAGE_SHIFT; 4093 + last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; 4094 + 4095 + freelist = domain_unmap(dmar_domain, start_pfn, last_pfn); 4096 + 4097 + npages = last_pfn - start_pfn + 1; 4098 + 4099 + for_each_set_bit(iommu_id, dmar_domain->iommu_bmp, g_num_of_iommus) { 4100 + iommu = g_iommus[iommu_id]; 4101 + 4102 + /* 4103 + * find bit position of dmar_domain 4104 + */ 4105 + ndomains = cap_ndoms(iommu->cap); 4106 + for_each_set_bit(num, iommu->domain_ids, ndomains) { 4107 + if (iommu->domains[num] == dmar_domain) 4108 + iommu_flush_iotlb_psi(iommu, num, start_pfn, 4109 + npages, !freelist, 0); 4110 + } 4111 + 4112 + } 4113 + 4114 + dma_free_pagelist(freelist); 4290 4115 4291 4116 if (dmar_domain->max_addr == iova + size) 4292 4117 dmar_domain->max_addr = iova; 4293 4118 4294 - return PAGE_SIZE << order; 4119 + return size; 4295 4120 } 4296 4121 4297 4122 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, ··· 4332 4091 { 4333 4092 struct dmar_domain *dmar_domain = domain->priv; 4334 4093 struct dma_pte *pte; 4094 + int level = 0; 4335 4095 u64 phys = 0; 4336 4096 4337 - pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0); 4097 + pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level); 4338 4098 if (pte) 4339 4099 phys = dma_pte_addr(pte); 4340 4100 ··· 4363 4121 struct pci_dev *bridge, *dma_pdev = NULL; 4364 4122 struct iommu_group *group; 4365 4123 int ret; 4124 + u8 bus, devfn; 4366 4125 4367 - if (!device_to_iommu(pci_domain_nr(pdev->bus), 4368 - pdev->bus->number, pdev->devfn)) 4126 + if (!device_to_iommu(dev, &bus, &devfn)) 4369 4127 return -ENODEV; 4370 4128 4371 4129 bridge = pci_find_upstream_pcie_bridge(pdev);
+72 -36
drivers/iommu/intel_irq_remapping.c
··· 38 38 static struct hpet_scope ir_hpet[MAX_HPET_TBS]; 39 39 static int ir_ioapic_num, ir_hpet_num; 40 40 41 + /* 42 + * Lock ordering: 43 + * ->dmar_global_lock 44 + * ->irq_2_ir_lock 45 + * ->qi->q_lock 46 + * ->iommu->register_lock 47 + * Note: 48 + * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called 49 + * in single-threaded environment with interrupt disabled, so no need to tabke 50 + * the dmar_global_lock. 51 + */ 41 52 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); 42 53 43 54 static int __init parse_ioapics_under_ir(void); ··· 318 307 if (!irte) 319 308 return -1; 320 309 310 + down_read(&dmar_global_lock); 321 311 for (i = 0; i < MAX_IO_APICS; i++) { 322 312 if (ir_ioapic[i].id == apic) { 323 313 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; 324 314 break; 325 315 } 326 316 } 317 + up_read(&dmar_global_lock); 327 318 328 319 if (sid == 0) { 329 320 pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic); ··· 345 332 if (!irte) 346 333 return -1; 347 334 335 + down_read(&dmar_global_lock); 348 336 for (i = 0; i < MAX_HPET_TBS; i++) { 349 337 if (ir_hpet[i].id == id) { 350 338 sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; 351 339 break; 352 340 } 353 341 } 342 + up_read(&dmar_global_lock); 354 343 355 344 if (sid == 0) { 356 345 pr_warning("Failed to set source-id of HPET block (%d)\n", id); ··· 809 794 810 795 static int __init ir_dev_scope_init(void) 811 796 { 797 + int ret; 798 + 812 799 if (!irq_remapping_enabled) 813 800 return 0; 814 801 815 - return dmar_dev_scope_init(); 802 + down_write(&dmar_global_lock); 803 + ret = dmar_dev_scope_init(); 804 + up_write(&dmar_global_lock); 805 + 806 + return ret; 816 807 } 817 808 rootfs_initcall(ir_dev_scope_init); 818 809 ··· 899 878 struct io_apic_irq_attr *attr) 900 879 { 901 880 int ioapic_id = mpc_ioapic_id(attr->ioapic); 902 - struct intel_iommu *iommu = map_ioapic_to_ir(ioapic_id); 881 + struct intel_iommu *iommu; 903 882 struct IR_IO_APIC_route_entry *entry; 904 883 
struct irte irte; 905 884 int index; 906 885 886 + down_read(&dmar_global_lock); 887 + iommu = map_ioapic_to_ir(ioapic_id); 907 888 if (!iommu) { 908 889 pr_warn("No mapping iommu for ioapic %d\n", ioapic_id); 909 - return -ENODEV; 890 + index = -ENODEV; 891 + } else { 892 + index = alloc_irte(iommu, irq, 1); 893 + if (index < 0) { 894 + pr_warn("Failed to allocate IRTE for ioapic %d\n", 895 + ioapic_id); 896 + index = -ENOMEM; 897 + } 910 898 } 911 - 912 - entry = (struct IR_IO_APIC_route_entry *)route_entry; 913 - 914 - index = alloc_irte(iommu, irq, 1); 915 - if (index < 0) { 916 - pr_warn("Failed to allocate IRTE for ioapic %d\n", ioapic_id); 917 - return -ENOMEM; 918 - } 899 + up_read(&dmar_global_lock); 900 + if (index < 0) 901 + return index; 919 902 920 903 prepare_irte(&irte, vector, destination); 921 904 ··· 938 913 irte.avail, irte.vector, irte.dest_id, 939 914 irte.sid, irte.sq, irte.svt); 940 915 916 + entry = (struct IR_IO_APIC_route_entry *)route_entry; 941 917 memset(entry, 0, sizeof(*entry)); 942 918 943 919 entry->index2 = (index >> 15) & 0x1; ··· 1069 1043 struct intel_iommu *iommu; 1070 1044 int index; 1071 1045 1046 + down_read(&dmar_global_lock); 1072 1047 iommu = map_dev_to_ir(dev); 1073 1048 if (!iommu) { 1074 1049 printk(KERN_ERR 1075 1050 "Unable to map PCI %s to iommu\n", pci_name(dev)); 1076 - return -ENOENT; 1051 + index = -ENOENT; 1052 + } else { 1053 + index = alloc_irte(iommu, irq, nvec); 1054 + if (index < 0) { 1055 + printk(KERN_ERR 1056 + "Unable to allocate %d IRTE for PCI %s\n", 1057 + nvec, pci_name(dev)); 1058 + index = -ENOSPC; 1059 + } 1077 1060 } 1061 + up_read(&dmar_global_lock); 1078 1062 1079 - index = alloc_irte(iommu, irq, nvec); 1080 - if (index < 0) { 1081 - printk(KERN_ERR 1082 - "Unable to allocate %d IRTE for PCI %s\n", nvec, 1083 - pci_name(dev)); 1084 - return -ENOSPC; 1085 - } 1086 1063 return index; 1087 1064 } 1088 1065 ··· 1093 1064 int index, int sub_handle) 1094 1065 { 1095 1066 struct intel_iommu *iommu; 
1067 + int ret = -ENOENT; 1096 1068 1069 + down_read(&dmar_global_lock); 1097 1070 iommu = map_dev_to_ir(pdev); 1098 - if (!iommu) 1099 - return -ENOENT; 1100 - /* 1101 - * setup the mapping between the irq and the IRTE 1102 - * base index, the sub_handle pointing to the 1103 - * appropriate interrupt remap table entry. 1104 - */ 1105 - set_irte_irq(irq, iommu, index, sub_handle); 1071 + if (iommu) { 1072 + /* 1073 + * setup the mapping between the irq and the IRTE 1074 + * base index, the sub_handle pointing to the 1075 + * appropriate interrupt remap table entry. 1076 + */ 1077 + set_irte_irq(irq, iommu, index, sub_handle); 1078 + ret = 0; 1079 + } 1080 + up_read(&dmar_global_lock); 1106 1081 1107 - return 0; 1082 + return ret; 1108 1083 } 1109 1084 1110 1085 static int intel_setup_hpet_msi(unsigned int irq, unsigned int id) 1111 1086 { 1112 - struct intel_iommu *iommu = map_hpet_to_ir(id); 1087 + int ret = -1; 1088 + struct intel_iommu *iommu; 1113 1089 int index; 1114 1090 1115 - if (!iommu) 1116 - return -1; 1091 + down_read(&dmar_global_lock); 1092 + iommu = map_hpet_to_ir(id); 1093 + if (iommu) { 1094 + index = alloc_irte(iommu, irq, 1); 1095 + if (index >= 0) 1096 + ret = 0; 1097 + } 1098 + up_read(&dmar_global_lock); 1117 1099 1118 - index = alloc_irte(iommu, irq, 1); 1119 - if (index < 0) 1120 - return -1; 1121 - 1122 - return 0; 1100 + return ret; 1123 1101 } 1124 1102 1125 1103 struct irq_remap_ops intel_irq_remap_ops = {
+58 -6
drivers/iommu/iova.c
··· 342 342 return 0; 343 343 } 344 344 345 + static inline struct iova * 346 + alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi) 347 + { 348 + struct iova *iova; 349 + 350 + iova = alloc_iova_mem(); 351 + if (iova) { 352 + iova->pfn_lo = pfn_lo; 353 + iova->pfn_hi = pfn_hi; 354 + } 355 + 356 + return iova; 357 + } 358 + 345 359 static struct iova * 346 360 __insert_new_range(struct iova_domain *iovad, 347 361 unsigned long pfn_lo, unsigned long pfn_hi) 348 362 { 349 363 struct iova *iova; 350 364 351 - iova = alloc_iova_mem(); 352 - if (!iova) 353 - return iova; 365 + iova = alloc_and_init_iova(pfn_lo, pfn_hi); 366 + if (iova) 367 + iova_insert_rbtree(&iovad->rbroot, iova); 354 368 355 - iova->pfn_hi = pfn_hi; 356 - iova->pfn_lo = pfn_lo; 357 - iova_insert_rbtree(&iovad->rbroot, iova); 358 369 return iova; 359 370 } 360 371 ··· 443 432 iova->pfn_lo, iova->pfn_lo); 444 433 } 445 434 spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); 435 + } 436 + 437 + struct iova * 438 + split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, 439 + unsigned long pfn_lo, unsigned long pfn_hi) 440 + { 441 + unsigned long flags; 442 + struct iova *prev = NULL, *next = NULL; 443 + 444 + spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); 445 + if (iova->pfn_lo < pfn_lo) { 446 + prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1); 447 + if (prev == NULL) 448 + goto error; 449 + } 450 + if (iova->pfn_hi > pfn_hi) { 451 + next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi); 452 + if (next == NULL) 453 + goto error; 454 + } 455 + 456 + __cached_rbnode_delete_update(iovad, iova); 457 + rb_erase(&iova->node, &iovad->rbroot); 458 + 459 + if (prev) { 460 + iova_insert_rbtree(&iovad->rbroot, prev); 461 + iova->pfn_lo = pfn_lo; 462 + } 463 + if (next) { 464 + iova_insert_rbtree(&iovad->rbroot, next); 465 + iova->pfn_hi = pfn_hi; 466 + } 467 + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 468 + 469 + return iova; 470 + 471 + error: 472 + 
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 473 + if (prev) 474 + free_iova_mem(prev); 475 + return NULL; 446 476 }
+104 -58
drivers/iommu/omap-iommu.c
··· 23 23 #include <linux/spinlock.h> 24 24 #include <linux/io.h> 25 25 #include <linux/pm_runtime.h> 26 + #include <linux/of.h> 27 + #include <linux/of_iommu.h> 28 + #include <linux/of_irq.h> 26 29 27 30 #include <asm/cacheflush.h> 28 31 ··· 149 146 struct platform_device *pdev = to_platform_device(obj->dev); 150 147 struct iommu_platform_data *pdata = pdev->dev.platform_data; 151 148 152 - if (!pdata) 153 - return -EINVAL; 154 - 155 149 if (!arch_iommu) 156 150 return -ENODEV; 157 151 158 - if (pdata->deassert_reset) { 152 + if (pdata && pdata->deassert_reset) { 159 153 err = pdata->deassert_reset(pdev, pdata->reset_name); 160 154 if (err) { 161 155 dev_err(obj->dev, "deassert_reset failed: %d\n", err); ··· 172 172 struct platform_device *pdev = to_platform_device(obj->dev); 173 173 struct iommu_platform_data *pdata = pdev->dev.platform_data; 174 174 175 - if (!pdata) 176 - return; 177 - 178 175 arch_iommu->disable(obj); 179 176 180 177 pm_runtime_put_sync(obj->dev); 181 178 182 - if (pdata->assert_reset) 179 + if (pdata && pdata->assert_reset) 183 180 pdata->assert_reset(pdev, pdata->reset_name); 184 181 } 185 182 ··· 520 523 static void iopte_free(u32 *iopte) 521 524 { 522 525 /* Note: freed iopte's must be clean ready for re-use */ 523 - kmem_cache_free(iopte_cachep, iopte); 526 + if (iopte) 527 + kmem_cache_free(iopte_cachep, iopte); 524 528 } 525 529 526 530 static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da) ··· 861 863 **/ 862 864 static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd) 863 865 { 864 - int err = -ENOMEM; 866 + int err; 865 867 struct device *dev; 866 868 struct omap_iommu *obj; 867 869 ··· 869 871 (void *)name, 870 872 device_match_by_alias); 871 873 if (!dev) 872 - return NULL; 874 + return ERR_PTR(-ENODEV); 873 875 874 876 obj = to_iommu(dev); 875 877 ··· 888 890 goto err_enable; 889 891 flush_iotlb_all(obj); 890 892 891 - if (!try_module_get(obj->owner)) 893 + if (!try_module_get(obj->owner)) { 894 + err 
= -ENODEV; 892 895 goto err_module; 896 + } 893 897 894 898 spin_unlock(&obj->iommu_lock); 895 899 ··· 940 940 struct omap_iommu *obj; 941 941 struct resource *res; 942 942 struct iommu_platform_data *pdata = pdev->dev.platform_data; 943 + struct device_node *of = pdev->dev.of_node; 943 944 944 - obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); 945 + obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); 945 946 if (!obj) 946 947 return -ENOMEM; 947 948 948 - obj->nr_tlb_entries = pdata->nr_tlb_entries; 949 - obj->name = pdata->name; 949 + if (of) { 950 + obj->name = dev_name(&pdev->dev); 951 + obj->nr_tlb_entries = 32; 952 + err = of_property_read_u32(of, "ti,#tlb-entries", 953 + &obj->nr_tlb_entries); 954 + if (err && err != -EINVAL) 955 + return err; 956 + if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8) 957 + return -EINVAL; 958 + /* 959 + * da_start and da_end are needed for omap-iovmm, so hardcode 960 + * these values as used by OMAP3 ISP - the only user for 961 + * omap-iovmm 962 + */ 963 + obj->da_start = 0; 964 + obj->da_end = 0xfffff000; 965 + if (of_find_property(of, "ti,iommu-bus-err-back", NULL)) 966 + obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN; 967 + } else { 968 + obj->nr_tlb_entries = pdata->nr_tlb_entries; 969 + obj->name = pdata->name; 970 + obj->da_start = pdata->da_start; 971 + obj->da_end = pdata->da_end; 972 + } 973 + if (obj->da_end <= obj->da_start) 974 + return -EINVAL; 975 + 950 976 obj->dev = &pdev->dev; 951 977 obj->ctx = (void *)obj + sizeof(*obj); 952 - obj->da_start = pdata->da_start; 953 - obj->da_end = pdata->da_end; 954 978 955 979 spin_lock_init(&obj->iommu_lock); 956 980 mutex_init(&obj->mmap_lock); ··· 982 958 INIT_LIST_HEAD(&obj->mmap); 983 959 984 960 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 985 - if (!res) { 986 - err = -ENODEV; 987 - goto err_mem; 988 - } 989 - 990 - res = request_mem_region(res->start, resource_size(res), 991 - dev_name(&pdev->dev)); 992 - if (!res) { 
993 - err = -EIO; 994 - goto err_mem; 995 - } 996 - 997 - obj->regbase = ioremap(res->start, resource_size(res)); 998 - if (!obj->regbase) { 999 - err = -ENOMEM; 1000 - goto err_ioremap; 1001 - } 961 + obj->regbase = devm_ioremap_resource(obj->dev, res); 962 + if (IS_ERR(obj->regbase)) 963 + return PTR_ERR(obj->regbase); 1002 964 1003 965 irq = platform_get_irq(pdev, 0); 1004 - if (irq < 0) { 1005 - err = -ENODEV; 1006 - goto err_irq; 1007 - } 1008 - err = request_irq(irq, iommu_fault_handler, IRQF_SHARED, 1009 - dev_name(&pdev->dev), obj); 966 + if (irq < 0) 967 + return -ENODEV; 968 + 969 + err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED, 970 + dev_name(obj->dev), obj); 1010 971 if (err < 0) 1011 - goto err_irq; 972 + return err; 1012 973 platform_set_drvdata(pdev, obj); 1013 974 1014 975 pm_runtime_irq_safe(obj->dev); ··· 1001 992 1002 993 dev_info(&pdev->dev, "%s registered\n", obj->name); 1003 994 return 0; 1004 - 1005 - err_irq: 1006 - iounmap(obj->regbase); 1007 - err_ioremap: 1008 - release_mem_region(res->start, resource_size(res)); 1009 - err_mem: 1010 - kfree(obj); 1011 - return err; 1012 995 } 1013 996 1014 997 static int omap_iommu_remove(struct platform_device *pdev) 1015 998 { 1016 - int irq; 1017 - struct resource *res; 1018 999 struct omap_iommu *obj = platform_get_drvdata(pdev); 1019 1000 1020 1001 iopgtable_clear_entry_all(obj); 1021 1002 1022 - irq = platform_get_irq(pdev, 0); 1023 - free_irq(irq, obj); 1024 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1025 - release_mem_region(res->start, resource_size(res)); 1026 - iounmap(obj->regbase); 1027 - 1028 1003 pm_runtime_disable(obj->dev); 1029 1004 1030 1005 dev_info(&pdev->dev, "%s removed\n", obj->name); 1031 - kfree(obj); 1032 1006 return 0; 1033 1007 } 1008 + 1009 + static struct of_device_id omap_iommu_of_match[] = { 1010 + { .compatible = "ti,omap2-iommu" }, 1011 + { .compatible = "ti,omap4-iommu" }, 1012 + { .compatible = "ti,dra7-iommu" }, 1013 + {}, 1014 + 
}; 1015 + MODULE_DEVICE_TABLE(of, omap_iommu_of_match); 1034 1016 1035 1017 static struct platform_driver omap_iommu_driver = { 1036 1018 .probe = omap_iommu_probe, 1037 1019 .remove = omap_iommu_remove, 1038 1020 .driver = { 1039 1021 .name = "omap-iommu", 1022 + .of_match_table = of_match_ptr(omap_iommu_of_match), 1040 1023 }, 1041 1024 }; 1042 1025 ··· 1254 1253 return 0; 1255 1254 } 1256 1255 1256 + static int omap_iommu_add_device(struct device *dev) 1257 + { 1258 + struct omap_iommu_arch_data *arch_data; 1259 + struct device_node *np; 1260 + 1261 + /* 1262 + * Allocate the archdata iommu structure for DT-based devices. 1263 + * 1264 + * TODO: Simplify this when removing non-DT support completely from the 1265 + * IOMMU users. 1266 + */ 1267 + if (!dev->of_node) 1268 + return 0; 1269 + 1270 + np = of_parse_phandle(dev->of_node, "iommus", 0); 1271 + if (!np) 1272 + return 0; 1273 + 1274 + arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL); 1275 + if (!arch_data) { 1276 + of_node_put(np); 1277 + return -ENOMEM; 1278 + } 1279 + 1280 + arch_data->name = kstrdup(dev_name(dev), GFP_KERNEL); 1281 + dev->archdata.iommu = arch_data; 1282 + 1283 + of_node_put(np); 1284 + 1285 + return 0; 1286 + } 1287 + 1288 + static void omap_iommu_remove_device(struct device *dev) 1289 + { 1290 + struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; 1291 + 1292 + if (!dev->of_node || !arch_data) 1293 + return; 1294 + 1295 + kfree(arch_data->name); 1296 + kfree(arch_data); 1297 + } 1298 + 1257 1299 static struct iommu_ops omap_iommu_ops = { 1258 1300 .domain_init = omap_iommu_domain_init, 1259 1301 .domain_destroy = omap_iommu_domain_destroy, ··· 1306 1262 .unmap = omap_iommu_unmap, 1307 1263 .iova_to_phys = omap_iommu_iova_to_phys, 1308 1264 .domain_has_cap = omap_iommu_domain_has_cap, 1265 + .add_device = omap_iommu_add_device, 1266 + .remove_device = omap_iommu_remove_device, 1309 1267 .pgsize_bitmap = OMAP_IOMMU_PGSIZES, 1310 1268 }; 1311 1269
+5
drivers/iommu/omap-iommu.h
··· 52 52 void *ctx; /* iommu context: registres saved area */ 53 53 u32 da_start; 54 54 u32 da_end; 55 + 56 + int has_bus_err_back; 55 57 }; 56 58 57 59 struct cr_regs { ··· 132 130 #define MMU_READ_CAM 0x68 133 131 #define MMU_READ_RAM 0x6c 134 132 #define MMU_EMU_FAULT_AD 0x70 133 + #define MMU_GP_REG 0x88 135 134 136 135 #define MMU_REG_SIZE 256 137 136 ··· 165 162 #define MMU_RAM_MIXED_SHIFT 6 166 163 #define MMU_RAM_MIXED_MASK (1 << MMU_RAM_MIXED_SHIFT) 167 164 #define MMU_RAM_MIXED MMU_RAM_MIXED_MASK 165 + 166 + #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 168 167 169 168 /* 170 169 * utilities for super page(16MB, 1MB, 64KB and 4KB)
+3
drivers/iommu/omap-iommu2.c
··· 98 98 99 99 iommu_write_reg(obj, pa, MMU_TTB); 100 100 101 + if (obj->has_bus_err_back) 102 + iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG); 103 + 101 104 __iommu_set_twl(obj, true); 102 105 103 106 return 0;
+13 -2
include/acpi/actbl2.h
··· 424 424 ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, 425 425 ACPI_DMAR_TYPE_ATSR = 2, 426 426 ACPI_DMAR_HARDWARE_AFFINITY = 3, 427 - ACPI_DMAR_TYPE_RESERVED = 4 /* 4 and greater are reserved */ 427 + ACPI_DMAR_TYPE_ANDD = 4, 428 + ACPI_DMAR_TYPE_RESERVED = 5 /* 5 and greater are reserved */ 428 429 }; 429 430 430 431 /* DMAR Device Scope structure */ ··· 446 445 ACPI_DMAR_SCOPE_TYPE_BRIDGE = 2, 447 446 ACPI_DMAR_SCOPE_TYPE_IOAPIC = 3, 448 447 ACPI_DMAR_SCOPE_TYPE_HPET = 4, 449 - ACPI_DMAR_SCOPE_TYPE_RESERVED = 5 /* 5 and greater are reserved */ 448 + ACPI_DMAR_SCOPE_TYPE_ACPI = 5, 449 + ACPI_DMAR_SCOPE_TYPE_RESERVED = 6 /* 6 and greater are reserved */ 450 450 }; 451 451 452 452 struct acpi_dmar_pci_path { ··· 507 505 u32 reserved; 508 506 u64 base_address; 509 507 u32 proximity_domain; 508 + }; 509 + 510 + /* 4: ACPI Namespace Device Declaration Structure */ 511 + 512 + struct acpi_dmar_andd { 513 + struct acpi_dmar_header header; 514 + u8 reserved[3]; 515 + u8 device_number; 516 + u8 object_name[]; 510 517 }; 511 518 512 519 /*******************************************************************************
+51 -31
include/linux/dmar.h
··· 25 25 #include <linux/types.h> 26 26 #include <linux/msi.h> 27 27 #include <linux/irqreturn.h> 28 + #include <linux/rwsem.h> 29 + #include <linux/rcupdate.h> 28 30 29 31 struct acpi_dmar_header; 30 32 ··· 36 34 37 35 struct intel_iommu; 38 36 37 + struct dmar_dev_scope { 38 + struct device __rcu *dev; 39 + u8 bus; 40 + u8 devfn; 41 + }; 42 + 39 43 #ifdef CONFIG_DMAR_TABLE 40 44 extern struct acpi_table_header *dmar_tbl; 41 45 struct dmar_drhd_unit { 42 46 struct list_head list; /* list of drhd units */ 43 47 struct acpi_dmar_header *hdr; /* ACPI header */ 44 48 u64 reg_base_addr; /* register base address*/ 45 - struct pci_dev **devices; /* target device array */ 49 + struct dmar_dev_scope *devices;/* target device array */ 46 50 int devices_cnt; /* target device count */ 47 51 u16 segment; /* PCI domain */ 48 52 u8 ignored:1; /* ignore drhd */ ··· 56 48 struct intel_iommu *iommu; 57 49 }; 58 50 51 + struct dmar_pci_notify_info { 52 + struct pci_dev *dev; 53 + unsigned long event; 54 + int bus; 55 + u16 seg; 56 + u16 level; 57 + struct acpi_dmar_pci_path path[]; 58 + } __attribute__((packed)); 59 + 60 + extern struct rw_semaphore dmar_global_lock; 59 61 extern struct list_head dmar_drhd_units; 60 62 61 63 #define for_each_drhd_unit(drhd) \ 62 - list_for_each_entry(drhd, &dmar_drhd_units, list) 64 + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) 63 65 64 66 #define for_each_active_drhd_unit(drhd) \ 65 - list_for_each_entry(drhd, &dmar_drhd_units, list) \ 67 + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ 66 68 if (drhd->ignored) {} else 67 69 68 70 #define for_each_active_iommu(i, drhd) \ 69 - list_for_each_entry(drhd, &dmar_drhd_units, list) \ 71 + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ 70 72 if (i=drhd->iommu, drhd->ignored) {} else 71 73 72 74 #define for_each_iommu(i, drhd) \ 73 - list_for_each_entry(drhd, &dmar_drhd_units, list) \ 75 + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \ 74 76 if (i=drhd->iommu, 0) {} 
else 77 + 78 + static inline bool dmar_rcu_check(void) 79 + { 80 + return rwsem_is_locked(&dmar_global_lock) || 81 + system_state == SYSTEM_BOOTING; 82 + } 83 + 84 + #define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check()) 85 + 86 + #define for_each_dev_scope(a, c, p, d) \ 87 + for ((p) = 0; ((d) = (p) < (c) ? dmar_rcu_dereference((a)[(p)].dev) : \ 88 + NULL, (p) < (c)); (p)++) 89 + 90 + #define for_each_active_dev_scope(a, c, p, d) \ 91 + for_each_dev_scope((a), (c), (p), (d)) if (!(d)) { continue; } else 75 92 76 93 extern int dmar_table_init(void); 77 94 extern int dmar_dev_scope_init(void); 78 95 extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, 79 - struct pci_dev ***devices, u16 segment); 80 - extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt); 81 - 96 + struct dmar_dev_scope **devices, u16 segment); 97 + extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt); 98 + extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt); 99 + extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info, 100 + void *start, void*end, u16 segment, 101 + struct dmar_dev_scope *devices, 102 + int devices_cnt); 103 + extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, 104 + u16 segment, struct dmar_dev_scope *devices, 105 + int count); 82 106 /* Intel IOMMU detection */ 83 107 extern int detect_intel_iommu(void); 84 108 extern int enable_drhd_fault_handling(void); 85 109 #else 110 + struct dmar_pci_notify_info; 86 111 static inline int detect_intel_iommu(void) 87 112 { 88 113 return -ENODEV; ··· 179 138 180 139 #ifdef CONFIG_INTEL_IOMMU 181 140 extern int iommu_detected, no_iommu; 182 - extern struct list_head dmar_rmrr_units; 183 - struct dmar_rmrr_unit { 184 - struct list_head list; /* list of rmrr units */ 185 - struct acpi_dmar_header *hdr; /* ACPI header */ 186 - u64 base_address; /* reserved base address*/ 187 - u64 end_address; /* reserved end address */ 188 - struct 
pci_dev **devices; /* target devices */ 189 - int devices_cnt; /* target device count */ 190 - }; 191 - 192 - #define for_each_rmrr_units(rmrr) \ 193 - list_for_each_entry(rmrr, &dmar_rmrr_units, list) 194 - 195 - struct dmar_atsr_unit { 196 - struct list_head list; /* list of ATSR units */ 197 - struct acpi_dmar_header *hdr; /* ACPI header */ 198 - struct pci_dev **devices; /* target devices */ 199 - int devices_cnt; /* target device count */ 200 - u8 include_all:1; /* include all ports */ 201 - }; 202 - 203 - int dmar_parse_rmrr_atsr_dev(void); 204 141 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); 205 142 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); 143 + extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); 206 144 extern int intel_iommu_init(void); 207 145 #else /* !CONFIG_INTEL_IOMMU: */ 208 146 static inline int intel_iommu_init(void) { return -ENODEV; } ··· 193 173 { 194 174 return 0; 195 175 } 196 - static inline int dmar_parse_rmrr_atsr_dev(void) 176 + static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) 197 177 { 198 178 return 0; 199 179 }
+1
include/linux/intel-iommu.h
··· 319 319 int agaw; /* agaw of this iommu */ 320 320 int msagaw; /* max sagaw of this iommu */ 321 321 unsigned int irq; 322 + u16 segment; /* PCI segment# */ 322 323 unsigned char name[13]; /* Device Name */ 323 324 324 325 #ifdef CONFIG_INTEL_IOMMU
+2
include/linux/iova.h
··· 47 47 void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); 48 48 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); 49 49 void put_iova_domain(struct iova_domain *iovad); 50 + struct iova *split_and_remove_iova(struct iova_domain *iovad, 51 + struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); 50 52 51 53 #endif