Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU Updates from Joerg Roedel:
"A few patches have been queued up for this merge window:

- improvements for the ARM-SMMU driver (IOMMU_EXEC support, IOMMU
group support)
- updates and fixes for the shmobile IOMMU driver
- various fixes to generic IOMMU code and the Intel IOMMU driver
- some cleanups in IOMMU drivers (dev_is_pci() usage)"

* tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (36 commits)
iommu/vt-d: Fix signedness bug in alloc_irte()
iommu/vt-d: free all resources if failed to initialize DMARs
iommu/vt-d, trivial: clean sparse warnings
iommu/vt-d: fix wrong return value of dmar_table_init()
iommu/vt-d: release invalidation queue when destroying IOMMU unit
iommu/vt-d: fix access after free issue in function free_dmar_iommu()
iommu/vt-d: keep shared resources when failed to initialize iommu devices
iommu/vt-d: fix invalid memory access when freeing DMAR irq
iommu/vt-d, trivial: simplify code with existing macros
iommu/vt-d, trivial: use defined macro instead of hardcoding
iommu/vt-d: mark internal functions as static
iommu/vt-d, trivial: clean up unused code
iommu/vt-d, trivial: check suitable flag in function detect_intel_iommu()
iommu/vt-d, trivial: print correct domain id of static identity domain
iommu/vt-d, trivial: refine support of 64bit guest address
iommu/vt-d: fix resource leakage on error recovery path in iommu_init_domains()
iommu/vt-d: fix a race window in allocating domain ID for virtual machines
iommu/vt-d: fix PCI device reference leakage on error recovery path
drm/msm: Fix link error with !MSM_IOMMU
iommu/vt-d: use dedicated bitmap to track remapping entry allocation status
...

+279 -280
+1
drivers/gpu/drm/msm/Kconfig
··· 4 4 depends on DRM 5 5 depends on ARCH_MSM 6 6 depends on ARCH_MSM8960 7 + depends on MSM_IOMMU 7 8 select DRM_KMS_HELPER 8 9 select SHMEM 9 10 select TMPFS
+1
drivers/iommu/Kconfig
··· 207 207 bool "IOMMU for Renesas IPMMU/IPMMUI" 208 208 default n 209 209 depends on ARM 210 + depends on SH_MOBILE || COMPILE_TEST 210 211 select IOMMU_API 211 212 select ARM_DMA_USE_IOMMU 212 213 select SHMOBILE_IPMMU
+2 -2
drivers/iommu/amd_iommu.c
··· 248 248 if (!dev || !dev->dma_mask) 249 249 return false; 250 250 251 - /* No device or no PCI device */ 252 - if (dev->bus != &pci_bus_type) 251 + /* No PCI device */ 252 + if (!dev_is_pci(dev)) 253 253 return false; 254 254 255 255 devid = get_device_id(dev);
+26 -7
drivers/iommu/arm-smmu.c
··· 24 24 * - v7/v8 long-descriptor format 25 25 * - Non-secure access to the SMMU 26 26 * - 4k and 64k pages, with contiguous pte hints. 27 - * - Up to 39-bit addressing 27 + * - Up to 42-bit addressing (dependent on VA_BITS) 28 28 * - Context fault reporting 29 29 */ 30 30 ··· 61 61 #define ARM_SMMU_GR1(smmu) ((smmu)->base + (smmu)->pagesize) 62 62 63 63 /* Page table bits */ 64 - #define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) 64 + #define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) 65 65 #define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) 66 66 #define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) 67 67 #define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) 68 68 #define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) 69 69 #define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) 70 + #define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) 70 71 71 72 #if PAGE_SIZE == SZ_4K 72 73 #define ARM_SMMU_PTE_CONT_ENTRIES 16 ··· 1206 1205 unsigned long pfn, int flags, int stage) 1207 1206 { 1208 1207 pte_t *pte, *start; 1209 - pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF; 1208 + pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; 1210 1209 1211 1210 if (pmd_none(*pmd)) { 1212 1211 /* Allocate a new set of tables */ ··· 1245 1244 } 1246 1245 1247 1246 /* If no access, create a faulting entry to avoid TLB fills */ 1248 - if (!(flags & (IOMMU_READ | IOMMU_WRITE))) 1247 + if (flags & IOMMU_EXEC) 1248 + pteval &= ~ARM_SMMU_PTE_XN; 1249 + else if (!(flags & (IOMMU_READ | IOMMU_WRITE))) 1249 1250 pteval &= ~ARM_SMMU_PTE_PAGE; 1250 1251 1251 1252 pteval |= ARM_SMMU_PTE_SH_IS; ··· 1497 1494 { 1498 1495 struct arm_smmu_device *child, *parent, *smmu; 1499 1496 struct arm_smmu_master *master = NULL; 1497 + struct iommu_group *group; 1498 + int ret; 1499 + 1500 + if (dev->archdata.iommu) { 1501 + dev_warn(dev, "IOMMU driver already assigned to device\n"); 1502 + return -EINVAL; 1503 + } 1500 1504 1501 1505 spin_lock(&arm_smmu_devices_lock); 1502 1506 list_for_each_entry(parent, &arm_smmu_devices, list) { 
··· 1536 1526 if (!master) 1537 1527 return -ENODEV; 1538 1528 1529 + group = iommu_group_alloc(); 1530 + if (IS_ERR(group)) { 1531 + dev_err(dev, "Failed to allocate IOMMU group\n"); 1532 + return PTR_ERR(group); 1533 + } 1534 + 1535 + ret = iommu_group_add_device(group, dev); 1536 + iommu_group_put(group); 1539 1537 dev->archdata.iommu = smmu; 1540 - return 0; 1538 + 1539 + return ret; 1541 1540 } 1542 1541 1543 1542 static void arm_smmu_remove_device(struct device *dev) 1544 1543 { 1545 1544 dev->archdata.iommu = NULL; 1545 + iommu_group_remove_device(dev); 1546 1546 } 1547 1547 1548 1548 static struct iommu_ops arm_smmu_ops = { ··· 1750 1730 * allocation (PTRS_PER_PGD). 1751 1731 */ 1752 1732 #ifdef CONFIG_64BIT 1753 - /* Current maximum output size of 39 bits */ 1754 1733 smmu->s1_output_size = min(39UL, size); 1755 1734 #else 1756 1735 smmu->s1_output_size = min(32UL, size); ··· 1764 1745 } else { 1765 1746 #ifdef CONFIG_64BIT 1766 1747 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; 1767 - size = min(39, arm_smmu_id_size_to_bits(size)); 1748 + size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); 1768 1749 #else 1769 1750 size = 32; 1770 1751 #endif
+82 -53
drivers/iommu/dmar.c
··· 52 52 struct acpi_table_header * __initdata dmar_tbl; 53 53 static acpi_size dmar_tbl_size; 54 54 55 + static int alloc_iommu(struct dmar_drhd_unit *drhd); 56 + static void free_iommu(struct intel_iommu *iommu); 57 + 55 58 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) 56 59 { 57 60 /* ··· 103 100 if (!pdev) { 104 101 pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n", 105 102 segment, scope->bus, path->device, path->function); 106 - *dev = NULL; 107 103 return 0; 108 104 } 109 105 if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \ ··· 153 151 ret = dmar_parse_one_dev_scope(scope, 154 152 &(*devices)[index], segment); 155 153 if (ret) { 156 - kfree(*devices); 154 + dmar_free_dev_scope(devices, cnt); 157 155 return ret; 158 156 } 159 157 index ++; ··· 162 160 } 163 161 164 162 return 0; 163 + } 164 + 165 + void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt) 166 + { 167 + if (*devices && *cnt) { 168 + while (--*cnt >= 0) 169 + pci_dev_put((*devices)[*cnt]); 170 + kfree(*devices); 171 + *devices = NULL; 172 + *cnt = 0; 173 + } 165 174 } 166 175 167 176 /** ··· 206 193 return 0; 207 194 } 208 195 196 + static void dmar_free_drhd(struct dmar_drhd_unit *dmaru) 197 + { 198 + if (dmaru->devices && dmaru->devices_cnt) 199 + dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt); 200 + if (dmaru->iommu) 201 + free_iommu(dmaru->iommu); 202 + kfree(dmaru); 203 + } 204 + 209 205 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) 210 206 { 211 207 struct acpi_dmar_hardware_unit *drhd; 212 - int ret = 0; 213 208 214 209 drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; 215 210 216 211 if (dmaru->include_all) 217 212 return 0; 218 213 219 - ret = dmar_parse_dev_scope((void *)(drhd + 1), 220 - ((void *)drhd) + drhd->header.length, 221 - &dmaru->devices_cnt, &dmaru->devices, 222 - drhd->segment); 223 - if (ret) { 224 - list_del(&dmaru->list); 225 - kfree(dmaru); 226 - } 227 - return ret; 214 + return 
dmar_parse_dev_scope((void *)(drhd + 1), 215 + ((void *)drhd) + drhd->header.length, 216 + &dmaru->devices_cnt, &dmaru->devices, 217 + drhd->segment); 228 218 } 229 219 230 220 #ifdef CONFIG_ACPI_NUMA ··· 439 423 int __init dmar_dev_scope_init(void) 440 424 { 441 425 static int dmar_dev_scope_initialized; 442 - struct dmar_drhd_unit *drhd, *drhd_n; 426 + struct dmar_drhd_unit *drhd; 443 427 int ret = -ENODEV; 444 428 445 429 if (dmar_dev_scope_initialized) ··· 448 432 if (list_empty(&dmar_drhd_units)) 449 433 goto fail; 450 434 451 - list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) { 435 + list_for_each_entry(drhd, &dmar_drhd_units, list) { 452 436 ret = dmar_parse_dev(drhd); 453 437 if (ret) 454 438 goto fail; ··· 472 456 static int dmar_table_initialized; 473 457 int ret; 474 458 475 - if (dmar_table_initialized) 476 - return 0; 459 + if (dmar_table_initialized == 0) { 460 + ret = parse_dmar_table(); 461 + if (ret < 0) { 462 + if (ret != -ENODEV) 463 + pr_info("parse DMAR table failure.\n"); 464 + } else if (list_empty(&dmar_drhd_units)) { 465 + pr_info("No DMAR devices found\n"); 466 + ret = -ENODEV; 467 + } 477 468 478 - dmar_table_initialized = 1; 479 - 480 - ret = parse_dmar_table(); 481 - if (ret) { 482 - if (ret != -ENODEV) 483 - pr_info("parse DMAR table failure.\n"); 484 - return ret; 469 + if (ret < 0) 470 + dmar_table_initialized = ret; 471 + else 472 + dmar_table_initialized = 1; 485 473 } 486 474 487 - if (list_empty(&dmar_drhd_units)) { 488 - pr_info("No DMAR devices found\n"); 489 - return -ENODEV; 490 - } 491 - 492 - return 0; 475 + return dmar_table_initialized < 0 ? 
dmar_table_initialized : 0; 493 476 } 494 477 495 478 static void warn_invalid_dmar(u64 addr, const char *message) ··· 503 488 dmi_get_system_info(DMI_PRODUCT_VERSION)); 504 489 } 505 490 506 - int __init check_zero_address(void) 491 + static int __init check_zero_address(void) 507 492 { 508 493 struct acpi_table_dmar *dmar; 509 494 struct acpi_dmar_header *entry_header; ··· 561 546 if (ret) 562 547 ret = check_zero_address(); 563 548 { 564 - struct acpi_table_dmar *dmar; 565 - 566 - dmar = (struct acpi_table_dmar *) dmar_tbl; 567 - 568 - if (ret && irq_remapping_enabled && cpu_has_x2apic && 569 - dmar->flags & 0x1) 570 - pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); 571 - 572 549 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { 573 550 iommu_detected = 1; 574 551 /* Make sure ACS will be enabled */ ··· 572 565 x86_init.iommu.iommu_init = intel_iommu_init; 573 566 #endif 574 567 } 575 - early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); 568 + early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); 576 569 dmar_tbl = NULL; 577 570 578 571 return ret ? 
1 : -ENODEV; ··· 654 647 return err; 655 648 } 656 649 657 - int alloc_iommu(struct dmar_drhd_unit *drhd) 650 + static int alloc_iommu(struct dmar_drhd_unit *drhd) 658 651 { 659 652 struct intel_iommu *iommu; 660 653 u32 ver, sts; ··· 728 721 return err; 729 722 } 730 723 731 - void free_iommu(struct intel_iommu *iommu) 724 + static void free_iommu(struct intel_iommu *iommu) 732 725 { 733 - if (!iommu) 734 - return; 726 + if (iommu->irq) { 727 + free_irq(iommu->irq, iommu); 728 + irq_set_handler_data(iommu->irq, NULL); 729 + destroy_irq(iommu->irq); 730 + } 735 731 736 - free_dmar_iommu(iommu); 732 + if (iommu->qi) { 733 + free_page((unsigned long)iommu->qi->desc); 734 + kfree(iommu->qi->desc_status); 735 + kfree(iommu->qi); 736 + } 737 737 738 738 if (iommu->reg) 739 739 unmap_iommu(iommu); ··· 1064 1050 desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); 1065 1051 if (!desc_page) { 1066 1052 kfree(qi); 1067 - iommu->qi = 0; 1053 + iommu->qi = NULL; 1068 1054 return -ENOMEM; 1069 1055 } 1070 1056 ··· 1074 1060 if (!qi->desc_status) { 1075 1061 free_page((unsigned long) qi->desc); 1076 1062 kfree(qi); 1077 - iommu->qi = 0; 1063 + iommu->qi = NULL; 1078 1064 return -ENOMEM; 1079 1065 } 1080 1066 ··· 1125 1111 "Blocked an interrupt request due to source-id verification failure", 1126 1112 }; 1127 1113 1128 - #define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1) 1129 - 1130 - const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) 1114 + static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) 1131 1115 { 1132 1116 if (fault_reason >= 0x20 && (fault_reason - 0x20 < 1133 1117 ARRAY_SIZE(irq_remap_fault_reasons))) { ··· 1315 1303 int __init enable_drhd_fault_handling(void) 1316 1304 { 1317 1305 struct dmar_drhd_unit *drhd; 1306 + struct intel_iommu *iommu; 1318 1307 1319 1308 /* 1320 1309 * Enable fault control interrupt. 
1321 1310 */ 1322 - for_each_drhd_unit(drhd) { 1323 - int ret; 1324 - struct intel_iommu *iommu = drhd->iommu; 1311 + for_each_iommu(iommu, drhd) { 1325 1312 u32 fault_status; 1326 - ret = dmar_set_interrupt(iommu); 1313 + int ret = dmar_set_interrupt(iommu); 1327 1314 1328 1315 if (ret) { 1329 1316 pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n", ··· 1377 1366 return 0; 1378 1367 return dmar->flags & 0x1; 1379 1368 } 1369 + 1370 + static int __init dmar_free_unused_resources(void) 1371 + { 1372 + struct dmar_drhd_unit *dmaru, *dmaru_n; 1373 + 1374 + /* DMAR units are in use */ 1375 + if (irq_remapping_enabled || intel_iommu_enabled) 1376 + return 0; 1377 + 1378 + list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) { 1379 + list_del(&dmaru->list); 1380 + dmar_free_drhd(dmaru); 1381 + } 1382 + 1383 + return 0; 1384 + } 1385 + 1386 + late_initcall(dmar_free_unused_resources); 1380 1387 IOMMU_INIT_POST(detect_intel_iommu);
+3 -3
drivers/iommu/fsl_pamu_domain.c
··· 691 691 * Use LIODN of the PCI controller while attaching a 692 692 * PCI device. 693 693 */ 694 - if (dev->bus == &pci_bus_type) { 694 + if (dev_is_pci(dev)) { 695 695 pdev = to_pci_dev(dev); 696 696 pci_ctl = pci_bus_to_host(pdev->bus); 697 697 /* ··· 729 729 * Use LIODN of the PCI controller while detaching a 730 730 * PCI device. 731 731 */ 732 - if (dev->bus == &pci_bus_type) { 732 + if (dev_is_pci(dev)) { 733 733 pdev = to_pci_dev(dev); 734 734 pci_ctl = pci_bus_to_host(pdev->bus); 735 735 /* ··· 1056 1056 * For platform devices we allocate a separate group for 1057 1057 * each of the devices. 1058 1058 */ 1059 - if (dev->bus == &pci_bus_type) { 1059 + if (dev_is_pci(dev)) { 1060 1060 pdev = to_pci_dev(dev); 1061 1061 /* Don't create device groups for virtual PCI bridges */ 1062 1062 if (pdev->subordinate)
+88 -128
drivers/iommu/intel-iommu.c
··· 63 63 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 64 64 65 65 #define MAX_AGAW_WIDTH 64 66 + #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT) 66 67 67 68 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1) 68 69 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1) ··· 107 106 108 107 static inline int agaw_to_width(int agaw) 109 108 { 110 - return 30 + agaw * LEVEL_STRIDE; 109 + return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH); 111 110 } 112 111 113 112 static inline int width_to_agaw(int width) 114 113 { 115 - return (width - 30) / LEVEL_STRIDE; 114 + return DIV_ROUND_UP(width - 30, LEVEL_STRIDE); 116 115 } 117 116 118 117 static inline unsigned int level_to_offset_bits(int level) ··· 142 141 143 142 static inline unsigned long lvl_to_nr_pages(unsigned int lvl) 144 143 { 145 - return 1 << ((lvl - 1) * LEVEL_STRIDE); 144 + return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH); 146 145 } 147 146 148 147 /* VT-d pages must always be _smaller_ than MM pages. 
Otherwise things ··· 289 288 pte->val = 0; 290 289 } 291 290 292 - static inline void dma_set_pte_readable(struct dma_pte *pte) 293 - { 294 - pte->val |= DMA_PTE_READ; 295 - } 296 - 297 - static inline void dma_set_pte_writable(struct dma_pte *pte) 298 - { 299 - pte->val |= DMA_PTE_WRITE; 300 - } 301 - 302 - static inline void dma_set_pte_snp(struct dma_pte *pte) 303 - { 304 - pte->val |= DMA_PTE_SNP; 305 - } 306 - 307 - static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot) 308 - { 309 - pte->val = (pte->val & ~3) | (prot & 3); 310 - } 311 - 312 291 static inline u64 dma_pte_addr(struct dma_pte *pte) 313 292 { 314 293 #ifdef CONFIG_64BIT ··· 297 316 /* Must have a full atomic 64-bit read */ 298 317 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK; 299 318 #endif 300 - } 301 - 302 - static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn) 303 - { 304 - pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT; 305 319 } 306 320 307 321 static inline bool dma_pte_present(struct dma_pte *pte) ··· 382 406 383 407 static void flush_unmaps_timeout(unsigned long data); 384 408 385 - DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); 409 + static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); 386 410 387 411 #define HIGH_WATER_MARK 250 388 412 struct deferred_flush_tables { ··· 628 652 struct dmar_drhd_unit *drhd = NULL; 629 653 int i; 630 654 631 - for_each_drhd_unit(drhd) { 632 - if (drhd->ignored) 633 - continue; 655 + for_each_active_drhd_unit(drhd) { 634 656 if (segment != drhd->segment) 635 657 continue; 636 658 ··· 839 865 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; 840 866 unsigned int large_page = 1; 841 867 struct dma_pte *first_pte, *pte; 842 - int order; 843 868 844 869 BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); 845 870 BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); ··· 863 890 864 891 } while (start_pfn && start_pfn <= last_pfn); 865 892 866 - order = 
(large_page - 1) * 9; 867 - return order; 893 + return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH); 868 894 } 869 895 870 896 static void dma_pte_free_level(struct dmar_domain *domain, int level, ··· 1227 1255 unsigned long nlongs; 1228 1256 1229 1257 ndomains = cap_ndoms(iommu->cap); 1230 - pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id, 1231 - ndomains); 1258 + pr_debug("IOMMU%d: Number of Domains supported <%ld>\n", 1259 + iommu->seq_id, ndomains); 1232 1260 nlongs = BITS_TO_LONGS(ndomains); 1233 1261 1234 1262 spin_lock_init(&iommu->lock); ··· 1238 1266 */ 1239 1267 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL); 1240 1268 if (!iommu->domain_ids) { 1241 - printk(KERN_ERR "Allocating domain id array failed\n"); 1269 + pr_err("IOMMU%d: allocating domain id array failed\n", 1270 + iommu->seq_id); 1242 1271 return -ENOMEM; 1243 1272 } 1244 1273 iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *), 1245 1274 GFP_KERNEL); 1246 1275 if (!iommu->domains) { 1247 - printk(KERN_ERR "Allocating domain array failed\n"); 1276 + pr_err("IOMMU%d: allocating domain array failed\n", 1277 + iommu->seq_id); 1278 + kfree(iommu->domain_ids); 1279 + iommu->domain_ids = NULL; 1248 1280 return -ENOMEM; 1249 1281 } 1250 1282 ··· 1265 1289 static void domain_exit(struct dmar_domain *domain); 1266 1290 static void vm_domain_exit(struct dmar_domain *domain); 1267 1291 1268 - void free_dmar_iommu(struct intel_iommu *iommu) 1292 + static void free_dmar_iommu(struct intel_iommu *iommu) 1269 1293 { 1270 1294 struct dmar_domain *domain; 1271 - int i; 1295 + int i, count; 1272 1296 unsigned long flags; 1273 1297 1274 1298 if ((iommu->domains) && (iommu->domain_ids)) { ··· 1277 1301 clear_bit(i, iommu->domain_ids); 1278 1302 1279 1303 spin_lock_irqsave(&domain->iommu_lock, flags); 1280 - if (--domain->iommu_count == 0) { 1304 + count = --domain->iommu_count; 1305 + spin_unlock_irqrestore(&domain->iommu_lock, flags); 1306 + if 
(count == 0) { 1281 1307 if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) 1282 1308 vm_domain_exit(domain); 1283 1309 else 1284 1310 domain_exit(domain); 1285 1311 } 1286 - spin_unlock_irqrestore(&domain->iommu_lock, flags); 1287 1312 } 1288 1313 } 1289 1314 1290 1315 if (iommu->gcmd & DMA_GCMD_TE) 1291 1316 iommu_disable_translation(iommu); 1292 1317 1293 - if (iommu->irq) { 1294 - irq_set_handler_data(iommu->irq, NULL); 1295 - /* This will mask the irq */ 1296 - free_irq(iommu->irq, iommu); 1297 - destroy_irq(iommu->irq); 1298 - } 1299 - 1300 1318 kfree(iommu->domains); 1301 1319 kfree(iommu->domain_ids); 1320 + iommu->domains = NULL; 1321 + iommu->domain_ids = NULL; 1302 1322 1303 1323 g_iommus[iommu->seq_id] = NULL; 1304 1324 ··· 2217 2245 if (!si_domain) 2218 2246 return -EFAULT; 2219 2247 2220 - pr_debug("Identity mapping domain is domain %d\n", si_domain->id); 2221 - 2222 2248 for_each_active_iommu(iommu, drhd) { 2223 2249 ret = iommu_attach_domain(si_domain, iommu); 2224 2250 if (ret) { ··· 2231 2261 } 2232 2262 2233 2263 si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY; 2264 + pr_debug("IOMMU: identity mapping domain is domain %d\n", 2265 + si_domain->id); 2234 2266 2235 2267 if (hw) 2236 2268 return 0; ··· 2464 2492 goto error; 2465 2493 } 2466 2494 2467 - for_each_drhd_unit(drhd) { 2468 - if (drhd->ignored) 2469 - continue; 2470 - 2471 - iommu = drhd->iommu; 2495 + for_each_active_iommu(iommu, drhd) { 2472 2496 g_iommus[iommu->seq_id] = iommu; 2473 2497 2474 2498 ret = iommu_init_domains(iommu); ··· 2488 2520 /* 2489 2521 * Start from the sane iommu hardware state. 
2490 2522 */ 2491 - for_each_drhd_unit(drhd) { 2492 - if (drhd->ignored) 2493 - continue; 2494 - 2495 - iommu = drhd->iommu; 2496 - 2523 + for_each_active_iommu(iommu, drhd) { 2497 2524 /* 2498 2525 * If the queued invalidation is already initialized by us 2499 2526 * (for example, while enabling interrupt-remapping) then ··· 2508 2545 dmar_disable_qi(iommu); 2509 2546 } 2510 2547 2511 - for_each_drhd_unit(drhd) { 2512 - if (drhd->ignored) 2513 - continue; 2514 - 2515 - iommu = drhd->iommu; 2516 - 2548 + for_each_active_iommu(iommu, drhd) { 2517 2549 if (dmar_enable_qi(iommu)) { 2518 2550 /* 2519 2551 * Queued Invalidate not enabled, use Register Based ··· 2591 2633 * global invalidate iotlb 2592 2634 * enable translation 2593 2635 */ 2594 - for_each_drhd_unit(drhd) { 2636 + for_each_iommu(iommu, drhd) { 2595 2637 if (drhd->ignored) { 2596 2638 /* 2597 2639 * we always have to disable PMRs or DMA may fail on 2598 2640 * this device 2599 2641 */ 2600 2642 if (force_on) 2601 - iommu_disable_protect_mem_regions(drhd->iommu); 2643 + iommu_disable_protect_mem_regions(iommu); 2602 2644 continue; 2603 2645 } 2604 - iommu = drhd->iommu; 2605 2646 2606 2647 iommu_flush_write_buffer(iommu); 2607 2648 ··· 2622 2665 2623 2666 return 0; 2624 2667 error: 2625 - for_each_drhd_unit(drhd) { 2626 - if (drhd->ignored) 2627 - continue; 2628 - iommu = drhd->iommu; 2629 - free_iommu(iommu); 2630 - } 2668 + for_each_active_iommu(iommu, drhd) 2669 + free_dmar_iommu(iommu); 2670 + kfree(deferred_flush); 2631 2671 kfree(g_iommus); 2632 2672 return ret; 2633 2673 } ··· 2712 2758 struct pci_dev *pdev; 2713 2759 int found; 2714 2760 2715 - if (unlikely(dev->bus != &pci_bus_type)) 2761 + if (unlikely(!dev_is_pci(dev))) 2716 2762 return 1; 2717 2763 2718 2764 pdev = to_pci_dev(dev); ··· 3272 3318 } 3273 3319 } 3274 3320 3275 - for_each_drhd_unit(drhd) { 3321 + for_each_active_drhd_unit(drhd) { 3276 3322 int i; 3277 - if (drhd->ignored || drhd->include_all) 3323 + if (drhd->include_all) 3278 3324 
continue; 3279 3325 3280 3326 for (i = 0; i < drhd->devices_cnt; i++) ··· 3468 3514 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) 3469 3515 { 3470 3516 struct acpi_dmar_reserved_memory *rmrr; 3471 - int ret; 3472 3517 3473 3518 rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; 3474 - ret = dmar_parse_dev_scope((void *)(rmrr + 1), 3475 - ((void *)rmrr) + rmrr->header.length, 3476 - &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); 3477 - 3478 - if (ret || (rmrru->devices_cnt == 0)) { 3479 - list_del(&rmrru->list); 3480 - kfree(rmrru); 3481 - } 3482 - return ret; 3519 + return dmar_parse_dev_scope((void *)(rmrr + 1), 3520 + ((void *)rmrr) + rmrr->header.length, 3521 + &rmrru->devices_cnt, &rmrru->devices, 3522 + rmrr->segment); 3483 3523 } 3484 3524 3485 3525 static LIST_HEAD(dmar_atsr_units); ··· 3498 3550 3499 3551 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru) 3500 3552 { 3501 - int rc; 3502 3553 struct acpi_dmar_atsr *atsr; 3503 3554 3504 3555 if (atsru->include_all) 3505 3556 return 0; 3506 3557 3507 3558 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); 3508 - rc = dmar_parse_dev_scope((void *)(atsr + 1), 3509 - (void *)atsr + atsr->header.length, 3510 - &atsru->devices_cnt, &atsru->devices, 3511 - atsr->segment); 3512 - if (rc || !atsru->devices_cnt) { 3513 - list_del(&atsru->list); 3514 - kfree(atsru); 3559 + return dmar_parse_dev_scope((void *)(atsr + 1), 3560 + (void *)atsr + atsr->header.length, 3561 + &atsru->devices_cnt, &atsru->devices, 3562 + atsr->segment); 3563 + } 3564 + 3565 + static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru) 3566 + { 3567 + dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); 3568 + kfree(atsru); 3569 + } 3570 + 3571 + static void intel_iommu_free_dmars(void) 3572 + { 3573 + struct dmar_rmrr_unit *rmrru, *rmrr_n; 3574 + struct dmar_atsr_unit *atsru, *atsr_n; 3575 + 3576 + list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { 3577 + list_del(&rmrru->list); 3578 
+ dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); 3579 + kfree(rmrru); 3515 3580 } 3516 3581 3517 - return rc; 3582 + list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) { 3583 + list_del(&atsru->list); 3584 + intel_iommu_free_atsr(atsru); 3585 + } 3518 3586 } 3519 3587 3520 3588 int dmar_find_matched_atsr_unit(struct pci_dev *dev) ··· 3574 3610 3575 3611 int __init dmar_parse_rmrr_atsr_dev(void) 3576 3612 { 3577 - struct dmar_rmrr_unit *rmrr, *rmrr_n; 3578 - struct dmar_atsr_unit *atsr, *atsr_n; 3613 + struct dmar_rmrr_unit *rmrr; 3614 + struct dmar_atsr_unit *atsr; 3579 3615 int ret = 0; 3580 3616 3581 - list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { 3617 + list_for_each_entry(rmrr, &dmar_rmrr_units, list) { 3582 3618 ret = rmrr_parse_dev(rmrr); 3583 3619 if (ret) 3584 3620 return ret; 3585 3621 } 3586 3622 3587 - list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) { 3623 + list_for_each_entry(atsr, &dmar_atsr_units, list) { 3588 3624 ret = atsr_parse_dev(atsr); 3589 3625 if (ret) 3590 3626 return ret; ··· 3631 3667 3632 3668 int __init intel_iommu_init(void) 3633 3669 { 3634 - int ret = 0; 3670 + int ret = -ENODEV; 3635 3671 struct dmar_drhd_unit *drhd; 3672 + struct intel_iommu *iommu; 3636 3673 3637 3674 /* VT-d is required for a TXT/tboot launch, so enforce that */ 3638 3675 force_on = tboot_force_iommu(); ··· 3641 3676 if (dmar_table_init()) { 3642 3677 if (force_on) 3643 3678 panic("tboot: Failed to initialize DMAR table\n"); 3644 - return -ENODEV; 3679 + goto out_free_dmar; 3645 3680 } 3646 3681 3647 3682 /* 3648 3683 * Disable translation if already enabled prior to OS handover. 
3649 3684 */ 3650 - for_each_drhd_unit(drhd) { 3651 - struct intel_iommu *iommu; 3652 - 3653 - if (drhd->ignored) 3654 - continue; 3655 - 3656 - iommu = drhd->iommu; 3685 + for_each_active_iommu(iommu, drhd) 3657 3686 if (iommu->gcmd & DMA_GCMD_TE) 3658 3687 iommu_disable_translation(iommu); 3659 - } 3660 3688 3661 3689 if (dmar_dev_scope_init() < 0) { 3662 3690 if (force_on) 3663 3691 panic("tboot: Failed to initialize DMAR device scope\n"); 3664 - return -ENODEV; 3692 + goto out_free_dmar; 3665 3693 } 3666 3694 3667 3695 if (no_iommu || dmar_disabled) 3668 - return -ENODEV; 3696 + goto out_free_dmar; 3669 3697 3670 3698 if (iommu_init_mempool()) { 3671 3699 if (force_on) 3672 3700 panic("tboot: Failed to initialize iommu memory\n"); 3673 - return -ENODEV; 3701 + goto out_free_dmar; 3674 3702 } 3675 3703 3676 3704 if (list_empty(&dmar_rmrr_units)) ··· 3675 3717 if (dmar_init_reserved_ranges()) { 3676 3718 if (force_on) 3677 3719 panic("tboot: Failed to reserve iommu ranges\n"); 3678 - return -ENODEV; 3720 + goto out_free_mempool; 3679 3721 } 3680 3722 3681 3723 init_no_remapping_devices(); ··· 3685 3727 if (force_on) 3686 3728 panic("tboot: Failed to initialize DMARs\n"); 3687 3729 printk(KERN_ERR "IOMMU: dmar init failed\n"); 3688 - put_iova_domain(&reserved_iova_list); 3689 - iommu_exit_mempool(); 3690 - return ret; 3730 + goto out_free_reserved_range; 3691 3731 } 3692 3732 printk(KERN_INFO 3693 3733 "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n"); ··· 3705 3749 intel_iommu_enabled = 1; 3706 3750 3707 3751 return 0; 3752 + 3753 + out_free_reserved_range: 3754 + put_iova_domain(&reserved_iova_list); 3755 + out_free_mempool: 3756 + iommu_exit_mempool(); 3757 + out_free_dmar: 3758 + intel_iommu_free_dmars(); 3759 + return ret; 3708 3760 } 3709 3761 3710 3762 static void iommu_detach_dependent_devices(struct intel_iommu *iommu, ··· 3841 3877 } 3842 3878 3843 3879 /* domain id for virtual machine, it won't be set in context */ 3844 - static unsigned 
long vm_domid; 3880 + static atomic_t vm_domid = ATOMIC_INIT(0); 3845 3881 3846 3882 static struct dmar_domain *iommu_alloc_vm_domain(void) 3847 3883 { ··· 3851 3887 if (!domain) 3852 3888 return NULL; 3853 3889 3854 - domain->id = vm_domid++; 3890 + domain->id = atomic_inc_return(&vm_domid); 3855 3891 domain->nid = -1; 3856 3892 memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp)); 3857 3893 domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; ··· 3898 3934 unsigned long i; 3899 3935 unsigned long ndomains; 3900 3936 3901 - for_each_drhd_unit(drhd) { 3902 - if (drhd->ignored) 3903 - continue; 3904 - iommu = drhd->iommu; 3905 - 3937 + for_each_active_iommu(iommu, drhd) { 3906 3938 ndomains = cap_ndoms(iommu->cap); 3907 3939 for_each_set_bit(i, iommu->domain_ids, ndomains) { 3908 3940 if (iommu->domains[i] == domain) {
+45 -60
drivers/iommu/intel_irq_remapping.c
··· 40 40 41 41 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); 42 42 43 + static int __init parse_ioapics_under_ir(void); 44 + 43 45 static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 44 46 { 45 47 struct irq_cfg *cfg = irq_get_chip_data(irq); 46 48 return cfg ? &cfg->irq_2_iommu : NULL; 47 49 } 48 50 49 - int get_irte(int irq, struct irte *entry) 51 + static int get_irte(int irq, struct irte *entry) 50 52 { 51 53 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 52 54 unsigned long flags; ··· 71 69 struct ir_table *table = iommu->ir_table; 72 70 struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); 73 71 struct irq_cfg *cfg = irq_get_chip_data(irq); 74 - u16 index, start_index; 75 72 unsigned int mask = 0; 76 73 unsigned long flags; 77 - int i; 74 + int index; 78 75 79 76 if (!count || !irq_iommu) 80 77 return -1; 81 - 82 - /* 83 - * start the IRTE search from index 0. 84 - */ 85 - index = start_index = 0; 86 78 87 79 if (count > 1) { 88 80 count = __roundup_pow_of_two(count); ··· 92 96 } 93 97 94 98 raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 95 - do { 96 - for (i = index; i < index + count; i++) 97 - if (table->base[i].present) 98 - break; 99 - /* empty index found */ 100 - if (i == index + count) 101 - break; 102 - 103 - index = (index + count) % INTR_REMAP_TABLE_ENTRIES; 104 - 105 - if (index == start_index) { 106 - raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 107 - printk(KERN_ERR "can't allocate an IRTE\n"); 108 - return -1; 109 - } 110 - } while (1); 111 - 112 - for (i = index; i < index + count; i++) 113 - table->base[i].present = 1; 114 - 115 - cfg->remapped = 1; 116 - irq_iommu->iommu = iommu; 117 - irq_iommu->irte_index = index; 118 - irq_iommu->sub_handle = 0; 119 - irq_iommu->irte_mask = mask; 120 - 99 + index = bitmap_find_free_region(table->bitmap, 100 + INTR_REMAP_TABLE_ENTRIES, mask); 101 + if (index < 0) { 102 + pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id); 103 + } else { 104 + cfg->remapped = 1; 105 + irq_iommu->iommu = iommu; 
106 + irq_iommu->irte_index = index; 107 + irq_iommu->sub_handle = 0; 108 + irq_iommu->irte_mask = mask; 109 + } 121 110 raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 122 111 123 112 return index; ··· 235 254 set_64bit(&entry->low, 0); 236 255 set_64bit(&entry->high, 0); 237 256 } 257 + bitmap_release_region(iommu->ir_table->bitmap, index, 258 + irq_iommu->irte_mask); 238 259 239 260 return qi_flush_iec(iommu, index, irq_iommu->irte_mask); 240 261 } ··· 319 336 return -1; 320 337 } 321 338 322 - set_irte_sid(irte, 1, 0, sid); 339 + set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid); 323 340 324 341 return 0; 325 342 } ··· 436 453 { 437 454 struct ir_table *ir_table; 438 455 struct page *pages; 456 + unsigned long *bitmap; 439 457 440 458 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), 441 459 GFP_ATOMIC); ··· 448 464 INTR_REMAP_PAGE_ORDER); 449 465 450 466 if (!pages) { 451 - printk(KERN_ERR "failed to allocate pages of order %d\n", 452 - INTR_REMAP_PAGE_ORDER); 467 + pr_err("IR%d: failed to allocate pages of order %d\n", 468 + iommu->seq_id, INTR_REMAP_PAGE_ORDER); 453 469 kfree(iommu->ir_table); 454 470 return -ENOMEM; 455 471 } 456 472 473 + bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), 474 + sizeof(long), GFP_ATOMIC); 475 + if (bitmap == NULL) { 476 + pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); 477 + __free_pages(pages, INTR_REMAP_PAGE_ORDER); 478 + kfree(ir_table); 479 + return -ENOMEM; 480 + } 481 + 457 482 ir_table->base = page_address(pages); 483 + ir_table->bitmap = bitmap; 458 484 459 485 iommu_set_irq_remapping(iommu, mode); 460 486 return 0; ··· 515 521 static int __init intel_irq_remapping_supported(void) 516 522 { 517 523 struct dmar_drhd_unit *drhd; 524 + struct intel_iommu *iommu; 518 525 519 526 if (disable_irq_remap) 520 527 return 0; ··· 534 539 if (!dmar_ir_support()) 535 540 return 0; 536 541 537 - for_each_drhd_unit(drhd) { 538 - struct intel_iommu *iommu = drhd->iommu; 539 - 542 + 
for_each_iommu(iommu, drhd) 540 543 if (!ecap_ir_support(iommu->ecap)) 541 544 return 0; 542 - } 543 545 544 546 return 1; 545 547 } ··· 544 552 static int __init intel_enable_irq_remapping(void) 545 553 { 546 554 struct dmar_drhd_unit *drhd; 555 + struct intel_iommu *iommu; 547 556 bool x2apic_present; 548 557 int setup = 0; 549 558 int eim = 0; ··· 557 564 } 558 565 559 566 if (x2apic_present) { 567 + pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n"); 568 + 560 569 eim = !dmar_x2apic_optout(); 561 570 if (!eim) 562 571 printk(KERN_WARNING ··· 567 572 "Use 'intremap=no_x2apic_optout' to override BIOS request.\n"); 568 573 } 569 574 570 - for_each_drhd_unit(drhd) { 571 - struct intel_iommu *iommu = drhd->iommu; 572 - 575 + for_each_iommu(iommu, drhd) { 573 576 /* 574 577 * If the queued invalidation is already initialized, 575 578 * shouldn't disable it. ··· 592 599 /* 593 600 * check for the Interrupt-remapping support 594 601 */ 595 - for_each_drhd_unit(drhd) { 596 - struct intel_iommu *iommu = drhd->iommu; 597 - 602 + for_each_iommu(iommu, drhd) { 598 603 if (!ecap_ir_support(iommu->ecap)) 599 604 continue; 600 605 ··· 606 615 /* 607 616 * Enable queued invalidation for all the DRHD's. 608 617 */ 609 - for_each_drhd_unit(drhd) { 610 - int ret; 611 - struct intel_iommu *iommu = drhd->iommu; 612 - ret = dmar_enable_qi(iommu); 618 + for_each_iommu(iommu, drhd) { 619 + int ret = dmar_enable_qi(iommu); 613 620 614 621 if (ret) { 615 622 printk(KERN_ERR "DRHD %Lx: failed to enable queued, " ··· 620 631 /* 621 632 * Setup Interrupt-remapping for all the DRHD's now. 622 633 */ 623 - for_each_drhd_unit(drhd) { 624 - struct intel_iommu *iommu = drhd->iommu; 625 - 634 + for_each_iommu(iommu, drhd) { 626 635 if (!ecap_ir_support(iommu->ecap)) 627 636 continue; 628 637 ··· 761 774 * Finds the assocaition between IOAPIC's and its Interrupt-remapping 762 775 * hardware unit. 
763 776 */ 764 - int __init parse_ioapics_under_ir(void) 777 + static int __init parse_ioapics_under_ir(void) 765 778 { 766 779 struct dmar_drhd_unit *drhd; 780 + struct intel_iommu *iommu; 767 781 int ir_supported = 0; 768 782 int ioapic_idx; 769 783 770 - for_each_drhd_unit(drhd) { 771 - struct intel_iommu *iommu = drhd->iommu; 772 - 784 + for_each_iommu(iommu, drhd) 773 785 if (ecap_ir_support(iommu->ecap)) { 774 786 if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu)) 775 787 return -1; 776 788 777 789 ir_supported = 1; 778 790 } 779 - } 780 791 781 792 if (!ir_supported) 782 793 return 0; ··· 792 807 return 1; 793 808 } 794 809 795 - int __init ir_dev_scope_init(void) 810 + static int __init ir_dev_scope_init(void) 796 811 { 797 812 if (!irq_remapping_enabled) 798 813 return 0;
+3 -3
drivers/iommu/irq_remapping.c
··· 150 150 return do_setup_msix_irqs(dev, nvec); 151 151 } 152 152 153 - void eoi_ioapic_pin_remapped(int apic, int pin, int vector) 153 + static void eoi_ioapic_pin_remapped(int apic, int pin, int vector) 154 154 { 155 155 /* 156 156 * Intr-remapping uses pin number as the virtual vector ··· 295 295 vector, attr); 296 296 } 297 297 298 - int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask, 299 - bool force) 298 + static int set_remapped_irq_affinity(struct irq_data *data, 299 + const struct cpumask *mask, bool force) 300 300 { 301 301 if (!config_enabled(CONFIG_SMP) || !remap_ops || 302 302 !remap_ops->set_affinity)
+1
drivers/iommu/of_iommu.c
··· 20 20 #include <linux/export.h> 21 21 #include <linux/limits.h> 22 22 #include <linux/of.h> 23 + #include <linux/of_iommu.h> 23 24 24 25 /** 25 26 * of_get_dma_window - Parse *dma-window property and returns 0 if found.
+1 -2
drivers/iommu/shmobile-iommu.c
··· 380 380 kmem_cache_destroy(l1cache); 381 381 return -ENOMEM; 382 382 } 383 - archdata = kmalloc(sizeof(*archdata), GFP_KERNEL); 383 + archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); 384 384 if (!archdata) { 385 385 kmem_cache_destroy(l1cache); 386 386 kmem_cache_destroy(l2cache); 387 387 return -ENOMEM; 388 388 } 389 389 spin_lock_init(&archdata->attach_lock); 390 - archdata->attached = NULL; 391 390 archdata->ipmmu = ipmmu; 392 391 ipmmu_archdata = archdata; 393 392 bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
+5 -5
drivers/iommu/shmobile-ipmmu.c
··· 35 35 if (!ipmmu) 36 36 return; 37 37 38 - mutex_lock(&ipmmu->flush_lock); 38 + spin_lock(&ipmmu->flush_lock); 39 39 if (ipmmu->tlb_enabled) 40 40 ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN); 41 41 else 42 42 ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH); 43 - mutex_unlock(&ipmmu->flush_lock); 43 + spin_unlock(&ipmmu->flush_lock); 44 44 } 45 45 46 46 void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size, ··· 49 49 if (!ipmmu) 50 50 return; 51 51 52 - mutex_lock(&ipmmu->flush_lock); 52 + spin_lock(&ipmmu->flush_lock); 53 53 switch (size) { 54 54 default: 55 55 ipmmu->tlb_enabled = 0; ··· 85 85 } 86 86 ipmmu_reg_write(ipmmu, IMTTBR, phys); 87 87 ipmmu_reg_write(ipmmu, IMASID, asid); 88 - mutex_unlock(&ipmmu->flush_lock); 88 + spin_unlock(&ipmmu->flush_lock); 89 89 } 90 90 91 91 static int ipmmu_probe(struct platform_device *pdev) ··· 104 104 dev_err(&pdev->dev, "cannot allocate device data\n"); 105 105 return -ENOMEM; 106 106 } 107 - mutex_init(&ipmmu->flush_lock); 107 + spin_lock_init(&ipmmu->flush_lock); 108 108 ipmmu->dev = &pdev->dev; 109 109 ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start, 110 110 resource_size(res));
+1 -1
drivers/iommu/shmobile-ipmmu.h
··· 14 14 struct device *dev; 15 15 void __iomem *ipmmu_base; 16 16 int tlb_enabled; 17 - struct mutex flush_lock; 17 + spinlock_t flush_lock; 18 18 const char * const *dev_names; 19 19 unsigned int num_dev_names; 20 20 };
-4
include/linux/dma_remapping.h
··· 27 27 28 28 29 29 #ifdef CONFIG_INTEL_IOMMU 30 - extern void free_dmar_iommu(struct intel_iommu *iommu); 31 30 extern int iommu_calculate_agaw(struct intel_iommu *iommu); 32 31 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu); 33 32 extern int dmar_disabled; ··· 39 40 static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu) 40 41 { 41 42 return 0; 42 - } 43 - static inline void free_dmar_iommu(struct intel_iommu *iommu) 44 - { 45 43 } 46 44 #define dmar_disabled (1) 47 45 #define intel_iommu_enabled (0)
+8 -5
include/linux/dmar.h
··· 33 33 #define DMAR_X2APIC_OPT_OUT 0x2 34 34 35 35 struct intel_iommu; 36 + 36 37 #ifdef CONFIG_DMAR_TABLE 37 38 extern struct acpi_table_header *dmar_tbl; 38 39 struct dmar_drhd_unit { ··· 53 52 #define for_each_drhd_unit(drhd) \ 54 53 list_for_each_entry(drhd, &dmar_drhd_units, list) 55 54 55 + #define for_each_active_drhd_unit(drhd) \ 56 + list_for_each_entry(drhd, &dmar_drhd_units, list) \ 57 + if (drhd->ignored) {} else 58 + 56 59 #define for_each_active_iommu(i, drhd) \ 57 60 list_for_each_entry(drhd, &dmar_drhd_units, list) \ 58 61 if (i=drhd->iommu, drhd->ignored) {} else ··· 67 62 68 63 extern int dmar_table_init(void); 69 64 extern int dmar_dev_scope_init(void); 65 + extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, 66 + struct pci_dev ***devices, u16 segment); 67 + extern void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt); 70 68 71 69 /* Intel IOMMU detection */ 72 70 extern int detect_intel_iommu(void); 73 71 extern int enable_drhd_fault_handling(void); 74 - 75 - extern int parse_ioapics_under_ir(void); 76 - extern int alloc_iommu(struct dmar_drhd_unit *); 77 72 #else 78 73 static inline int detect_intel_iommu(void) 79 74 { ··· 162 157 int dmar_parse_rmrr_atsr_dev(void); 163 158 extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); 164 159 extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); 165 - extern int dmar_parse_dev_scope(void *start, void *end, int *cnt, 166 - struct pci_dev ***devices, u16 segment); 167 160 extern int intel_iommu_init(void); 168 161 #else /* !CONFIG_INTEL_IOMMU: */ 169 162 static inline int intel_iommu_init(void) { return -ENODEV; }
+1 -2
include/linux/intel-iommu.h
··· 288 288 289 289 struct ir_table { 290 290 struct irte *base; 291 + unsigned long *bitmap; 291 292 }; 292 293 #endif 293 294 ··· 348 347 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); 349 348 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev); 350 349 351 - extern int alloc_iommu(struct dmar_drhd_unit *drhd); 352 - extern void free_iommu(struct intel_iommu *iommu); 353 350 extern int dmar_enable_qi(struct intel_iommu *iommu); 354 351 extern void dmar_disable_qi(struct intel_iommu *iommu); 355 352 extern int dmar_reenable_qi(struct intel_iommu *iommu);
+11 -5
include/linux/iommu.h
··· 24 24 #include <linux/types.h> 25 25 #include <trace/events/iommu.h> 26 26 27 - #define IOMMU_READ (1) 28 - #define IOMMU_WRITE (2) 29 - #define IOMMU_CACHE (4) /* DMA cache coherency */ 27 + #define IOMMU_READ (1 << 0) 28 + #define IOMMU_WRITE (1 << 1) 29 + #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ 30 + #define IOMMU_EXEC (1 << 3) 30 31 31 32 struct iommu_ops; 32 33 struct iommu_group; ··· 248 247 return NULL; 249 248 } 250 249 250 + static inline struct iommu_group *iommu_group_get_by_id(int id) 251 + { 252 + return NULL; 253 + } 254 + 251 255 static inline void iommu_domain_free(struct iommu_domain *domain) 252 256 { 253 257 } ··· 297 291 return 0; 298 292 } 299 293 300 - static inline int domain_has_cap(struct iommu_domain *domain, 301 - unsigned long cap) 294 + static inline int iommu_domain_has_cap(struct iommu_domain *domain, 295 + unsigned long cap) 302 296 { 303 297 return 0; 304 298 }