Merge branch 'amd-iommu/2.6.36' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent

+69 -23
+6
arch/x86/include/asm/amd_iommu_proto.h
···
 
 #endif /* !CONFIG_AMD_IOMMU_STATS */
 
+static inline bool is_rd890_iommu(struct pci_dev *pdev)
+{
+        return (pdev->vendor == PCI_VENDOR_ID_ATI) &&
+               (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
+}
+
 #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */
+12
arch/x86/include/asm/amd_iommu_types.h
···
         /* capabilities of that IOMMU read from ACPI */
         u32 cap;
 
+        /* flags read from acpi table */
+        u8 acpi_flags;
+
         /*
          * Capability pointer. There could be more than one IOMMU per PCI
          * device function if there are more than one AMD IOMMU capability
···
 
         /* default dma_ops domain for that IOMMU */
         struct dma_ops_domain *default_dom;
+
+        /*
+         * This array is required to work around a potential BIOS bug.
+         * The BIOS may fail to restore parts of the PCI configuration
+         * space when the system resumes from S3. The result is that the
+         * IOMMU no longer executes commands, which leads to system
+         * failure.
+         */
+        u32 cache_cfg[4];
 };
 
 /*
+3 -1
arch/x86/kernel/amd_iommu.c
···
                            size_t size,
                            int dir)
 {
+        dma_addr_t flush_addr;
         dma_addr_t i, start;
         unsigned int pages;
 
···
             (dma_addr + size > dma_dom->aperture_size))
                 return;
 
+        flush_addr = dma_addr;
         pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
         dma_addr &= PAGE_MASK;
         start = dma_addr;
···
         dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
         if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-                iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+                iommu_flush_pages(&dma_dom->domain, flush_addr, size);
                 dma_dom->need_flush = false;
         }
 }
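The subtle part of this fix: dma_addr is masked to a page boundary before the flush, so if the original address was not page aligned, a flush computed from the masked address plus the original size can come up one page short and leave the tail of the mapping stale in the IOTLB. A small standalone illustration of that arithmetic (plain userspace C, not kernel code; it assumes 4 KiB pages and the usual offset-plus-length rounding that iommu_num_pages() performs):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* number of pages touched by the range [addr, addr + len) */
static unsigned long num_pages(unsigned long addr, unsigned long len)
{
        return ((addr & (PAGE_SIZE - 1)) + len + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
        unsigned long dma_addr = 0x12345800UL; /* not page aligned */
        unsigned long size     = 0x1000UL;     /* one page worth of data */

        /* the mapping spans two pages ... */
        printf("pages mapped : %lu\n", num_pages(dma_addr, size));
        /* ... but a flush based on the masked address covers only one */
        printf("pages flushed: %lu\n", num_pages(dma_addr & PAGE_MASK, size));
        return 0;
}

Passing the saved flush_addr keeps the flush in line with the pages that were actually mapped.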
+45 -22
arch/x86/kernel/amd_iommu_init.c
···
         iommu->last_device = calc_devid(MMIO_GET_BUS(range),
                                         MMIO_GET_LD(range));
         iommu->evt_msi_num = MMIO_MSI_NUM(misc);
+
+        if (is_rd890_iommu(iommu->dev)) {
+                pci_read_config_dword(iommu->dev, 0xf0, &iommu->cache_cfg[0]);
+                pci_read_config_dword(iommu->dev, 0xf4, &iommu->cache_cfg[1]);
+                pci_read_config_dword(iommu->dev, 0xf8, &iommu->cache_cfg[2]);
+                pci_read_config_dword(iommu->dev, 0xfc, &iommu->cache_cfg[3]);
+        }
 }
 
 /*
···
         struct ivhd_entry *e;
 
         /*
-         * First set the recommended feature enable bits from ACPI
-         * into the IOMMU control registers
+         * First save the recommended feature enable bits from ACPI
          */
-        h->flags & IVHD_FLAG_HT_TUN_EN_MASK ?
-                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
-                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
-
-        h->flags & IVHD_FLAG_PASSPW_EN_MASK ?
-                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
-                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
-
-        h->flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
-                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
-                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
-
-        h->flags & IVHD_FLAG_ISOC_EN_MASK ?
-                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
-                iommu_feature_disable(iommu, CONTROL_ISOC_EN);
-
-        /*
-         * make IOMMU memory accesses cache coherent
-         */
-        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+        iommu->acpi_flags = h->flags;
 
         /*
          * Done. Now parse the device entries
···
         }
 }
 
+static void iommu_init_flags(struct amd_iommu *iommu)
+{
+        iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
+                iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
+                iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
+
+        iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
+                iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
+                iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
+
+        iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
+                iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
+                iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
+
+        iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
+                iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
+                iommu_feature_disable(iommu, CONTROL_ISOC_EN);
+
+        /*
+         * make IOMMU memory accesses cache coherent
+         */
+        iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+}
+
+static void iommu_apply_quirks(struct amd_iommu *iommu)
+{
+        if (is_rd890_iommu(iommu->dev)) {
+                pci_write_config_dword(iommu->dev, 0xf0, iommu->cache_cfg[0]);
+                pci_write_config_dword(iommu->dev, 0xf4, iommu->cache_cfg[1]);
+                pci_write_config_dword(iommu->dev, 0xf8, iommu->cache_cfg[2]);
+                pci_write_config_dword(iommu->dev, 0xfc, iommu->cache_cfg[3]);
+        }
+}
+
 /*
  * This function finally enables all IOMMUs found in the system after
  * they have been initialized
···
 
         for_each_iommu(iommu) {
                 iommu_disable(iommu);
+                iommu_apply_quirks(iommu);
+                iommu_init_flags(iommu);
                 iommu_set_device_table(iommu);
                 iommu_enable_command_buffer(iommu);
                 iommu_enable_event_buffer(iommu);
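The net effect of this restructuring: the IVHD feature bits and the RD890 config-space words are no longer programmed only while the ACPI table is parsed, but every time enable_iommus() runs, which is presumably also the path taken when the hardware is re-initialized after resume from S3 (the case the struct comment above describes). A condensed per-IOMMU sketch of the resulting enable order (names taken from the hunks above; the steps after the event buffer are elided because they are not part of this diff):

for_each_iommu(iommu) {
        iommu_disable(iommu);               /* stop the IOMMU before touching it  */
        iommu_apply_quirks(iommu);          /* restore RD890 words at 0xf0-0xfc   */
        iommu_init_flags(iommu);            /* apply the saved IVHD feature flags */
        iommu_set_device_table(iommu);      /* existing enable sequence continues */
        iommu_enable_command_buffer(iommu);
        iommu_enable_event_buffer(iommu);
        /* ... */
}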
+3
include/linux/pci_ids.h
···
 #define PCI_DEVICE_ID_VLSI_82C147      0x0105
 #define PCI_DEVICE_ID_VLSI_VAS96011    0x0702
 
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU      0x5a23
+
 #define PCI_VENDOR_ID_ADL              0x1005
 #define PCI_DEVICE_ID_ADL_2301         0x2301