Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86/gart: Disable GART explicitly before initialization
  dma-debug: Cleanup for copy-loop in filter_write()
  x86/amd-iommu: Remove obsolete parameter documentation
  x86/amd-iommu: use for_each_pci_dev
  Revert "x86: disable IOMMUs on kernel crash"
  x86/amd-iommu: warn when issuing command to uninitialized cmd buffer
  x86/amd-iommu: enable iommu before attaching devices
  x86/amd-iommu: Use helper function to destroy domain
  x86/amd-iommu: Report errors in acpi parsing functions upstream
  x86/amd-iommu: Pt mode fix for domain_destroy
  x86/amd-iommu: Protect IOMMU-API map/unmap path
  x86/amd-iommu: Remove double NULL check in check_device

+68 -34
-5
Documentation/kernel-parameters.txt
···
 	amd_iommu=	[HW,X86-64]
 			Pass parameters to the AMD IOMMU driver in the system.
 			Possible values are:
-			isolate - enable device isolation (each device, as far
-				  as possible, will get its own protection
-				  domain) [default]
-			share - put every device behind one IOMMU into the
-				same protection domain
 			fullflush - enable flushing of IO/TLB entries when
 				    they are unmapped. Otherwise they are
 				    flushed before they will be reused, which
+3
arch/x86/include/asm/amd_iommu_types.h
···
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
···
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE 8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
···
 	struct list_head list;  /* for list of all protection domains */
 	struct list_head dev_list; /* List of all devices in this domain */
 	spinlock_t lock;	/* mostly used to lock the page table*/
+	struct mutex api_lock;	/* protect page tables in the iommu-api path */
 	u16 id;			/* the domain id written to the device table */
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
+14 -6
arch/x86/kernel/amd_iommu.c
···
 		return false;
 
 	/* No device or no PCI device */
-	if (!dev || dev->bus != &pci_bus_type)
+	if (dev->bus != &pci_bus_type)
 		return false;
 
 	devid = get_device_id(dev);
···
 	u32 tail, head;
 	u8 *target;
 
+	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
···
 	struct dma_ops_domain *dma_dom;
 	u16 devid;
 
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+	for_each_pci_dev(dev) {
 
 		/* Do we handle this device? */
 		if (!check_device(&dev->dev))
···
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 		struct device *dev = dev_data->dev;
 
-		do_detach(dev);
+		__detach_device(dev);
 		atomic_set(&dev_data->bind, 0);
 	}
 
···
 		return NULL;
 
 	spin_lock_init(&domain->lock);
+	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		goto out_err;
···
 
 	free_pagetable(domain);
 
-	domain_id_free(domain->id);
-
-	kfree(domain);
+	protection_domain_free(domain);
 
 	dom->priv = NULL;
 }
···
 	iova  &= PAGE_MASK;
 	paddr &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
 		if (ret)
···
 		iova  += PAGE_SIZE;
 		paddr += PAGE_SIZE;
 	}
+
+	mutex_unlock(&domain->api_lock);
 
 	return 0;
 }
···
 
 	iova  &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova  += PAGE_SIZE;
 	}
 
 	iommu_flush_tlb_pde(domain);
+
+	mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
+33 -15
arch/x86/kernel/amd_iommu_init.c
···
 bool amd_iommu_np_cache __read_mostly;
 
 /*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
  */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
 
 /*
  * List of protection domains - used during resume
···
 	 */
 	for (i = 0; i < table->length; ++i)
 		checksum += p[i];
-	if (checksum != 0)
+	if (checksum != 0) {
 		/* ACPI table corrupt */
-		return -ENODEV;
+		amd_iommu_init_err = -ENODEV;
+		return 0;
+	}
 
 	p += IVRS_HEADER_LENGTH;
 
···
 	if (cmd_buf == NULL)
 		return NULL;
 
-	iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 
 	return cmd_buf;
 }
···
 		    &entry, sizeof(entry));
 
 	amd_iommu_reset_cmd_buffer(iommu);
+	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
 	free_pages((unsigned long)iommu->cmd_buf,
-		   get_order(iommu->cmd_buf_size));
+		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }
 
 /* allocates the memory where the IOMMU will log its events to */
···
 			    h->mmio_phys);
 
 		iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-		if (iommu == NULL)
-			return -ENOMEM;
+		if (iommu == NULL) {
+			amd_iommu_init_err = -ENOMEM;
+			return 0;
+		}
+
 		ret = init_iommu_one(iommu, h);
-		if (ret)
-			return ret;
+		if (ret) {
+			amd_iommu_init_err = ret;
+			return 0;
+		}
 		break;
 	default:
 		break;
···
 
 	}
 	WARN_ON(p != end);
-
-	amd_iommu_initialized = true;
 
 	return 0;
 }
···
 	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
 		return -ENODEV;
 
+	ret = amd_iommu_init_err;
+	if (ret)
+		goto out;
+
 	dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
 	alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
···
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
-	if (!amd_iommu_initialized)
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
 		goto free;
+	}
 
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
+
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
+		goto free;
+	}
 
 	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
···
 	if (ret)
 		goto free;
 
+	enable_iommus();
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
···
 	amd_iommu_init_api();
 
 	amd_iommu_init_notifier();
-
-	enable_iommus();
 
 	if (iommu_pass_through)
 		goto out;
···
 	return ret;
 
 free:
+	disable_iommus();
 
 	amd_iommu_uninit_devices();
 
+14 -1
arch/x86/kernel/aperture_64.c
···
 	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
 		int bus;
 		int dev_base, dev_limit;
+		u32 ctl;
 
 		bus = bus_dev_ranges[i].bus;
 		dev_base = bus_dev_ranges[i].dev_base;
···
 			gart_iommu_aperture = 1;
 			x86_init.iommu.iommu_init = gart_iommu_init;
 
-			aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+			ctl = read_pci_config(bus, slot, 3,
+					      AMD64_GARTAPERTURECTL);
+
+			/*
+			 * Before we do anything else disable the GART. It may
+			 * still be enabled if we boot into a crash-kernel here.
+			 * Reconfiguring the GART while it is enabled could have
+			 * unknown side-effects.
+			 */
+			ctl &= ~GARTEN;
+			write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+			aper_order = (ctl >> 1) & 7;
 			aper_size = (32 * 1024 * 1024) << aper_order;
 			aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
 			aper_base <<= 25;
-6
arch/x86/kernel/crash.c
···
 #include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
-#include <asm/x86_init.h>
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
···
 #ifdef CONFIG_HPET_TIMER
 	hpet_disable();
 #endif
-
-#ifdef CONFIG_X86_64
-	x86_platform.iommu_shutdown();
-#endif
-
 	crash_save_cpu(regs, safe_smp_processor_id());
 }
+3
arch/x86/kernel/pci-gart_64.c
···
 
 		enable_gart_translation(dev, __pa(agp_gatt_table));
 	}
+
+	/* Flush the GART-TLB to remove stale entries */
+	k8_flush_garts();
 }
 
 /*
+1 -1
lib/dma-debug.c
···
 	 * Now parse out the first token and use it as the name for the
 	 * driver to filter for.
 	 */
-	for (i = 0; i < NAME_MAX_LEN; ++i) {
+	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
 		current_driver_name[i] = buf[i];
 		if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
 			break;