Merge branch 'iommu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent

+68 -34
-5
Documentation/kernel-parameters.txt
···
320     amd_iommu=  [HW,X86-84]
321                 Pass parameters to the AMD IOMMU driver in the system.
322                 Possible values are:
323  -              isolate - enable device isolation (each device, as far
324  -                        as possible, will get its own protection
325  -                        domain) [default]
326  -              share - put every device behind one IOMMU into the
327  -                      same protection domain
328                 fullflush - enable flushing of IO/TLB entries when
329                             they are unmapped. Otherwise they are
330                             flushed before they will be reused, which
···
320     amd_iommu=  [HW,X86-84]
321                 Pass parameters to the AMD IOMMU driver in the system.
322                 Possible values are:
323                 fullflush - enable flushing of IO/TLB entries when
324                             they are unmapped. Otherwise they are
325                             flushed before they will be reused, which
+3
arch/x86/include/asm/amd_iommu_types.h
···
 21    #define _ASM_X86_AMD_IOMMU_TYPES_H
 22
 23    #include <linux/types.h>
 24    #include <linux/list.h>
 25    #include <linux/spinlock.h>
 26
···
141
142    /* constants to configure the command buffer */
143    #define CMD_BUFFER_SIZE 8192
144    #define CMD_BUFFER_ENTRIES 512
145    #define MMIO_CMD_SIZE_SHIFT 56
146    #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
···
239        struct list_head list;      /* for list of all protection domains */
240        struct list_head dev_list;  /* List of all devices in this domain */
241        spinlock_t lock;            /* mostly used to lock the page table*/
242        u16 id;                     /* the domain id written to the device table */
243        int mode;                   /* paging mode (0-6 levels) */
244        u64 *pt_root;               /* page table root pointer */
···
 21    #define _ASM_X86_AMD_IOMMU_TYPES_H
 22
 23    #include <linux/types.h>
 24 +  #include <linux/mutex.h>
 25    #include <linux/list.h>
 26    #include <linux/spinlock.h>
 27
···
140
141    /* constants to configure the command buffer */
142    #define CMD_BUFFER_SIZE 8192
143 +  #define CMD_BUFFER_UNINITIALIZED 1
144    #define CMD_BUFFER_ENTRIES 512
145    #define MMIO_CMD_SIZE_SHIFT 56
146    #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
···
237        struct list_head list;      /* for list of all protection domains */
238        struct list_head dev_list;  /* List of all devices in this domain */
239        spinlock_t lock;            /* mostly used to lock the page table*/
240 +      struct mutex api_lock;      /* protect page tables in the iommu-api path */
241        u16 id;                     /* the domain id written to the device table */
242        int mode;                   /* paging mode (0-6 levels) */
243        u64 *pt_root;               /* page table root pointer */
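The new CMD_BUFFER_UNINITIALIZED value is not a second buffer size: CMD_BUFFER_SIZE is a power of two, so bit 0 of cmd_buf_size is otherwise unused and can carry a flag meaning "allocated but not yet handed to the hardware". The amd_iommu_init.c and amd_iommu.c hunks below set, test, and mask off that bit. A minimal userspace sketch of the idea, assuming only the two defines above (the main() flow and the comments naming driver functions are illustrative, not the driver code):

#include <assert.h>
#include <stdio.h>

#define CMD_BUFFER_SIZE          8192
#define CMD_BUFFER_UNINITIALIZED 1

int main(void)
{
	unsigned long cmd_buf_size;

	/* alloc_command_buffer(): remember the size, mark it uninitialized */
	cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;

	/* queuing a command now would trip the WARN_ON() added below */
	assert(cmd_buf_size & CMD_BUFFER_UNINITIALIZED);

	/* once the buffer is registered with the hardware, clear the flag */
	cmd_buf_size &= ~(unsigned long)CMD_BUFFER_UNINITIALIZED;

	/* wherever the real size is needed, the flag bit is masked off */
	printf("size = %lu\n", cmd_buf_size & ~(unsigned long)CMD_BUFFER_UNINITIALIZED);
	return 0;
}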
+14 -6
arch/x86/kernel/amd_iommu.c
···
 118          return false;
 119
 120      /* No device or no PCI device */
 121 -    if (!dev || dev->bus != &pci_bus_type)
 122          return false;
 123
 124      devid = get_device_id(dev);
···
 392      u32 tail, head;
 393      u8 *target;
 394
 395      tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 396      target = iommu->cmd_buf + tail;
 397      memcpy_toio(target, cmd, sizeof(*cmd));
···
2187      struct dma_ops_domain *dma_dom;
2188      u16 devid;
2189
2190 -    while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2191
2192          /* Do we handle this device? */
2193          if (!check_device(&dev->dev))
···
2299      list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
2300          struct device *dev = dev_data->dev;
2301
2302 -        do_detach(dev);
2303          atomic_set(&dev_data->bind, 0);
2304      }
2305
···
2328          return NULL;
2329
2330      spin_lock_init(&domain->lock);
2331      domain->id = domain_id_alloc();
2332      if (!domain->id)
2333          goto out_err;
···
2381
2382      free_pagetable(domain);
2383
2384 -    domain_id_free(domain->id);
2385 -
2386 -    kfree(domain);
2387
2388      dom->priv = NULL;
2389  }
···
2456      iova  &= PAGE_MASK;
2457      paddr &= PAGE_MASK;
2458
2459      for (i = 0; i < npages; ++i) {
2460          ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
2461          if (ret)
···
2466          iova  += PAGE_SIZE;
2467          paddr += PAGE_SIZE;
2468      }
2469
2470      return 0;
2471  }
···
2481
2482      iova &= PAGE_MASK;
2483
2484      for (i = 0; i < npages; ++i) {
2485          iommu_unmap_page(domain, iova, PM_MAP_4k);
2486          iova += PAGE_SIZE;
2487      }
2488
2489      iommu_flush_tlb_pde(domain);
2490  }
2491
2492  static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
···
 118          return false;
 119
 120      /* No device or no PCI device */
 121 +    if (dev->bus != &pci_bus_type)
 122          return false;
 123
 124      devid = get_device_id(dev);
···
 392      u32 tail, head;
 393      u8 *target;
 394
 395 +    WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 396      tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 397      target = iommu->cmd_buf + tail;
 398      memcpy_toio(target, cmd, sizeof(*cmd));
···
2186      struct dma_ops_domain *dma_dom;
2187      u16 devid;
2188
2189 +    for_each_pci_dev(dev) {
2190
2191          /* Do we handle this device? */
2192          if (!check_device(&dev->dev))
···
2298      list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
2299          struct device *dev = dev_data->dev;
2300
2301 +        __detach_device(dev);
2302          atomic_set(&dev_data->bind, 0);
2303      }
2304
···
2327          return NULL;
2328
2329      spin_lock_init(&domain->lock);
2330 +    mutex_init(&domain->api_lock);
2331      domain->id = domain_id_alloc();
2332      if (!domain->id)
2333          goto out_err;
···
2379
2380      free_pagetable(domain);
2381
2382 +    protection_domain_free(domain);
2383
2384      dom->priv = NULL;
2385  }
···
2456      iova  &= PAGE_MASK;
2457      paddr &= PAGE_MASK;
2458
2459 +    mutex_lock(&domain->api_lock);
2460 +
2461      for (i = 0; i < npages; ++i) {
2462          ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
2463          if (ret)
···
2464          iova  += PAGE_SIZE;
2465          paddr += PAGE_SIZE;
2466      }
2467 +
2468 +    mutex_unlock(&domain->api_lock);
2469
2470      return 0;
2471  }
···
2477
2478      iova &= PAGE_MASK;
2479
2480 +    mutex_lock(&domain->api_lock);
2481 +
2482      for (i = 0; i < npages; ++i) {
2483          iommu_unmap_page(domain, iova, PM_MAP_4k);
2484          iova += PAGE_SIZE;
2485      }
2486
2487      iommu_flush_tlb_pde(domain);
2488 +
2489 +    mutex_unlock(&domain->api_lock);
2490  }
2491
2492  static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
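The api_lock taken above wraps the whole per-page loop, so two IOMMU-API callers cannot interleave their updates of the same protection domain's page tables. Below is a rough userspace analogy of that locking shape, a sketch only: fake_domain, fake_map_range and the pthread plumbing are invented for illustration and are not the driver code.

#include <pthread.h>
#include <stdio.h>

#define NPAGES 16

struct fake_domain {
	pthread_mutex_t api_lock;	/* plays the role of domain->api_lock */
	int		pt[NPAGES];	/* stand-in for the domain page table */
};

struct map_req {
	struct fake_domain *dom;
	int first, npages, val;
};

/* The whole multi-page update runs under the lock, mirroring the map path above. */
static void *fake_map_range(void *arg)
{
	struct map_req *r = arg;

	pthread_mutex_lock(&r->dom->api_lock);
	for (int i = 0; i < r->npages; ++i)
		r->dom->pt[r->first + i] = r->val;
	pthread_mutex_unlock(&r->dom->api_lock);
	return NULL;
}

int main(void)
{
	struct fake_domain dom = { .pt = { 0 } };
	struct map_req a = { &dom, 0, 8, 1 };
	struct map_req b = { &dom, 8, 8, 2 };
	pthread_t t1, t2;

	pthread_mutex_init(&dom.api_lock, NULL);

	/* two concurrent "map" calls; each range is applied whole w.r.t. the other */
	pthread_create(&t1, NULL, fake_map_range, &a);
	pthread_create(&t2, NULL, fake_map_range, &b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	for (int i = 0; i < NPAGES; ++i)
		printf("%d", dom.pt[i]);
	printf("\n");
	return 0;
}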
+33 -15
arch/x86/kernel/amd_iommu_init.c
···
 138  bool amd_iommu_np_cache __read_mostly;
 139
 140  /*
 141 - * Set to true if ACPI table parsing and hardware intialization went properly
 142   */
 143 - static bool amd_iommu_initialized;
 144
 145  /*
 146   * List of protection domains - used during resume
···
 391       */
 392      for (i = 0; i < table->length; ++i)
 393          checksum += p[i];
 394 -    if (checksum != 0)
 395          /* ACPI table corrupt */
 396 -        return -ENODEV;
 397
 398      p += IVRS_HEADER_LENGTH;
 399
···
 438      if (cmd_buf == NULL)
 439          return NULL;
 440
 441 -    iommu->cmd_buf_size = CMD_BUFFER_SIZE;
 442
 443      return cmd_buf;
 444  }
···
 474                  &entry, sizeof(entry));
 475
 476      amd_iommu_reset_cmd_buffer(iommu);
 477  }
 478
 479  static void __init free_command_buffer(struct amd_iommu *iommu)
 480  {
 481      free_pages((unsigned long)iommu->cmd_buf,
 482 -               get_order(iommu->cmd_buf_size));
 483  }
 484
 485  /* allocates the memory where the IOMMU will log its events to */
···
 923                      h->mmio_phys);
 924
 925          iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
 926 -        if (iommu == NULL)
 927 -            return -ENOMEM;
 928          ret = init_iommu_one(iommu, h);
 929 -        if (ret)
 930 -            return ret;
 931          break;
 932      default:
 933          break;
···
 941
 942      }
 943      WARN_ON(p != end);
 944 -
 945 -    amd_iommu_initialized = true;
 946
 947      return 0;
 948  }
···
1217      if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
1218          return -ENODEV;
1219
1220      dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
1221      alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
1222      rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
···
1280      if (acpi_table_parse("IVRS", init_iommu_all) != 0)
1281          goto free;
1282
1283 -    if (!amd_iommu_initialized)
1284          goto free;
1285
1286      if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
1287          goto free;
1288
1289      ret = sysdev_class_register(&amd_iommu_sysdev_class);
1290      if (ret)
···
1305      if (ret)
1306          goto free;
1307
1308      if (iommu_pass_through)
1309          ret = amd_iommu_init_passthrough();
1310      else
···
1318      amd_iommu_init_api();
1319
1320      amd_iommu_init_notifier();
1321 -
1322 -    enable_iommus();
1323
1324      if (iommu_pass_through)
1325          goto out;
···
1332      return ret;
1333
1334  free:
1335
1336      amd_iommu_uninit_devices();
1337
···
 138  bool amd_iommu_np_cache __read_mostly;
 139
 140  /*
 141 + * The ACPI table parsing functions set this variable on an error
 142   */
 143 + static int __initdata amd_iommu_init_err;
 144
 145  /*
 146   * List of protection domains - used during resume
···
 391       */
 392      for (i = 0; i < table->length; ++i)
 393          checksum += p[i];
 394 +    if (checksum != 0) {
 395          /* ACPI table corrupt */
 396 +        amd_iommu_init_err = -ENODEV;
 397 +        return 0;
 398 +    }
 399
 400      p += IVRS_HEADER_LENGTH;
 401
···
 436      if (cmd_buf == NULL)
 437          return NULL;
 438
 439 +    iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 440
 441      return cmd_buf;
 442  }
···
 472                  &entry, sizeof(entry));
 473
 474      amd_iommu_reset_cmd_buffer(iommu);
 475 +    iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 476  }
 477
 478  static void __init free_command_buffer(struct amd_iommu *iommu)
 479  {
 480      free_pages((unsigned long)iommu->cmd_buf,
 481 +               get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 482  }
 483
 484  /* allocates the memory where the IOMMU will log its events to */
···
 920                      h->mmio_phys);
 921
 922          iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
 923 +        if (iommu == NULL) {
 924 +            amd_iommu_init_err = -ENOMEM;
 925 +            return 0;
 926 +        }
 927 +
 928          ret = init_iommu_one(iommu, h);
 929 +        if (ret) {
 930 +            amd_iommu_init_err = ret;
 931 +            return 0;
 932 +        }
 933          break;
 934      default:
 935          break;
···
 933
 934      }
 935      WARN_ON(p != end);
 936
 937      return 0;
 938  }
···
1211      if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
1212          return -ENODEV;
1213
1214 +    ret = amd_iommu_init_err;
1215 +    if (ret)
1216 +        goto out;
1217 +
1218      dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
1219      alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
1220      rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
···
1270      if (acpi_table_parse("IVRS", init_iommu_all) != 0)
1271          goto free;
1272
1273 +    if (amd_iommu_init_err) {
1274 +        ret = amd_iommu_init_err;
1275          goto free;
1276 +    }
1277
1278      if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
1279          goto free;
1280 +
1281 +    if (amd_iommu_init_err) {
1282 +        ret = amd_iommu_init_err;
1283 +        goto free;
1284 +    }
1285
1286      ret = sysdev_class_register(&amd_iommu_sysdev_class);
1287      if (ret)
···
1288      if (ret)
1289          goto free;
1290
1291 +    enable_iommus();
1292 +
1293      if (iommu_pass_through)
1294          ret = amd_iommu_init_passthrough();
1295      else
···
1299      amd_iommu_init_api();
1300
1301      amd_iommu_init_notifier();
1302
1303      if (iommu_pass_through)
1304          goto out;
···
1315      return ret;
1316
1317  free:
1318 +    disable_iommus();
1319
1320      amd_iommu_uninit_devices();
1321
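acpi_table_parse() only reports whether the table was found and does not hand the handler's return value back to its caller, which is why the hunk above records failures in the static __initdata variable amd_iommu_init_err and amd_iommu_init() checks it after each parsing step. A small userspace sketch of the same pattern, assuming nothing from the kernel: parse_table() and init_all() are invented stand-ins.

#include <errno.h>
#include <stdio.h>

static int init_err;	/* plays the role of amd_iommu_init_err */

/* Stand-in for acpi_table_parse(): runs the handler but drops its result,
 * only reporting whether the "table" was found at all. */
static int parse_table(int (*handler)(void))
{
	(void)handler();
	return 0;
}

static int init_all(void)
{
	/* pretend per-IOMMU setup failed; the caller never sees this value... */
	init_err = -ENODEV;
	return -ENODEV;
}

int main(void)
{
	if (parse_table(init_all) != 0)
		return 1;

	/* ...so it is picked up out of band, as amd_iommu_init() now does */
	if (init_err) {
		fprintf(stderr, "init failed: %d\n", init_err);
		return 1;
	}

	printf("initialized\n");
	return 0;
}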
+14 -1
arch/x86/kernel/aperture_64.c
···
393      for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
394          int bus;
395          int dev_base, dev_limit;
396
397          bus = bus_dev_ranges[i].bus;
398          dev_base = bus_dev_ranges[i].dev_base;
···
407          gart_iommu_aperture = 1;
408          x86_init.iommu.iommu_init = gart_iommu_init;
409
410 -        aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
411          aper_size = (32 * 1024 * 1024) << aper_order;
412          aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
413          aper_base <<= 25;
···
393      for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
394          int bus;
395          int dev_base, dev_limit;
396 +        u32 ctl;
397
398          bus = bus_dev_ranges[i].bus;
399          dev_base = bus_dev_ranges[i].dev_base;
···
406          gart_iommu_aperture = 1;
407          x86_init.iommu.iommu_init = gart_iommu_init;
408
409 +        ctl = read_pci_config(bus, slot, 3,
410 +                              AMD64_GARTAPERTURECTL);
411 +
412 +        /*
413 +         * Before we do anything else disable the GART. It may
414 +         * still be enabled if we boot into a crash-kernel here.
415 +         * Reconfiguring the GART while it is enabled could have
416 +         * unknown side-effects.
417 +         */
418 +        ctl &= ~GARTEN;
419 +        write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
420 +
421 +        aper_order = (ctl >> 1) & 7;
422          aper_size = (32 * 1024 * 1024) << aper_order;
423          aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
424          aper_base <<= 25;
-6
arch/x86/kernel/crash.c
···
 27  #include <asm/cpu.h>
 28  #include <asm/reboot.h>
 29  #include <asm/virtext.h>
 30 - #include <asm/x86_init.h>
 31
 32  #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 33
···
102  #ifdef CONFIG_HPET_TIMER
103      hpet_disable();
104  #endif
105 -
106 - #ifdef CONFIG_X86_64
107 -    x86_platform.iommu_shutdown();
108 - #endif
109 -
110      crash_save_cpu(regs, safe_smp_processor_id());
111  }
···
 27  #include <asm/cpu.h>
 28  #include <asm/reboot.h>
 29  #include <asm/virtext.h>
 30
 31  #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 32
···
103  #ifdef CONFIG_HPET_TIMER
104      hpet_disable();
105  #endif
106      crash_save_cpu(regs, safe_smp_processor_id());
107  }
+3
arch/x86/kernel/pci-gart_64.c
···
565
566          enable_gart_translation(dev, __pa(agp_gatt_table));
567      }
568  }
569
570  /*
···
565
566          enable_gart_translation(dev, __pa(agp_gatt_table));
567      }
568 +
569 +    /* Flush the GART-TLB to remove stale entries */
570 +    k8_flush_garts();
571  }
572
573  /*
+1 -1
lib/dma-debug.c
···
570       * Now parse out the first token and use it as the name for the
571       * driver to filter for.
572       */
573 -    for (i = 0; i < NAME_MAX_LEN; ++i) {
574          current_driver_name[i] = buf[i];
575          if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
576              break;
···
570       * Now parse out the first token and use it as the name for the
571       * driver to filter for.
572       */
573 +    for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
574          current_driver_name[i] = buf[i];
575          if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
576              break;
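The loop bound changes from NAME_MAX_LEN to NAME_MAX_LEN - 1 so the copy never consumes the last byte of current_driver_name, leaving room for a terminating NUL even when the first token fills the buffer. A self-contained userspace illustration of the fixed loop; the buffer here is made tiny on purpose so the limit is actually hit (the kernel's constant is larger), and the input string is invented for the example:

#include <ctype.h>
#include <stdio.h>

#define NAME_MAX_LEN 8

int main(void)
{
	const char *buf = "averylongdrivername e1000";
	char name[NAME_MAX_LEN];
	size_t i;

	/* copy at most NAME_MAX_LEN - 1 characters of the first token ... */
	for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
		name[i] = buf[i];
		if (isspace((unsigned char)buf[i]) || buf[i] == 0)
			break;
	}
	/* ... so index i is always in bounds for the terminating NUL */
	name[i] = 0;

	printf("filter: \"%s\"\n", name);
	return 0;
}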