Documentation/kernel-parameters.txt (-5)

···
     amd_iommu=  [HW,X86-84]
             Pass parameters to the AMD IOMMU driver in the system.
             Possible values are:
-            isolate - enable device isolation (each device, as far
-                      as possible, will get its own protection
-                      domain) [default]
-            share - put every device behind one IOMMU into the
-                    same protection domain
             fullflush - enable flushing of IO/TLB entries when
                         they are unmapped. Otherwise they are
                         flushed before they will be reused, which
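With isolate and share removed, fullflush is the only value this entry still documents. For illustration, a hypothetical kernel command line enabling it (the root device and surrounding options are placeholders, not part of the patch):

    root=/dev/sda1 ro amd_iommu=fullflush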
arch/x86/include/asm/amd_iommu_types.h (+3)
···
 #define _ASM_X86_AMD_IOMMU_TYPES_H
 
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
···
 
 /* constants to configure the command buffer */
 #define CMD_BUFFER_SIZE 8192
+#define CMD_BUFFER_UNINITIALIZED 1
 #define CMD_BUFFER_ENTRIES 512
 #define MMIO_CMD_SIZE_SHIFT 56
 #define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
···
     struct list_head list;     /* for list of all protection domains */
     struct list_head dev_list; /* List of all devices in this domain */
     spinlock_t lock;           /* mostly used to lock the page table*/
+    struct mutex api_lock;     /* protect page tables in the iommu-api path */
     u16 id;                    /* the domain id written to the device table */
     int mode;                  /* paging mode (0-6 levels) */
     u64 *pt_root;              /* page table root pointer */
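Side note on the new constant: CMD_BUFFER_UNINITIALIZED can be OR-ed into cmd_buf_size because CMD_BUFFER_SIZE (8192) is a power of two, so bit 0 of the size is otherwise always zero and is free to carry a flag. A minimal standalone sketch of that trick, with hypothetical names (BUF_SIZE, BUF_UNINITIALIZED):

    #include <stdio.h>

    #define BUF_SIZE          8192  /* power of two: the low bits are always zero */
    #define BUF_UNINITIALIZED 1     /* flag smuggled into bit 0 of the size field */

    int main(void)
    {
        unsigned long size = BUF_SIZE | BUF_UNINITIALIZED;

        /* The flag and the real size can be recovered independently. */
        printf("uninitialized: %lu\n", size & BUF_UNINITIALIZED);
        printf("real size:     %lu\n", size & ~(unsigned long)BUF_UNINITIALIZED);

        /* Once the hardware is set up, clear the flag in place. */
        size &= ~(unsigned long)BUF_UNINITIALIZED;
        printf("uninitialized: %lu\n", size & BUF_UNINITIALIZED);

        return 0;
    }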
arch/x86/kernel/amd_iommu.c (+14, -6)
···
         return false;
 
     /* No device or no PCI device */
-    if (!dev || dev->bus != &pci_bus_type)
+    if (dev->bus != &pci_bus_type)
         return false;
 
     devid = get_device_id(dev);
···
     u32 tail, head;
     u8 *target;
 
+    WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
     tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
     target = iommu->cmd_buf + tail;
     memcpy_toio(target, cmd, sizeof(*cmd));
···
     struct dma_ops_domain *dma_dom;
     u16 devid;
 
-    while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+    for_each_pci_dev(dev) {
 
         /* Do we handle this device? */
         if (!check_device(&dev->dev))
···
     list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
         struct device *dev = dev_data->dev;
 
-        do_detach(dev);
+        __detach_device(dev);
         atomic_set(&dev_data->bind, 0);
     }
···
         return NULL;
 
     spin_lock_init(&domain->lock);
+    mutex_init(&domain->api_lock);
     domain->id = domain_id_alloc();
     if (!domain->id)
         goto out_err;
···
 
     free_pagetable(domain);
 
-    domain_id_free(domain->id);
-
-    kfree(domain);
+    protection_domain_free(domain);
 
     dom->priv = NULL;
 }
···
     iova  &= PAGE_MASK;
     paddr &= PAGE_MASK;
 
+    mutex_lock(&domain->api_lock);
+
     for (i = 0; i < npages; ++i) {
         ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
         if (ret)
···
         iova  += PAGE_SIZE;
         paddr += PAGE_SIZE;
     }
+
+    mutex_unlock(&domain->api_lock);
 
     return 0;
 }
···
 
     iova &= PAGE_MASK;
 
+    mutex_lock(&domain->api_lock);
+
     for (i = 0; i < npages; ++i) {
         iommu_unmap_page(domain, iova, PM_MAP_4k);
         iova += PAGE_SIZE;
     }
 
     iommu_flush_tlb_pde(domain);
+
+    mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
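The locking scheme these hunks implement: the sleeping api_lock mutex serializes whole multi-page map/unmap operations on the IOMMU-API path, while the existing spinlock continues to guard individual page-table updates at finer grain. A minimal userspace sketch of that pattern using pthreads; the names (struct domain, map_range, map_one_page) are hypothetical, not the driver's:

    #include <pthread.h>

    #define PAGE_SZ 4096UL

    struct domain {
        pthread_mutex_t api_lock;   /* serializes whole map/unmap calls */
        /* ... page-table state, guarded separately at finer grain ... */
    };

    /* Stand-in for the per-page update (iommu_map_page in the driver). */
    static int map_one_page(struct domain *d, unsigned long iova,
                            unsigned long paddr)
    {
        (void)d; (void)iova; (void)paddr;
        return 0;
    }

    /* The mutex makes the whole loop atomic w.r.t. a concurrent unmap. */
    static int map_range(struct domain *d, unsigned long iova,
                         unsigned long paddr, int npages)
    {
        int i, ret = 0;

        pthread_mutex_lock(&d->api_lock);
        for (i = 0; i < npages; ++i) {
            ret = map_one_page(d, iova, paddr);
            if (ret)
                break;
            iova  += PAGE_SZ;
            paddr += PAGE_SZ;
        }
        pthread_mutex_unlock(&d->api_lock);

        return ret;
    }

    int main(void)
    {
        struct domain d;

        pthread_mutex_init(&d.api_lock, NULL);
        return map_range(&d, 0x100000, 0x200000, 4);
    }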
arch/x86/kernel/amd_iommu_init.c (+33, -15)
···
 bool amd_iommu_np_cache __read_mostly;
 
 /*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
  */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
 
 /*
  * List of protection domains - used during resume
···
      */
     for (i = 0; i < table->length; ++i)
         checksum += p[i];
-    if (checksum != 0)
+    if (checksum != 0) {
         /* ACPI table corrupt */
-        return -ENODEV;
+        amd_iommu_init_err = -ENODEV;
+        return 0;
+    }
 
     p += IVRS_HEADER_LENGTH;
···
     if (cmd_buf == NULL)
         return NULL;
 
-    iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+    iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 
     return cmd_buf;
 }
···
             &entry, sizeof(entry));
 
     amd_iommu_reset_cmd_buffer(iommu);
+    iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
     free_pages((unsigned long)iommu->cmd_buf,
-               get_order(iommu->cmd_buf_size));
+               get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }
···
                 h->mmio_phys);
 
         iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-        if (iommu == NULL)
-            return -ENOMEM;
+        if (iommu == NULL) {
+            amd_iommu_init_err = -ENOMEM;
+            return 0;
+        }
+
         ret = init_iommu_one(iommu, h);
-        if (ret)
-            return ret;
+        if (ret) {
+            amd_iommu_init_err = ret;
+            return 0;
+        }
         break;
     default:
         break;
···
 
     }
     WARN_ON(p != end);
-
-    amd_iommu_initialized = true;
 
     return 0;
 }
···
     if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
         return -ENODEV;
 
+    ret = amd_iommu_init_err;
+    if (ret)
+        goto out;
+
     dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
     alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
     rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
···
     if (acpi_table_parse("IVRS", init_iommu_all) != 0)
         goto free;
 
-    if (!amd_iommu_initialized)
+    if (amd_iommu_init_err) {
+        ret = amd_iommu_init_err;
         goto free;
+    }
 
     if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
         goto free;
+
+    if (amd_iommu_init_err) {
+        ret = amd_iommu_init_err;
+        goto free;
+    }
 
     ret = sysdev_class_register(&amd_iommu_sysdev_class);
     if (ret)
···
     if (ret)
         goto free;
 
+    enable_iommus();
+
     if (iommu_pass_through)
         ret = amd_iommu_init_passthrough();
     else
···
     amd_iommu_init_api();
 
     amd_iommu_init_notifier();
-
-    enable_iommus();
 
     if (iommu_pass_through)
         goto out;
···
     return ret;
 
 free:
+    disable_iommus();
 
     amd_iommu_uninit_devices();
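The recurring pattern above exists because the handlers passed to acpi_table_parse() cannot get an error code back to amd_iommu_init(): they return 0 regardless and record the first failure in the file-scope amd_iommu_init_err, which the init path then checks after each parse. A compact userspace sketch of that side-channel idiom, with hypothetical names:

    #include <errno.h>
    #include <stdio.h>

    /* The callback cannot return an error to its caller directly, so the
     * first failure is recorded here (mirrors amd_iommu_init_err). */
    static int parse_err;

    /* A parser callback: report success to the framework, stash the
     * real error on the side. */
    static int parse_table(int simulate_corrupt)
    {
        if (simulate_corrupt) {
            if (!parse_err)
                parse_err = -ENODEV;
            return 0;   /* the framework ignores this value anyway */
        }
        return 0;
    }

    int main(void)
    {
        parse_table(1);

        if (parse_err) {    /* the caller polls the side channel */
            fprintf(stderr, "init failed: %d\n", parse_err);
            return 1;
        }
        return 0;
    }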
arch/x86/kernel/aperture_64.c (+14, -1)
···
     for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
         int bus;
         int dev_base, dev_limit;
+        u32 ctl;
 
         bus = bus_dev_ranges[i].bus;
         dev_base = bus_dev_ranges[i].dev_base;
···
         gart_iommu_aperture = 1;
         x86_init.iommu.iommu_init = gart_iommu_init;
 
-        aper_order = (read_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL) >> 1) & 7;
+        ctl = read_pci_config(bus, slot, 3,
+                              AMD64_GARTAPERTURECTL);
+
+        /*
+         * Before we do anything else disable the GART. It may
+         * still be enabled if we boot into a crash-kernel here.
+         * Reconfiguring the GART while it is enabled could have
+         * unknown side-effects.
+         */
+        ctl &= ~GARTEN;
+        write_pci_config(bus, slot, 3, AMD64_GARTAPERTURECTL, ctl);
+
+        aper_order = (ctl >> 1) & 7;
         aper_size = (32 * 1024 * 1024) << aper_order;
         aper_base = read_pci_config(bus, slot, 3, AMD64_GARTAPERTUREBASE) & 0x7fff;
         aper_base <<= 25;
···
 
         enable_gart_translation(dev, __pa(agp_gatt_table));
     }
+
+    /* Flush the GART-TLB to remove stale entries */
+    k8_flush_garts();
 }
 
 /*
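Disabling the GART before reprogramming it is a plain read-modify-write of the aperture control register: read the dword, clear the enable bit, write it back, and only then trust the remaining fields. A standalone sketch of that pattern with the PCI config-space accessors stubbed out (fake_ctl, read_ctl, write_ctl are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define GARTEN (1u << 0)    /* GART enable bit in the aperture control reg */

    /* Stubbed config space: pretend a crash kernel inherited an
     * enabled GART with aperture order 3. */
    static uint32_t fake_ctl = GARTEN | (3u << 1);

    static uint32_t read_ctl(void)           { return fake_ctl; }
    static void     write_ctl(uint32_t v)    { fake_ctl = v; }

    int main(void)
    {
        uint32_t ctl = read_ctl();

        /* Disable before reconfiguring: changing an enabled GART
         * could have unknown side-effects. */
        ctl &= ~GARTEN;
        write_ctl(ctl);

        printf("aper_order = %u\n", (ctl >> 1) & 7);    /* prints 3 */
        return 0;
    }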
lib/dma-debug.c (+1, -1)
···
      * Now parse out the first token and use it as the name for the
      * driver to filter for.
      */
-    for (i = 0; i < NAME_MAX_LEN; ++i) {
+    for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
         current_driver_name[i] = buf[i];
         if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
             break;
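This one-character fix closes an off-by-one: the copy loop is followed in the file by the terminating write current_driver_name[i] = 0, and with the old bound a token that filled the buffer left i equal to NAME_MAX_LEN, so the NUL landed one byte past the array. A standalone illustration of the corrected bound (NAME_MAX_LEN shrunk and names made up for the example):

    #include <ctype.h>
    #include <stdio.h>

    #define NAME_MAX_LEN 8  /* deliberately tiny so the bound matters */

    static char name[NAME_MAX_LEN];

    /* Copy the first whitespace-delimited token of buf into name. */
    static void set_filter(const char *buf)
    {
        int i;

        /* Stop at NAME_MAX_LEN - 1 so that name[i] = 0 below stays in
         * bounds even when the token fills the whole buffer; with
         * "< NAME_MAX_LEN" the terminator could be written one past
         * the end. */
        for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
            name[i] = buf[i];
            if (isspace((unsigned char)buf[i]) || buf[i] == 0)
                break;
        }
        name[i] = 0;
    }

    int main(void)
    {
        set_filter("e1000e everything-after-is-ignored");
        printf("filter: '%s'\n", name);     /* prints 'e1000e' */
        return 0;
    }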