Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.11-rc3 2356 lines 58 kB view raw
1/* 2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. 3 * Author: Joerg Roedel <joerg.roedel@amd.com> 4 * Leo Duran <leo.duran@amd.com> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20#include <linux/pci.h> 21#include <linux/acpi.h> 22#include <linux/list.h> 23#include <linux/slab.h> 24#include <linux/syscore_ops.h> 25#include <linux/interrupt.h> 26#include <linux/msi.h> 27#include <linux/amd-iommu.h> 28#include <linux/export.h> 29#include <acpi/acpi.h> 30#include <asm/pci-direct.h> 31#include <asm/iommu.h> 32#include <asm/gart.h> 33#include <asm/x86_init.h> 34#include <asm/iommu_table.h> 35#include <asm/io_apic.h> 36#include <asm/irq_remapping.h> 37 38#include "amd_iommu_proto.h" 39#include "amd_iommu_types.h" 40#include "irq_remapping.h" 41 42/* 43 * definitions for the ACPI scanning code 44 */ 45#define IVRS_HEADER_LENGTH 48 46 47#define ACPI_IVHD_TYPE 0x10 48#define ACPI_IVMD_TYPE_ALL 0x20 49#define ACPI_IVMD_TYPE 0x21 50#define ACPI_IVMD_TYPE_RANGE 0x22 51 52#define IVHD_DEV_ALL 0x01 53#define IVHD_DEV_SELECT 0x02 54#define IVHD_DEV_SELECT_RANGE_START 0x03 55#define IVHD_DEV_RANGE_END 0x04 56#define IVHD_DEV_ALIAS 0x42 57#define IVHD_DEV_ALIAS_RANGE 0x43 58#define IVHD_DEV_EXT_SELECT 0x46 59#define IVHD_DEV_EXT_SELECT_RANGE 0x47 60#define IVHD_DEV_SPECIAL 0x48 61 62#define IVHD_SPECIAL_IOAPIC 1 63#define IVHD_SPECIAL_HPET 2 64 65#define IVHD_FLAG_HT_TUN_EN_MASK 0x01 66#define IVHD_FLAG_PASSPW_EN_MASK 0x02 67#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 68#define IVHD_FLAG_ISOC_EN_MASK 0x08 69 70#define IVMD_FLAG_EXCL_RANGE 0x08 71#define IVMD_FLAG_UNITY_MAP 0x01 72 73#define ACPI_DEVFLAG_INITPASS 0x01 74#define ACPI_DEVFLAG_EXTINT 0x02 75#define ACPI_DEVFLAG_NMI 0x04 76#define ACPI_DEVFLAG_SYSMGT1 0x10 77#define ACPI_DEVFLAG_SYSMGT2 0x20 78#define ACPI_DEVFLAG_LINT0 0x40 79#define ACPI_DEVFLAG_LINT1 0x80 80#define ACPI_DEVFLAG_ATSDIS 0x10000000 81 82/* 83 * ACPI table definitions 84 * 85 * These data structures are laid over the table to parse the important values 86 * out of it. 87 */ 88 89/* 90 * structure describing one IOMMU in the ACPI table. Typically followed by one 91 * or more ivhd_entrys. 92 */ 93struct ivhd_header { 94 u8 type; 95 u8 flags; 96 u16 length; 97 u16 devid; 98 u16 cap_ptr; 99 u64 mmio_phys; 100 u16 pci_seg; 101 u16 info; 102 u32 efr; 103} __attribute__((packed)); 104 105/* 106 * A device entry describing which devices a specific IOMMU translates and 107 * which requestor ids they use. 108 */ 109struct ivhd_entry { 110 u8 type; 111 u16 devid; 112 u8 flags; 113 u32 ext; 114} __attribute__((packed)); 115 116/* 117 * An AMD IOMMU memory definition structure. It defines things like exclusion 118 * ranges for devices and regions that should be unity mapped. 119 */ 120struct ivmd_header { 121 u8 type; 122 u8 flags; 123 u16 length; 124 u16 devid; 125 u16 aux; 126 u64 resv; 127 u64 range_start; 128 u64 range_length; 129} __attribute__((packed)); 130 131bool amd_iommu_dump; 132bool amd_iommu_irq_remap __read_mostly; 133 134static bool amd_iommu_detected; 135static bool __initdata amd_iommu_disabled; 136 137u16 amd_iommu_last_bdf; /* largest PCI device id we have 138 to handle */ 139LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 140 we find in ACPI */ 141u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */ 142 143LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 144 system */ 145 146/* Array to assign indices to IOMMUs*/ 147struct amd_iommu *amd_iommus[MAX_IOMMUS]; 148int amd_iommus_present; 149 150/* IOMMUs have a non-present cache? */ 151bool amd_iommu_np_cache __read_mostly; 152bool amd_iommu_iotlb_sup __read_mostly = true; 153 154u32 amd_iommu_max_pasids __read_mostly = ~0; 155 156bool amd_iommu_v2_present __read_mostly; 157bool amd_iommu_pc_present __read_mostly; 158 159bool amd_iommu_force_isolation __read_mostly; 160 161/* 162 * List of protection domains - used during resume 163 */ 164LIST_HEAD(amd_iommu_pd_list); 165spinlock_t amd_iommu_pd_lock; 166 167/* 168 * Pointer to the device table which is shared by all AMD IOMMUs 169 * it is indexed by the PCI device id or the HT unit id and contains 170 * information about the domain the device belongs to as well as the 171 * page table root pointer. 172 */ 173struct dev_table_entry *amd_iommu_dev_table; 174 175/* 176 * The alias table is a driver specific data structure which contains the 177 * mappings of the PCI device ids to the actual requestor ids on the IOMMU. 178 * More than one device can share the same requestor id. 179 */ 180u16 *amd_iommu_alias_table; 181 182/* 183 * The rlookup table is used to find the IOMMU which is responsible 184 * for a specific device. It is also indexed by the PCI device id. 185 */ 186struct amd_iommu **amd_iommu_rlookup_table; 187 188/* 189 * This table is used to find the irq remapping table for a given device id 190 * quickly. 191 */ 192struct irq_remap_table **irq_lookup_table; 193 194/* 195 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap 196 * to know which ones are already in use. 197 */ 198unsigned long *amd_iommu_pd_alloc_bitmap; 199 200static u32 dev_table_size; /* size of the device table */ 201static u32 alias_table_size; /* size of the alias table */ 202static u32 rlookup_table_size; /* size if the rlookup table */ 203 204enum iommu_init_state { 205 IOMMU_START_STATE, 206 IOMMU_IVRS_DETECTED, 207 IOMMU_ACPI_FINISHED, 208 IOMMU_ENABLED, 209 IOMMU_PCI_INIT, 210 IOMMU_INTERRUPTS_EN, 211 IOMMU_DMA_OPS, 212 IOMMU_INITIALIZED, 213 IOMMU_NOT_FOUND, 214 IOMMU_INIT_ERROR, 215}; 216 217/* Early ioapic and hpet maps from kernel command line */ 218#define EARLY_MAP_SIZE 4 219static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; 220static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; 221static int __initdata early_ioapic_map_size; 222static int __initdata early_hpet_map_size; 223static bool __initdata cmdline_maps; 224 225static enum iommu_init_state init_state = IOMMU_START_STATE; 226 227static int amd_iommu_enable_interrupts(void); 228static int __init iommu_go_to_state(enum iommu_init_state state); 229 230static inline void update_last_devid(u16 devid) 231{ 232 if (devid > amd_iommu_last_bdf) 233 amd_iommu_last_bdf = devid; 234} 235 236static inline unsigned long tbl_size(int entry_size) 237{ 238 unsigned shift = PAGE_SHIFT + 239 get_order(((int)amd_iommu_last_bdf + 1) * entry_size); 240 241 return 1UL << shift; 242} 243 244/* Access to l1 and l2 indexed register spaces */ 245 246static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) 247{ 248 u32 val; 249 250 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 251 pci_read_config_dword(iommu->dev, 0xfc, &val); 252 return val; 253} 254 255static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) 256{ 257 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); 258 pci_write_config_dword(iommu->dev, 0xfc, val); 259 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 260} 261 262static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) 263{ 264 u32 val; 265 266 pci_write_config_dword(iommu->dev, 0xf0, address); 267 pci_read_config_dword(iommu->dev, 0xf4, &val); 268 return val; 269} 270 271static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) 272{ 273 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); 274 pci_write_config_dword(iommu->dev, 0xf4, val); 275} 276 277/**************************************************************************** 278 * 279 * AMD IOMMU MMIO register space handling functions 280 * 281 * These functions are used to program the IOMMU device registers in 282 * MMIO space required for that driver. 283 * 284 ****************************************************************************/ 285 286/* 287 * This function set the exclusion range in the IOMMU. DMA accesses to the 288 * exclusion range are passed through untranslated 289 */ 290static void iommu_set_exclusion_range(struct amd_iommu *iommu) 291{ 292 u64 start = iommu->exclusion_start & PAGE_MASK; 293 u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; 294 u64 entry; 295 296 if (!iommu->exclusion_start) 297 return; 298 299 entry = start | MMIO_EXCL_ENABLE_MASK; 300 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, 301 &entry, sizeof(entry)); 302 303 entry = limit; 304 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 305 &entry, sizeof(entry)); 306} 307 308/* Programs the physical address of the device table into the IOMMU hardware */ 309static void iommu_set_device_table(struct amd_iommu *iommu) 310{ 311 u64 entry; 312 313 BUG_ON(iommu->mmio_base == NULL); 314 315 entry = virt_to_phys(amd_iommu_dev_table); 316 entry |= (dev_table_size >> 12) - 1; 317 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, 318 &entry, sizeof(entry)); 319} 320 321/* Generic functions to enable/disable certain features of the IOMMU. */ 322static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) 323{ 324 u32 ctrl; 325 326 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); 327 ctrl |= (1 << bit); 328 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 329} 330 331static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) 332{ 333 u32 ctrl; 334 335 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); 336 ctrl &= ~(1 << bit); 337 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 338} 339 340static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) 341{ 342 u32 ctrl; 343 344 ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET); 345 ctrl &= ~CTRL_INV_TO_MASK; 346 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; 347 writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 348} 349 350/* Function to enable the hardware */ 351static void iommu_enable(struct amd_iommu *iommu) 352{ 353 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 354} 355 356static void iommu_disable(struct amd_iommu *iommu) 357{ 358 /* Disable command buffer */ 359 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 360 361 /* Disable event logging and event interrupts */ 362 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); 363 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 364 365 /* Disable IOMMU hardware itself */ 366 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); 367} 368 369/* 370 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in 371 * the system has one. 372 */ 373static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) 374{ 375 if (!request_mem_region(address, end, "amd_iommu")) { 376 pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n", 377 address, end); 378 pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); 379 return NULL; 380 } 381 382 return (u8 __iomem *)ioremap_nocache(address, end); 383} 384 385static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) 386{ 387 if (iommu->mmio_base) 388 iounmap(iommu->mmio_base); 389 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); 390} 391 392/**************************************************************************** 393 * 394 * The functions below belong to the first pass of AMD IOMMU ACPI table 395 * parsing. In this pass we try to find out the highest device id this 396 * code has to handle. Upon this information the size of the shared data 397 * structures is determined later. 398 * 399 ****************************************************************************/ 400 401/* 402 * This function calculates the length of a given IVHD entry 403 */ 404static inline int ivhd_entry_length(u8 *ivhd) 405{ 406 return 0x04 << (*ivhd >> 6); 407} 408 409/* 410 * This function reads the last device id the IOMMU has to handle from the PCI 411 * capability header for this IOMMU 412 */ 413static int __init find_last_devid_on_pci(int bus, int dev, int fn, int cap_ptr) 414{ 415 u32 cap; 416 417 cap = read_pci_config(bus, dev, fn, cap_ptr+MMIO_RANGE_OFFSET); 418 update_last_devid(PCI_DEVID(MMIO_GET_BUS(cap), MMIO_GET_LD(cap))); 419 420 return 0; 421} 422 423/* 424 * After reading the highest device id from the IOMMU PCI capability header 425 * this function looks if there is a higher device id defined in the ACPI table 426 */ 427static int __init find_last_devid_from_ivhd(struct ivhd_header *h) 428{ 429 u8 *p = (void *)h, *end = (void *)h; 430 struct ivhd_entry *dev; 431 432 p += sizeof(*h); 433 end += h->length; 434 435 find_last_devid_on_pci(PCI_BUS_NUM(h->devid), 436 PCI_SLOT(h->devid), 437 PCI_FUNC(h->devid), 438 h->cap_ptr); 439 440 while (p < end) { 441 dev = (struct ivhd_entry *)p; 442 switch (dev->type) { 443 case IVHD_DEV_SELECT: 444 case IVHD_DEV_RANGE_END: 445 case IVHD_DEV_ALIAS: 446 case IVHD_DEV_EXT_SELECT: 447 /* all the above subfield types refer to device ids */ 448 update_last_devid(dev->devid); 449 break; 450 default: 451 break; 452 } 453 p += ivhd_entry_length(p); 454 } 455 456 WARN_ON(p != end); 457 458 return 0; 459} 460 461/* 462 * Iterate over all IVHD entries in the ACPI table and find the highest device 463 * id which we need to handle. This is the first of three functions which parse 464 * the ACPI table. So we check the checksum here. 465 */ 466static int __init find_last_devid_acpi(struct acpi_table_header *table) 467{ 468 int i; 469 u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table; 470 struct ivhd_header *h; 471 472 /* 473 * Validate checksum here so we don't need to do it when 474 * we actually parse the table 475 */ 476 for (i = 0; i < table->length; ++i) 477 checksum += p[i]; 478 if (checksum != 0) 479 /* ACPI table corrupt */ 480 return -ENODEV; 481 482 p += IVRS_HEADER_LENGTH; 483 484 end += table->length; 485 while (p < end) { 486 h = (struct ivhd_header *)p; 487 switch (h->type) { 488 case ACPI_IVHD_TYPE: 489 find_last_devid_from_ivhd(h); 490 break; 491 default: 492 break; 493 } 494 p += h->length; 495 } 496 WARN_ON(p != end); 497 498 return 0; 499} 500 501/**************************************************************************** 502 * 503 * The following functions belong to the code path which parses the ACPI table 504 * the second time. In this ACPI parsing iteration we allocate IOMMU specific 505 * data structures, initialize the device/alias/rlookup table and also 506 * basically initialize the hardware. 507 * 508 ****************************************************************************/ 509 510/* 511 * Allocates the command buffer. This buffer is per AMD IOMMU. We can 512 * write commands to that buffer later and the IOMMU will execute them 513 * asynchronously 514 */ 515static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) 516{ 517 u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 518 get_order(CMD_BUFFER_SIZE)); 519 520 if (cmd_buf == NULL) 521 return NULL; 522 523 iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED; 524 525 return cmd_buf; 526} 527 528/* 529 * This function resets the command buffer if the IOMMU stopped fetching 530 * commands from it. 531 */ 532void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) 533{ 534 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 535 536 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); 537 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 538 539 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); 540} 541 542/* 543 * This function writes the command buffer address to the hardware and 544 * enables it. 545 */ 546static void iommu_enable_command_buffer(struct amd_iommu *iommu) 547{ 548 u64 entry; 549 550 BUG_ON(iommu->cmd_buf == NULL); 551 552 entry = (u64)virt_to_phys(iommu->cmd_buf); 553 entry |= MMIO_CMD_SIZE_512; 554 555 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, 556 &entry, sizeof(entry)); 557 558 amd_iommu_reset_cmd_buffer(iommu); 559 iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED); 560} 561 562static void __init free_command_buffer(struct amd_iommu *iommu) 563{ 564 free_pages((unsigned long)iommu->cmd_buf, 565 get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED))); 566} 567 568/* allocates the memory where the IOMMU will log its events to */ 569static u8 * __init alloc_event_buffer(struct amd_iommu *iommu) 570{ 571 iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 572 get_order(EVT_BUFFER_SIZE)); 573 574 if (iommu->evt_buf == NULL) 575 return NULL; 576 577 iommu->evt_buf_size = EVT_BUFFER_SIZE; 578 579 return iommu->evt_buf; 580} 581 582static void iommu_enable_event_buffer(struct amd_iommu *iommu) 583{ 584 u64 entry; 585 586 BUG_ON(iommu->evt_buf == NULL); 587 588 entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; 589 590 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, 591 &entry, sizeof(entry)); 592 593 /* set head and tail to zero manually */ 594 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); 595 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); 596 597 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); 598} 599 600static void __init free_event_buffer(struct amd_iommu *iommu) 601{ 602 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); 603} 604 605/* allocates the memory where the IOMMU will log its events to */ 606static u8 * __init alloc_ppr_log(struct amd_iommu *iommu) 607{ 608 iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 609 get_order(PPR_LOG_SIZE)); 610 611 if (iommu->ppr_log == NULL) 612 return NULL; 613 614 return iommu->ppr_log; 615} 616 617static void iommu_enable_ppr_log(struct amd_iommu *iommu) 618{ 619 u64 entry; 620 621 if (iommu->ppr_log == NULL) 622 return; 623 624 entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; 625 626 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, 627 &entry, sizeof(entry)); 628 629 /* set head and tail to zero manually */ 630 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 631 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 632 633 iommu_feature_enable(iommu, CONTROL_PPFLOG_EN); 634 iommu_feature_enable(iommu, CONTROL_PPR_EN); 635} 636 637static void __init free_ppr_log(struct amd_iommu *iommu) 638{ 639 if (iommu->ppr_log == NULL) 640 return; 641 642 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); 643} 644 645static void iommu_enable_gt(struct amd_iommu *iommu) 646{ 647 if (!iommu_feature(iommu, FEATURE_GT)) 648 return; 649 650 iommu_feature_enable(iommu, CONTROL_GT_EN); 651} 652 653/* sets a specific bit in the device table entry. */ 654static void set_dev_entry_bit(u16 devid, u8 bit) 655{ 656 int i = (bit >> 6) & 0x03; 657 int _bit = bit & 0x3f; 658 659 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); 660} 661 662static int get_dev_entry_bit(u16 devid, u8 bit) 663{ 664 int i = (bit >> 6) & 0x03; 665 int _bit = bit & 0x3f; 666 667 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; 668} 669 670 671void amd_iommu_apply_erratum_63(u16 devid) 672{ 673 int sysmgt; 674 675 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | 676 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); 677 678 if (sysmgt == 0x01) 679 set_dev_entry_bit(devid, DEV_ENTRY_IW); 680} 681 682/* Writes the specific IOMMU for a device into the rlookup table */ 683static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) 684{ 685 amd_iommu_rlookup_table[devid] = iommu; 686} 687 688/* 689 * This function takes the device specific flags read from the ACPI 690 * table and sets up the device table entry with that information 691 */ 692static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, 693 u16 devid, u32 flags, u32 ext_flags) 694{ 695 if (flags & ACPI_DEVFLAG_INITPASS) 696 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); 697 if (flags & ACPI_DEVFLAG_EXTINT) 698 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); 699 if (flags & ACPI_DEVFLAG_NMI) 700 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); 701 if (flags & ACPI_DEVFLAG_SYSMGT1) 702 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); 703 if (flags & ACPI_DEVFLAG_SYSMGT2) 704 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); 705 if (flags & ACPI_DEVFLAG_LINT0) 706 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); 707 if (flags & ACPI_DEVFLAG_LINT1) 708 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); 709 710 amd_iommu_apply_erratum_63(devid); 711 712 set_iommu_for_device(iommu, devid); 713} 714 715static int __init add_special_device(u8 type, u8 id, u16 devid, bool cmd_line) 716{ 717 struct devid_map *entry; 718 struct list_head *list; 719 720 if (type == IVHD_SPECIAL_IOAPIC) 721 list = &ioapic_map; 722 else if (type == IVHD_SPECIAL_HPET) 723 list = &hpet_map; 724 else 725 return -EINVAL; 726 727 list_for_each_entry(entry, list, list) { 728 if (!(entry->id == id && entry->cmd_line)) 729 continue; 730 731 pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n", 732 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); 733 734 return 0; 735 } 736 737 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 738 if (!entry) 739 return -ENOMEM; 740 741 entry->id = id; 742 entry->devid = devid; 743 entry->cmd_line = cmd_line; 744 745 list_add_tail(&entry->list, list); 746 747 return 0; 748} 749 750static int __init add_early_maps(void) 751{ 752 int i, ret; 753 754 for (i = 0; i < early_ioapic_map_size; ++i) { 755 ret = add_special_device(IVHD_SPECIAL_IOAPIC, 756 early_ioapic_map[i].id, 757 early_ioapic_map[i].devid, 758 early_ioapic_map[i].cmd_line); 759 if (ret) 760 return ret; 761 } 762 763 for (i = 0; i < early_hpet_map_size; ++i) { 764 ret = add_special_device(IVHD_SPECIAL_HPET, 765 early_hpet_map[i].id, 766 early_hpet_map[i].devid, 767 early_hpet_map[i].cmd_line); 768 if (ret) 769 return ret; 770 } 771 772 return 0; 773} 774 775/* 776 * Reads the device exclusion range from ACPI and initializes the IOMMU with 777 * it 778 */ 779static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) 780{ 781 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; 782 783 if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) 784 return; 785 786 if (iommu) { 787 /* 788 * We only can configure exclusion ranges per IOMMU, not 789 * per device. But we can enable the exclusion range per 790 * device. This is done here 791 */ 792 set_dev_entry_bit(m->devid, DEV_ENTRY_EX); 793 iommu->exclusion_start = m->range_start; 794 iommu->exclusion_length = m->range_length; 795 } 796} 797 798/* 799 * Takes a pointer to an AMD IOMMU entry in the ACPI table and 800 * initializes the hardware and our data structures with it. 801 */ 802static int __init init_iommu_from_acpi(struct amd_iommu *iommu, 803 struct ivhd_header *h) 804{ 805 u8 *p = (u8 *)h; 806 u8 *end = p, flags = 0; 807 u16 devid = 0, devid_start = 0, devid_to = 0; 808 u32 dev_i, ext_flags = 0; 809 bool alias = false; 810 struct ivhd_entry *e; 811 int ret; 812 813 814 ret = add_early_maps(); 815 if (ret) 816 return ret; 817 818 /* 819 * First save the recommended feature enable bits from ACPI 820 */ 821 iommu->acpi_flags = h->flags; 822 823 /* 824 * Done. Now parse the device entries 825 */ 826 p += sizeof(struct ivhd_header); 827 end += h->length; 828 829 830 while (p < end) { 831 e = (struct ivhd_entry *)p; 832 switch (e->type) { 833 case IVHD_DEV_ALL: 834 835 DUMP_printk(" DEV_ALL\t\t\t first devid: %02x:%02x.%x" 836 " last device %02x:%02x.%x flags: %02x\n", 837 PCI_BUS_NUM(iommu->first_device), 838 PCI_SLOT(iommu->first_device), 839 PCI_FUNC(iommu->first_device), 840 PCI_BUS_NUM(iommu->last_device), 841 PCI_SLOT(iommu->last_device), 842 PCI_FUNC(iommu->last_device), 843 e->flags); 844 845 for (dev_i = iommu->first_device; 846 dev_i <= iommu->last_device; ++dev_i) 847 set_dev_entry_from_acpi(iommu, dev_i, 848 e->flags, 0); 849 break; 850 case IVHD_DEV_SELECT: 851 852 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " 853 "flags: %02x\n", 854 PCI_BUS_NUM(e->devid), 855 PCI_SLOT(e->devid), 856 PCI_FUNC(e->devid), 857 e->flags); 858 859 devid = e->devid; 860 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 861 break; 862 case IVHD_DEV_SELECT_RANGE_START: 863 864 DUMP_printk(" DEV_SELECT_RANGE_START\t " 865 "devid: %02x:%02x.%x flags: %02x\n", 866 PCI_BUS_NUM(e->devid), 867 PCI_SLOT(e->devid), 868 PCI_FUNC(e->devid), 869 e->flags); 870 871 devid_start = e->devid; 872 flags = e->flags; 873 ext_flags = 0; 874 alias = false; 875 break; 876 case IVHD_DEV_ALIAS: 877 878 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " 879 "flags: %02x devid_to: %02x:%02x.%x\n", 880 PCI_BUS_NUM(e->devid), 881 PCI_SLOT(e->devid), 882 PCI_FUNC(e->devid), 883 e->flags, 884 PCI_BUS_NUM(e->ext >> 8), 885 PCI_SLOT(e->ext >> 8), 886 PCI_FUNC(e->ext >> 8)); 887 888 devid = e->devid; 889 devid_to = e->ext >> 8; 890 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); 891 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); 892 amd_iommu_alias_table[devid] = devid_to; 893 break; 894 case IVHD_DEV_ALIAS_RANGE: 895 896 DUMP_printk(" DEV_ALIAS_RANGE\t\t " 897 "devid: %02x:%02x.%x flags: %02x " 898 "devid_to: %02x:%02x.%x\n", 899 PCI_BUS_NUM(e->devid), 900 PCI_SLOT(e->devid), 901 PCI_FUNC(e->devid), 902 e->flags, 903 PCI_BUS_NUM(e->ext >> 8), 904 PCI_SLOT(e->ext >> 8), 905 PCI_FUNC(e->ext >> 8)); 906 907 devid_start = e->devid; 908 flags = e->flags; 909 devid_to = e->ext >> 8; 910 ext_flags = 0; 911 alias = true; 912 break; 913 case IVHD_DEV_EXT_SELECT: 914 915 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " 916 "flags: %02x ext: %08x\n", 917 PCI_BUS_NUM(e->devid), 918 PCI_SLOT(e->devid), 919 PCI_FUNC(e->devid), 920 e->flags, e->ext); 921 922 devid = e->devid; 923 set_dev_entry_from_acpi(iommu, devid, e->flags, 924 e->ext); 925 break; 926 case IVHD_DEV_EXT_SELECT_RANGE: 927 928 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " 929 "%02x:%02x.%x flags: %02x ext: %08x\n", 930 PCI_BUS_NUM(e->devid), 931 PCI_SLOT(e->devid), 932 PCI_FUNC(e->devid), 933 e->flags, e->ext); 934 935 devid_start = e->devid; 936 flags = e->flags; 937 ext_flags = e->ext; 938 alias = false; 939 break; 940 case IVHD_DEV_RANGE_END: 941 942 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", 943 PCI_BUS_NUM(e->devid), 944 PCI_SLOT(e->devid), 945 PCI_FUNC(e->devid)); 946 947 devid = e->devid; 948 for (dev_i = devid_start; dev_i <= devid; ++dev_i) { 949 if (alias) { 950 amd_iommu_alias_table[dev_i] = devid_to; 951 set_dev_entry_from_acpi(iommu, 952 devid_to, flags, ext_flags); 953 } 954 set_dev_entry_from_acpi(iommu, dev_i, 955 flags, ext_flags); 956 } 957 break; 958 case IVHD_DEV_SPECIAL: { 959 u8 handle, type; 960 const char *var; 961 u16 devid; 962 int ret; 963 964 handle = e->ext & 0xff; 965 devid = (e->ext >> 8) & 0xffff; 966 type = (e->ext >> 24) & 0xff; 967 968 if (type == IVHD_SPECIAL_IOAPIC) 969 var = "IOAPIC"; 970 else if (type == IVHD_SPECIAL_HPET) 971 var = "HPET"; 972 else 973 var = "UNKNOWN"; 974 975 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n", 976 var, (int)handle, 977 PCI_BUS_NUM(devid), 978 PCI_SLOT(devid), 979 PCI_FUNC(devid)); 980 981 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 982 ret = add_special_device(type, handle, devid, false); 983 if (ret) 984 return ret; 985 break; 986 } 987 default: 988 break; 989 } 990 991 p += ivhd_entry_length(p); 992 } 993 994 return 0; 995} 996 997/* Initializes the device->iommu mapping for the driver */ 998static int __init init_iommu_devices(struct amd_iommu *iommu) 999{ 1000 u32 i; 1001 1002 for (i = iommu->first_device; i <= iommu->last_device; ++i) 1003 set_iommu_for_device(iommu, i); 1004 1005 return 0; 1006} 1007 1008static void __init free_iommu_one(struct amd_iommu *iommu) 1009{ 1010 free_command_buffer(iommu); 1011 free_event_buffer(iommu); 1012 free_ppr_log(iommu); 1013 iommu_unmap_mmio_space(iommu); 1014} 1015 1016static void __init free_iommu_all(void) 1017{ 1018 struct amd_iommu *iommu, *next; 1019 1020 for_each_iommu_safe(iommu, next) { 1021 list_del(&iommu->list); 1022 free_iommu_one(iommu); 1023 kfree(iommu); 1024 } 1025} 1026 1027/* 1028 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) 1029 * Workaround: 1030 * BIOS should disable L2B micellaneous clock gating by setting 1031 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b 1032 */ 1033static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) 1034{ 1035 u32 value; 1036 1037 if ((boot_cpu_data.x86 != 0x15) || 1038 (boot_cpu_data.x86_model < 0x10) || 1039 (boot_cpu_data.x86_model > 0x1f)) 1040 return; 1041 1042 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1043 pci_read_config_dword(iommu->dev, 0xf4, &value); 1044 1045 if (value & BIT(2)) 1046 return; 1047 1048 /* Select NB indirect register 0x90 and enable writing */ 1049 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); 1050 1051 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); 1052 pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n", 1053 dev_name(&iommu->dev->dev)); 1054 1055 /* Clear the enable writing bit */ 1056 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1057} 1058 1059/* 1060 * This function clues the initialization function for one IOMMU 1061 * together and also allocates the command buffer and programs the 1062 * hardware. It does NOT enable the IOMMU. This is done afterwards. 1063 */ 1064static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) 1065{ 1066 int ret; 1067 1068 spin_lock_init(&iommu->lock); 1069 1070 /* Add IOMMU to internal data structures */ 1071 list_add_tail(&iommu->list, &amd_iommu_list); 1072 iommu->index = amd_iommus_present++; 1073 1074 if (unlikely(iommu->index >= MAX_IOMMUS)) { 1075 WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); 1076 return -ENOSYS; 1077 } 1078 1079 /* Index is fine - add IOMMU to the array */ 1080 amd_iommus[iommu->index] = iommu; 1081 1082 /* 1083 * Copy data from ACPI table entry to the iommu struct 1084 */ 1085 iommu->devid = h->devid; 1086 iommu->cap_ptr = h->cap_ptr; 1087 iommu->pci_seg = h->pci_seg; 1088 iommu->mmio_phys = h->mmio_phys; 1089 1090 /* Check if IVHD EFR contains proper max banks/counters */ 1091 if ((h->efr != 0) && 1092 ((h->efr & (0xF << 13)) != 0) && 1093 ((h->efr & (0x3F << 17)) != 0)) { 1094 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; 1095 } else { 1096 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; 1097 } 1098 1099 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, 1100 iommu->mmio_phys_end); 1101 if (!iommu->mmio_base) 1102 return -ENOMEM; 1103 1104 iommu->cmd_buf = alloc_command_buffer(iommu); 1105 if (!iommu->cmd_buf) 1106 return -ENOMEM; 1107 1108 iommu->evt_buf = alloc_event_buffer(iommu); 1109 if (!iommu->evt_buf) 1110 return -ENOMEM; 1111 1112 iommu->int_enabled = false; 1113 1114 ret = init_iommu_from_acpi(iommu, h); 1115 if (ret) 1116 return ret; 1117 1118 /* 1119 * Make sure IOMMU is not considered to translate itself. The IVRS 1120 * table tells us so, but this is a lie! 1121 */ 1122 amd_iommu_rlookup_table[iommu->devid] = NULL; 1123 1124 init_iommu_devices(iommu); 1125 1126 return 0; 1127} 1128 1129/* 1130 * Iterates over all IOMMU entries in the ACPI table, allocates the 1131 * IOMMU structure and initializes it with init_iommu_one() 1132 */ 1133static int __init init_iommu_all(struct acpi_table_header *table) 1134{ 1135 u8 *p = (u8 *)table, *end = (u8 *)table; 1136 struct ivhd_header *h; 1137 struct amd_iommu *iommu; 1138 int ret; 1139 1140 end += table->length; 1141 p += IVRS_HEADER_LENGTH; 1142 1143 while (p < end) { 1144 h = (struct ivhd_header *)p; 1145 switch (*p) { 1146 case ACPI_IVHD_TYPE: 1147 1148 DUMP_printk("device: %02x:%02x.%01x cap: %04x " 1149 "seg: %d flags: %01x info %04x\n", 1150 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), 1151 PCI_FUNC(h->devid), h->cap_ptr, 1152 h->pci_seg, h->flags, h->info); 1153 DUMP_printk(" mmio-addr: %016llx\n", 1154 h->mmio_phys); 1155 1156 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); 1157 if (iommu == NULL) 1158 return -ENOMEM; 1159 1160 ret = init_iommu_one(iommu, h); 1161 if (ret) 1162 return ret; 1163 break; 1164 default: 1165 break; 1166 } 1167 p += h->length; 1168 1169 } 1170 WARN_ON(p != end); 1171 1172 return 0; 1173} 1174 1175 1176static void init_iommu_perf_ctr(struct amd_iommu *iommu) 1177{ 1178 u64 val = 0xabcd, val2 = 0; 1179 1180 if (!iommu_feature(iommu, FEATURE_PC)) 1181 return; 1182 1183 amd_iommu_pc_present = true; 1184 1185 /* Check if the performance counters can be written to */ 1186 if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || 1187 (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || 1188 (val != val2)) { 1189 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); 1190 amd_iommu_pc_present = false; 1191 return; 1192 } 1193 1194 pr_info("AMD-Vi: IOMMU performance counters supported\n"); 1195 1196 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); 1197 iommu->max_banks = (u8) ((val >> 12) & 0x3f); 1198 iommu->max_counters = (u8) ((val >> 7) & 0xf); 1199} 1200 1201 1202static int iommu_init_pci(struct amd_iommu *iommu) 1203{ 1204 int cap_ptr = iommu->cap_ptr; 1205 u32 range, misc, low, high; 1206 1207 iommu->dev = pci_get_bus_and_slot(PCI_BUS_NUM(iommu->devid), 1208 iommu->devid & 0xff); 1209 if (!iommu->dev) 1210 return -ENODEV; 1211 1212 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, 1213 &iommu->cap); 1214 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, 1215 &range); 1216 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, 1217 &misc); 1218 1219 iommu->first_device = PCI_DEVID(MMIO_GET_BUS(range), 1220 MMIO_GET_FD(range)); 1221 iommu->last_device = PCI_DEVID(MMIO_GET_BUS(range), 1222 MMIO_GET_LD(range)); 1223 1224 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) 1225 amd_iommu_iotlb_sup = false; 1226 1227 /* read extended feature bits */ 1228 low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); 1229 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); 1230 1231 iommu->features = ((u64)high << 32) | low; 1232 1233 if (iommu_feature(iommu, FEATURE_GT)) { 1234 int glxval; 1235 u32 pasids; 1236 u64 shift; 1237 1238 shift = iommu->features & FEATURE_PASID_MASK; 1239 shift >>= FEATURE_PASID_SHIFT; 1240 pasids = (1 << shift); 1241 1242 amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids); 1243 1244 glxval = iommu->features & FEATURE_GLXVAL_MASK; 1245 glxval >>= FEATURE_GLXVAL_SHIFT; 1246 1247 if (amd_iommu_max_glx_val == -1) 1248 amd_iommu_max_glx_val = glxval; 1249 else 1250 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); 1251 } 1252 1253 if (iommu_feature(iommu, FEATURE_GT) && 1254 iommu_feature(iommu, FEATURE_PPR)) { 1255 iommu->is_iommu_v2 = true; 1256 amd_iommu_v2_present = true; 1257 } 1258 1259 if (iommu_feature(iommu, FEATURE_PPR)) { 1260 iommu->ppr_log = alloc_ppr_log(iommu); 1261 if (!iommu->ppr_log) 1262 return -ENOMEM; 1263 } 1264 1265 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) 1266 amd_iommu_np_cache = true; 1267 1268 init_iommu_perf_ctr(iommu); 1269 1270 if (is_rd890_iommu(iommu->dev)) { 1271 int i, j; 1272 1273 iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number, 1274 PCI_DEVFN(0, 0)); 1275 1276 /* 1277 * Some rd890 systems may not be fully reconfigured by the 1278 * BIOS, so it's necessary for us to store this information so 1279 * it can be reprogrammed on resume 1280 */ 1281 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, 1282 &iommu->stored_addr_lo); 1283 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, 1284 &iommu->stored_addr_hi); 1285 1286 /* Low bit locks writes to configuration space */ 1287 iommu->stored_addr_lo &= ~1; 1288 1289 for (i = 0; i < 6; i++) 1290 for (j = 0; j < 0x12; j++) 1291 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); 1292 1293 for (i = 0; i < 0x83; i++) 1294 iommu->stored_l2[i] = iommu_read_l2(iommu, i); 1295 } 1296 1297 amd_iommu_erratum_746_workaround(iommu); 1298 1299 return pci_enable_device(iommu->dev); 1300} 1301 1302static void print_iommu_info(void) 1303{ 1304 static const char * const feat_str[] = { 1305 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", 1306 "IA", "GA", "HE", "PC" 1307 }; 1308 struct amd_iommu *iommu; 1309 1310 for_each_iommu(iommu) { 1311 int i; 1312 1313 pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n", 1314 dev_name(&iommu->dev->dev), iommu->cap_ptr); 1315 1316 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { 1317 pr_info("AMD-Vi: Extended features: "); 1318 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { 1319 if (iommu_feature(iommu, (1ULL << i))) 1320 pr_cont(" %s", feat_str[i]); 1321 } 1322 pr_cont("\n"); 1323 } 1324 } 1325 if (irq_remapping_enabled) 1326 pr_info("AMD-Vi: Interrupt remapping enabled\n"); 1327} 1328 1329static int __init amd_iommu_init_pci(void) 1330{ 1331 struct amd_iommu *iommu; 1332 int ret = 0; 1333 1334 for_each_iommu(iommu) { 1335 ret = iommu_init_pci(iommu); 1336 if (ret) 1337 break; 1338 } 1339 1340 ret = amd_iommu_init_devices(); 1341 1342 print_iommu_info(); 1343 1344 return ret; 1345} 1346 1347/**************************************************************************** 1348 * 1349 * The following functions initialize the MSI interrupts for all IOMMUs 1350 * in the system. It's a bit challenging because there could be multiple 1351 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per 1352 * pci_dev. 1353 * 1354 ****************************************************************************/ 1355 1356static int iommu_setup_msi(struct amd_iommu *iommu) 1357{ 1358 int r; 1359 1360 r = pci_enable_msi(iommu->dev); 1361 if (r) 1362 return r; 1363 1364 r = request_threaded_irq(iommu->dev->irq, 1365 amd_iommu_int_handler, 1366 amd_iommu_int_thread, 1367 0, "AMD-Vi", 1368 iommu); 1369 1370 if (r) { 1371 pci_disable_msi(iommu->dev); 1372 return r; 1373 } 1374 1375 iommu->int_enabled = true; 1376 1377 return 0; 1378} 1379 1380static int iommu_init_msi(struct amd_iommu *iommu) 1381{ 1382 int ret; 1383 1384 if (iommu->int_enabled) 1385 goto enable_faults; 1386 1387 if (pci_find_capability(iommu->dev, PCI_CAP_ID_MSI)) 1388 ret = iommu_setup_msi(iommu); 1389 else 1390 ret = -ENODEV; 1391 1392 if (ret) 1393 return ret; 1394 1395enable_faults: 1396 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); 1397 1398 if (iommu->ppr_log != NULL) 1399 iommu_feature_enable(iommu, CONTROL_PPFINT_EN); 1400 1401 return 0; 1402} 1403 1404/**************************************************************************** 1405 * 1406 * The next functions belong to the third pass of parsing the ACPI 1407 * table. In this last pass the memory mapping requirements are 1408 * gathered (like exclusion and unity mapping ranges). 1409 * 1410 ****************************************************************************/ 1411 1412static void __init free_unity_maps(void) 1413{ 1414 struct unity_map_entry *entry, *next; 1415 1416 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { 1417 list_del(&entry->list); 1418 kfree(entry); 1419 } 1420} 1421 1422/* called when we find an exclusion range definition in ACPI */ 1423static int __init init_exclusion_range(struct ivmd_header *m) 1424{ 1425 int i; 1426 1427 switch (m->type) { 1428 case ACPI_IVMD_TYPE: 1429 set_device_exclusion_range(m->devid, m); 1430 break; 1431 case ACPI_IVMD_TYPE_ALL: 1432 for (i = 0; i <= amd_iommu_last_bdf; ++i) 1433 set_device_exclusion_range(i, m); 1434 break; 1435 case ACPI_IVMD_TYPE_RANGE: 1436 for (i = m->devid; i <= m->aux; ++i) 1437 set_device_exclusion_range(i, m); 1438 break; 1439 default: 1440 break; 1441 } 1442 1443 return 0; 1444} 1445 1446/* called for unity map ACPI definition */ 1447static int __init init_unity_map_range(struct ivmd_header *m) 1448{ 1449 struct unity_map_entry *e = NULL; 1450 char *s; 1451 1452 e = kzalloc(sizeof(*e), GFP_KERNEL); 1453 if (e == NULL) 1454 return -ENOMEM; 1455 1456 switch (m->type) { 1457 default: 1458 kfree(e); 1459 return 0; 1460 case ACPI_IVMD_TYPE: 1461 s = "IVMD_TYPEi\t\t\t"; 1462 e->devid_start = e->devid_end = m->devid; 1463 break; 1464 case ACPI_IVMD_TYPE_ALL: 1465 s = "IVMD_TYPE_ALL\t\t"; 1466 e->devid_start = 0; 1467 e->devid_end = amd_iommu_last_bdf; 1468 break; 1469 case ACPI_IVMD_TYPE_RANGE: 1470 s = "IVMD_TYPE_RANGE\t\t"; 1471 e->devid_start = m->devid; 1472 e->devid_end = m->aux; 1473 break; 1474 } 1475 e->address_start = PAGE_ALIGN(m->range_start); 1476 e->address_end = e->address_start + PAGE_ALIGN(m->range_length); 1477 e->prot = m->flags >> 1; 1478 1479 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" 1480 " range_start: %016llx range_end: %016llx flags: %x\n", s, 1481 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), 1482 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end), 1483 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), 1484 e->address_start, e->address_end, m->flags); 1485 1486 list_add_tail(&e->list, &amd_iommu_unity_map); 1487 1488 return 0; 1489} 1490 1491/* iterates over all memory definitions we find in the ACPI table */ 1492static int __init init_memory_definitions(struct acpi_table_header *table) 1493{ 1494 u8 *p = (u8 *)table, *end = (u8 *)table; 1495 struct ivmd_header *m; 1496 1497 end += table->length; 1498 p += IVRS_HEADER_LENGTH; 1499 1500 while (p < end) { 1501 m = (struct ivmd_header *)p; 1502 if (m->flags & IVMD_FLAG_EXCL_RANGE) 1503 init_exclusion_range(m); 1504 else if (m->flags & IVMD_FLAG_UNITY_MAP) 1505 init_unity_map_range(m); 1506 1507 p += m->length; 1508 } 1509 1510 return 0; 1511} 1512 1513/* 1514 * Init the device table to not allow DMA access for devices and 1515 * suppress all page faults 1516 */ 1517static void init_device_table_dma(void) 1518{ 1519 u32 devid; 1520 1521 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 1522 set_dev_entry_bit(devid, DEV_ENTRY_VALID); 1523 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); 1524 } 1525} 1526 1527static void __init uninit_device_table_dma(void) 1528{ 1529 u32 devid; 1530 1531 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 1532 amd_iommu_dev_table[devid].data[0] = 0ULL; 1533 amd_iommu_dev_table[devid].data[1] = 0ULL; 1534 } 1535} 1536 1537static void init_device_table(void) 1538{ 1539 u32 devid; 1540 1541 if (!amd_iommu_irq_remap) 1542 return; 1543 1544 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) 1545 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); 1546} 1547 1548static void iommu_init_flags(struct amd_iommu *iommu) 1549{ 1550 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 1551 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 1552 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 1553 1554 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 1555 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 1556 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 1557 1558 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 1559 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 1560 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 1561 1562 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 1563 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : 1564 iommu_feature_disable(iommu, CONTROL_ISOC_EN); 1565 1566 /* 1567 * make IOMMU memory accesses cache coherent 1568 */ 1569 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 1570 1571 /* Set IOTLB invalidation timeout to 1s */ 1572 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); 1573} 1574 1575static void iommu_apply_resume_quirks(struct amd_iommu *iommu) 1576{ 1577 int i, j; 1578 u32 ioc_feature_control; 1579 struct pci_dev *pdev = iommu->root_pdev; 1580 1581 /* RD890 BIOSes may not have completely reconfigured the iommu */ 1582 if (!is_rd890_iommu(iommu->dev) || !pdev) 1583 return; 1584 1585 /* 1586 * First, we need to ensure that the iommu is enabled. This is 1587 * controlled by a register in the northbridge 1588 */ 1589 1590 /* Select Northbridge indirect register 0x75 and enable writing */ 1591 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); 1592 pci_read_config_dword(pdev, 0x64, &ioc_feature_control); 1593 1594 /* Enable the iommu */ 1595 if (!(ioc_feature_control & 0x1)) 1596 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); 1597 1598 /* Restore the iommu BAR */ 1599 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 1600 iommu->stored_addr_lo); 1601 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, 1602 iommu->stored_addr_hi); 1603 1604 /* Restore the l1 indirect regs for each of the 6 l1s */ 1605 for (i = 0; i < 6; i++) 1606 for (j = 0; j < 0x12; j++) 1607 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); 1608 1609 /* Restore the l2 indirect regs */ 1610 for (i = 0; i < 0x83; i++) 1611 iommu_write_l2(iommu, i, iommu->stored_l2[i]); 1612 1613 /* Lock PCI setup registers */ 1614 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 1615 iommu->stored_addr_lo | 1); 1616} 1617 1618/* 1619 * This function finally enables all IOMMUs found in the system after 1620 * they have been initialized 1621 */ 1622static void early_enable_iommus(void) 1623{ 1624 struct amd_iommu *iommu; 1625 1626 for_each_iommu(iommu) { 1627 iommu_disable(iommu); 1628 iommu_init_flags(iommu); 1629 iommu_set_device_table(iommu); 1630 iommu_enable_command_buffer(iommu); 1631 iommu_enable_event_buffer(iommu); 1632 iommu_set_exclusion_range(iommu); 1633 iommu_enable(iommu); 1634 iommu_flush_all_caches(iommu); 1635 } 1636} 1637 1638static void enable_iommus_v2(void) 1639{ 1640 struct amd_iommu *iommu; 1641 1642 for_each_iommu(iommu) { 1643 iommu_enable_ppr_log(iommu); 1644 iommu_enable_gt(iommu); 1645 } 1646} 1647 1648static void enable_iommus(void) 1649{ 1650 early_enable_iommus(); 1651 1652 enable_iommus_v2(); 1653} 1654 1655static void disable_iommus(void) 1656{ 1657 struct amd_iommu *iommu; 1658 1659 for_each_iommu(iommu) 1660 iommu_disable(iommu); 1661} 1662 1663/* 1664 * Suspend/Resume support 1665 * disable suspend until real resume implemented 1666 */ 1667 1668static void amd_iommu_resume(void) 1669{ 1670 struct amd_iommu *iommu; 1671 1672 for_each_iommu(iommu) 1673 iommu_apply_resume_quirks(iommu); 1674 1675 /* re-load the hardware */ 1676 enable_iommus(); 1677 1678 amd_iommu_enable_interrupts(); 1679} 1680 1681static int amd_iommu_suspend(void) 1682{ 1683 /* disable IOMMUs to go out of the way for BIOS */ 1684 disable_iommus(); 1685 1686 return 0; 1687} 1688 1689static struct syscore_ops amd_iommu_syscore_ops = { 1690 .suspend = amd_iommu_suspend, 1691 .resume = amd_iommu_resume, 1692}; 1693 1694static void __init free_on_init_error(void) 1695{ 1696 free_pages((unsigned long)irq_lookup_table, 1697 get_order(rlookup_table_size)); 1698 1699 if (amd_iommu_irq_cache) { 1700 kmem_cache_destroy(amd_iommu_irq_cache); 1701 amd_iommu_irq_cache = NULL; 1702 1703 } 1704 1705 free_pages((unsigned long)amd_iommu_rlookup_table, 1706 get_order(rlookup_table_size)); 1707 1708 free_pages((unsigned long)amd_iommu_alias_table, 1709 get_order(alias_table_size)); 1710 1711 free_pages((unsigned long)amd_iommu_dev_table, 1712 get_order(dev_table_size)); 1713 1714 free_iommu_all(); 1715 1716#ifdef CONFIG_GART_IOMMU 1717 /* 1718 * We failed to initialize the AMD IOMMU - try fallback to GART 1719 * if possible. 1720 */ 1721 gart_iommu_init(); 1722 1723#endif 1724} 1725 1726/* SB IOAPIC is always on this device in AMD systems */ 1727#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) 1728 1729static bool __init check_ioapic_information(void) 1730{ 1731 const char *fw_bug = FW_BUG; 1732 bool ret, has_sb_ioapic; 1733 int idx; 1734 1735 has_sb_ioapic = false; 1736 ret = false; 1737 1738 /* 1739 * If we have map overrides on the kernel command line the 1740 * messages in this function might not describe firmware bugs 1741 * anymore - so be careful 1742 */ 1743 if (cmdline_maps) 1744 fw_bug = ""; 1745 1746 for (idx = 0; idx < nr_ioapics; idx++) { 1747 int devid, id = mpc_ioapic_id(idx); 1748 1749 devid = get_ioapic_devid(id); 1750 if (devid < 0) { 1751 pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n", 1752 fw_bug, id); 1753 ret = false; 1754 } else if (devid == IOAPIC_SB_DEVID) { 1755 has_sb_ioapic = true; 1756 ret = true; 1757 } 1758 } 1759 1760 if (!has_sb_ioapic) { 1761 /* 1762 * We expect the SB IOAPIC to be listed in the IVRS 1763 * table. The system timer is connected to the SB IOAPIC 1764 * and if we don't have it in the list the system will 1765 * panic at boot time. This situation usually happens 1766 * when the BIOS is buggy and provides us the wrong 1767 * device id for the IOAPIC in the system. 1768 */ 1769 pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug); 1770 } 1771 1772 if (!ret) 1773 pr_err("AMD-Vi: Disabling interrupt remapping\n"); 1774 1775 return ret; 1776} 1777 1778static void __init free_dma_resources(void) 1779{ 1780 amd_iommu_uninit_devices(); 1781 1782 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 1783 get_order(MAX_DOMAIN_ID/8)); 1784 1785 free_unity_maps(); 1786} 1787 1788/* 1789 * This is the hardware init function for AMD IOMMU in the system. 1790 * This function is called either from amd_iommu_init or from the interrupt 1791 * remapping setup code. 1792 * 1793 * This function basically parses the ACPI table for AMD IOMMU (IVRS) 1794 * three times: 1795 * 1796 * 1 pass) Find the highest PCI device id the driver has to handle. 1797 * Upon this information the size of the data structures is 1798 * determined that needs to be allocated. 1799 * 1800 * 2 pass) Initialize the data structures just allocated with the 1801 * information in the ACPI table about available AMD IOMMUs 1802 * in the system. It also maps the PCI devices in the 1803 * system to specific IOMMUs 1804 * 1805 * 3 pass) After the basic data structures are allocated and 1806 * initialized we update them with information about memory 1807 * remapping requirements parsed out of the ACPI table in 1808 * this last pass. 1809 * 1810 * After everything is set up the IOMMUs are enabled and the necessary 1811 * hotplug and suspend notifiers are registered. 1812 */ 1813static int __init early_amd_iommu_init(void) 1814{ 1815 struct acpi_table_header *ivrs_base; 1816 acpi_size ivrs_size; 1817 acpi_status status; 1818 int i, ret = 0; 1819 1820 if (!amd_iommu_detected) 1821 return -ENODEV; 1822 1823 status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size); 1824 if (status == AE_NOT_FOUND) 1825 return -ENODEV; 1826 else if (ACPI_FAILURE(status)) { 1827 const char *err = acpi_format_exception(status); 1828 pr_err("AMD-Vi: IVRS table error: %s\n", err); 1829 return -EINVAL; 1830 } 1831 1832 /* 1833 * First parse ACPI tables to find the largest Bus/Dev/Func 1834 * we need to handle. Upon this information the shared data 1835 * structures for the IOMMUs in the system will be allocated 1836 */ 1837 ret = find_last_devid_acpi(ivrs_base); 1838 if (ret) 1839 goto out; 1840 1841 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); 1842 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); 1843 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); 1844 1845 /* Device table - directly used by all IOMMUs */ 1846 ret = -ENOMEM; 1847 amd_iommu_dev_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1848 get_order(dev_table_size)); 1849 if (amd_iommu_dev_table == NULL) 1850 goto out; 1851 1852 /* 1853 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the 1854 * IOMMU see for that device 1855 */ 1856 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, 1857 get_order(alias_table_size)); 1858 if (amd_iommu_alias_table == NULL) 1859 goto out; 1860 1861 /* IOMMU rlookup table - find the IOMMU for a specific device */ 1862 amd_iommu_rlookup_table = (void *)__get_free_pages( 1863 GFP_KERNEL | __GFP_ZERO, 1864 get_order(rlookup_table_size)); 1865 if (amd_iommu_rlookup_table == NULL) 1866 goto out; 1867 1868 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( 1869 GFP_KERNEL | __GFP_ZERO, 1870 get_order(MAX_DOMAIN_ID/8)); 1871 if (amd_iommu_pd_alloc_bitmap == NULL) 1872 goto out; 1873 1874 /* 1875 * let all alias entries point to itself 1876 */ 1877 for (i = 0; i <= amd_iommu_last_bdf; ++i) 1878 amd_iommu_alias_table[i] = i; 1879 1880 /* 1881 * never allocate domain 0 because its used as the non-allocated and 1882 * error value placeholder 1883 */ 1884 amd_iommu_pd_alloc_bitmap[0] = 1; 1885 1886 spin_lock_init(&amd_iommu_pd_lock); 1887 1888 /* 1889 * now the data structures are allocated and basically initialized 1890 * start the real acpi table scan 1891 */ 1892 ret = init_iommu_all(ivrs_base); 1893 if (ret) 1894 goto out; 1895 1896 if (amd_iommu_irq_remap) 1897 amd_iommu_irq_remap = check_ioapic_information(); 1898 1899 if (amd_iommu_irq_remap) { 1900 /* 1901 * Interrupt remapping enabled, create kmem_cache for the 1902 * remapping tables. 1903 */ 1904 ret = -ENOMEM; 1905 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", 1906 MAX_IRQS_PER_TABLE * sizeof(u32), 1907 IRQ_TABLE_ALIGNMENT, 1908 0, NULL); 1909 if (!amd_iommu_irq_cache) 1910 goto out; 1911 1912 irq_lookup_table = (void *)__get_free_pages( 1913 GFP_KERNEL | __GFP_ZERO, 1914 get_order(rlookup_table_size)); 1915 if (!irq_lookup_table) 1916 goto out; 1917 } 1918 1919 ret = init_memory_definitions(ivrs_base); 1920 if (ret) 1921 goto out; 1922 1923 /* init the device table */ 1924 init_device_table(); 1925 1926out: 1927 /* Don't leak any ACPI memory */ 1928 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); 1929 ivrs_base = NULL; 1930 1931 return ret; 1932} 1933 1934static int amd_iommu_enable_interrupts(void) 1935{ 1936 struct amd_iommu *iommu; 1937 int ret = 0; 1938 1939 for_each_iommu(iommu) { 1940 ret = iommu_init_msi(iommu); 1941 if (ret) 1942 goto out; 1943 } 1944 1945out: 1946 return ret; 1947} 1948 1949static bool detect_ivrs(void) 1950{ 1951 struct acpi_table_header *ivrs_base; 1952 acpi_size ivrs_size; 1953 acpi_status status; 1954 1955 status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size); 1956 if (status == AE_NOT_FOUND) 1957 return false; 1958 else if (ACPI_FAILURE(status)) { 1959 const char *err = acpi_format_exception(status); 1960 pr_err("AMD-Vi: IVRS table error: %s\n", err); 1961 return false; 1962 } 1963 1964 early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); 1965 1966 /* Make sure ACS will be enabled during PCI probe */ 1967 pci_request_acs(); 1968 1969 if (!disable_irq_remap) 1970 amd_iommu_irq_remap = true; 1971 1972 return true; 1973} 1974 1975static int amd_iommu_init_dma(void) 1976{ 1977 struct amd_iommu *iommu; 1978 int ret; 1979 1980 if (iommu_pass_through) 1981 ret = amd_iommu_init_passthrough(); 1982 else 1983 ret = amd_iommu_init_dma_ops(); 1984 1985 if (ret) 1986 return ret; 1987 1988 init_device_table_dma(); 1989 1990 for_each_iommu(iommu) 1991 iommu_flush_all_caches(iommu); 1992 1993 amd_iommu_init_api(); 1994 1995 amd_iommu_init_notifier(); 1996 1997 return 0; 1998} 1999 2000/**************************************************************************** 2001 * 2002 * AMD IOMMU Initialization State Machine 2003 * 2004 ****************************************************************************/ 2005 2006static int __init state_next(void) 2007{ 2008 int ret = 0; 2009 2010 switch (init_state) { 2011 case IOMMU_START_STATE: 2012 if (!detect_ivrs()) { 2013 init_state = IOMMU_NOT_FOUND; 2014 ret = -ENODEV; 2015 } else { 2016 init_state = IOMMU_IVRS_DETECTED; 2017 } 2018 break; 2019 case IOMMU_IVRS_DETECTED: 2020 ret = early_amd_iommu_init(); 2021 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 2022 break; 2023 case IOMMU_ACPI_FINISHED: 2024 early_enable_iommus(); 2025 register_syscore_ops(&amd_iommu_syscore_ops); 2026 x86_platform.iommu_shutdown = disable_iommus; 2027 init_state = IOMMU_ENABLED; 2028 break; 2029 case IOMMU_ENABLED: 2030 ret = amd_iommu_init_pci(); 2031 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; 2032 enable_iommus_v2(); 2033 break; 2034 case IOMMU_PCI_INIT: 2035 ret = amd_iommu_enable_interrupts(); 2036 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 2037 break; 2038 case IOMMU_INTERRUPTS_EN: 2039 ret = amd_iommu_init_dma(); 2040 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; 2041 break; 2042 case IOMMU_DMA_OPS: 2043 init_state = IOMMU_INITIALIZED; 2044 break; 2045 case IOMMU_INITIALIZED: 2046 /* Nothing to do */ 2047 break; 2048 case IOMMU_NOT_FOUND: 2049 case IOMMU_INIT_ERROR: 2050 /* Error states => do nothing */ 2051 ret = -EINVAL; 2052 break; 2053 default: 2054 /* Unknown state */ 2055 BUG(); 2056 } 2057 2058 return ret; 2059} 2060 2061static int __init iommu_go_to_state(enum iommu_init_state state) 2062{ 2063 int ret = 0; 2064 2065 while (init_state != state) { 2066 ret = state_next(); 2067 if (init_state == IOMMU_NOT_FOUND || 2068 init_state == IOMMU_INIT_ERROR) 2069 break; 2070 } 2071 2072 return ret; 2073} 2074 2075#ifdef CONFIG_IRQ_REMAP 2076int __init amd_iommu_prepare(void) 2077{ 2078 return iommu_go_to_state(IOMMU_ACPI_FINISHED); 2079} 2080 2081int __init amd_iommu_supported(void) 2082{ 2083 return amd_iommu_irq_remap ? 1 : 0; 2084} 2085 2086int __init amd_iommu_enable(void) 2087{ 2088 int ret; 2089 2090 ret = iommu_go_to_state(IOMMU_ENABLED); 2091 if (ret) 2092 return ret; 2093 2094 irq_remapping_enabled = 1; 2095 2096 return 0; 2097} 2098 2099void amd_iommu_disable(void) 2100{ 2101 amd_iommu_suspend(); 2102} 2103 2104int amd_iommu_reenable(int mode) 2105{ 2106 amd_iommu_resume(); 2107 2108 return 0; 2109} 2110 2111int __init amd_iommu_enable_faulting(void) 2112{ 2113 /* We enable MSI later when PCI is initialized */ 2114 return 0; 2115} 2116#endif 2117 2118/* 2119 * This is the core init function for AMD IOMMU hardware in the system. 2120 * This function is called from the generic x86 DMA layer initialization 2121 * code. 2122 */ 2123static int __init amd_iommu_init(void) 2124{ 2125 int ret; 2126 2127 ret = iommu_go_to_state(IOMMU_INITIALIZED); 2128 if (ret) { 2129 free_dma_resources(); 2130 if (!irq_remapping_enabled) { 2131 disable_iommus(); 2132 free_on_init_error(); 2133 } else { 2134 struct amd_iommu *iommu; 2135 2136 uninit_device_table_dma(); 2137 for_each_iommu(iommu) 2138 iommu_flush_all_caches(iommu); 2139 } 2140 } 2141 2142 return ret; 2143} 2144 2145/**************************************************************************** 2146 * 2147 * Early detect code. This code runs at IOMMU detection time in the DMA 2148 * layer. It just looks if there is an IVRS ACPI table to detect AMD 2149 * IOMMUs 2150 * 2151 ****************************************************************************/ 2152int __init amd_iommu_detect(void) 2153{ 2154 int ret; 2155 2156 if (no_iommu || (iommu_detected && !gart_iommu_aperture)) 2157 return -ENODEV; 2158 2159 if (amd_iommu_disabled) 2160 return -ENODEV; 2161 2162 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); 2163 if (ret) 2164 return ret; 2165 2166 amd_iommu_detected = true; 2167 iommu_detected = 1; 2168 x86_init.iommu.iommu_init = amd_iommu_init; 2169 2170 return 0; 2171} 2172 2173/**************************************************************************** 2174 * 2175 * Parsing functions for the AMD IOMMU specific kernel command line 2176 * options. 2177 * 2178 ****************************************************************************/ 2179 2180static int __init parse_amd_iommu_dump(char *str) 2181{ 2182 amd_iommu_dump = true; 2183 2184 return 1; 2185} 2186 2187static int __init parse_amd_iommu_options(char *str) 2188{ 2189 for (; *str; ++str) { 2190 if (strncmp(str, "fullflush", 9) == 0) 2191 amd_iommu_unmap_flush = true; 2192 if (strncmp(str, "off", 3) == 0) 2193 amd_iommu_disabled = true; 2194 if (strncmp(str, "force_isolation", 15) == 0) 2195 amd_iommu_force_isolation = true; 2196 } 2197 2198 return 1; 2199} 2200 2201static int __init parse_ivrs_ioapic(char *str) 2202{ 2203 unsigned int bus, dev, fn; 2204 int ret, id, i; 2205 u16 devid; 2206 2207 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); 2208 2209 if (ret != 4) { 2210 pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str); 2211 return 1; 2212 } 2213 2214 if (early_ioapic_map_size == EARLY_MAP_SIZE) { 2215 pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", 2216 str); 2217 return 1; 2218 } 2219 2220 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); 2221 2222 cmdline_maps = true; 2223 i = early_ioapic_map_size++; 2224 early_ioapic_map[i].id = id; 2225 early_ioapic_map[i].devid = devid; 2226 early_ioapic_map[i].cmd_line = true; 2227 2228 return 1; 2229} 2230 2231static int __init parse_ivrs_hpet(char *str) 2232{ 2233 unsigned int bus, dev, fn; 2234 int ret, id, i; 2235 u16 devid; 2236 2237 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); 2238 2239 if (ret != 4) { 2240 pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str); 2241 return 1; 2242 } 2243 2244 if (early_hpet_map_size == EARLY_MAP_SIZE) { 2245 pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n", 2246 str); 2247 return 1; 2248 } 2249 2250 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); 2251 2252 cmdline_maps = true; 2253 i = early_hpet_map_size++; 2254 early_hpet_map[i].id = id; 2255 early_hpet_map[i].devid = devid; 2256 early_hpet_map[i].cmd_line = true; 2257 2258 return 1; 2259} 2260 2261__setup("amd_iommu_dump", parse_amd_iommu_dump); 2262__setup("amd_iommu=", parse_amd_iommu_options); 2263__setup("ivrs_ioapic", parse_ivrs_ioapic); 2264__setup("ivrs_hpet", parse_ivrs_hpet); 2265 2266IOMMU_INIT_FINISH(amd_iommu_detect, 2267 gart_iommu_hole_init, 2268 NULL, 2269 NULL); 2270 2271bool amd_iommu_v2_supported(void) 2272{ 2273 return amd_iommu_v2_present; 2274} 2275EXPORT_SYMBOL(amd_iommu_v2_supported); 2276 2277/**************************************************************************** 2278 * 2279 * IOMMU EFR Performance Counter support functionality. This code allows 2280 * access to the IOMMU PC functionality. 2281 * 2282 ****************************************************************************/ 2283 2284u8 amd_iommu_pc_get_max_banks(u16 devid) 2285{ 2286 struct amd_iommu *iommu; 2287 u8 ret = 0; 2288 2289 /* locate the iommu governing the devid */ 2290 iommu = amd_iommu_rlookup_table[devid]; 2291 if (iommu) 2292 ret = iommu->max_banks; 2293 2294 return ret; 2295} 2296EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); 2297 2298bool amd_iommu_pc_supported(void) 2299{ 2300 return amd_iommu_pc_present; 2301} 2302EXPORT_SYMBOL(amd_iommu_pc_supported); 2303 2304u8 amd_iommu_pc_get_max_counters(u16 devid) 2305{ 2306 struct amd_iommu *iommu; 2307 u8 ret = 0; 2308 2309 /* locate the iommu governing the devid */ 2310 iommu = amd_iommu_rlookup_table[devid]; 2311 if (iommu) 2312 ret = iommu->max_counters; 2313 2314 return ret; 2315} 2316EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); 2317 2318int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, 2319 u64 *value, bool is_write) 2320{ 2321 struct amd_iommu *iommu; 2322 u32 offset; 2323 u32 max_offset_lim; 2324 2325 /* Make sure the IOMMU PC resource is available */ 2326 if (!amd_iommu_pc_present) 2327 return -ENODEV; 2328 2329 /* Locate the iommu associated with the device ID */ 2330 iommu = amd_iommu_rlookup_table[devid]; 2331 2332 /* Check for valid iommu and pc register indexing */ 2333 if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) 2334 return -ENODEV; 2335 2336 offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); 2337 2338 /* Limit the offset to the hw defined mmio region aperture */ 2339 max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) | 2340 (iommu->max_counters << 8) | 0x28); 2341 if ((offset < MMIO_CNTR_REG_OFFSET) || 2342 (offset > max_offset_lim)) 2343 return -EINVAL; 2344 2345 if (is_write) { 2346 writel((u32)*value, iommu->mmio_base + offset); 2347 writel((*value >> 32), iommu->mmio_base + offset + 4); 2348 } else { 2349 *value = readl(iommu->mmio_base + offset + 4); 2350 *value <<= 32; 2351 *value = readl(iommu->mmio_base + offset); 2352 } 2353 2354 return 0; 2355} 2356EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);