Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.20 3078 lines 77 kB view raw
1/* 2 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc. 3 * Author: Joerg Roedel <jroedel@suse.de> 4 * Leo Duran <leo.duran@amd.com> 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 * You should have received a copy of the GNU General Public License 16 * along with this program; if not, write to the Free Software 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 */ 19 20#include <linux/pci.h> 21#include <linux/acpi.h> 22#include <linux/list.h> 23#include <linux/bitmap.h> 24#include <linux/slab.h> 25#include <linux/syscore_ops.h> 26#include <linux/interrupt.h> 27#include <linux/msi.h> 28#include <linux/amd-iommu.h> 29#include <linux/export.h> 30#include <linux/iommu.h> 31#include <linux/kmemleak.h> 32#include <linux/mem_encrypt.h> 33#include <asm/pci-direct.h> 34#include <asm/iommu.h> 35#include <asm/gart.h> 36#include <asm/x86_init.h> 37#include <asm/iommu_table.h> 38#include <asm/io_apic.h> 39#include <asm/irq_remapping.h> 40 41#include <linux/crash_dump.h> 42#include "amd_iommu_proto.h" 43#include "amd_iommu_types.h" 44#include "irq_remapping.h" 45 46/* 47 * definitions for the ACPI scanning code 48 */ 49#define IVRS_HEADER_LENGTH 48 50 51#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40 52#define ACPI_IVMD_TYPE_ALL 0x20 53#define ACPI_IVMD_TYPE 0x21 54#define ACPI_IVMD_TYPE_RANGE 0x22 55 56#define IVHD_DEV_ALL 0x01 57#define IVHD_DEV_SELECT 0x02 58#define IVHD_DEV_SELECT_RANGE_START 0x03 59#define IVHD_DEV_RANGE_END 0x04 60#define IVHD_DEV_ALIAS 0x42 61#define IVHD_DEV_ALIAS_RANGE 0x43 62#define IVHD_DEV_EXT_SELECT 0x46 63#define IVHD_DEV_EXT_SELECT_RANGE 0x47 64#define IVHD_DEV_SPECIAL 0x48 65#define IVHD_DEV_ACPI_HID 0xf0 66 67#define UID_NOT_PRESENT 0 68#define UID_IS_INTEGER 1 69#define UID_IS_CHARACTER 2 70 71#define IVHD_SPECIAL_IOAPIC 1 72#define IVHD_SPECIAL_HPET 2 73 74#define IVHD_FLAG_HT_TUN_EN_MASK 0x01 75#define IVHD_FLAG_PASSPW_EN_MASK 0x02 76#define IVHD_FLAG_RESPASSPW_EN_MASK 0x04 77#define IVHD_FLAG_ISOC_EN_MASK 0x08 78 79#define IVMD_FLAG_EXCL_RANGE 0x08 80#define IVMD_FLAG_UNITY_MAP 0x01 81 82#define ACPI_DEVFLAG_INITPASS 0x01 83#define ACPI_DEVFLAG_EXTINT 0x02 84#define ACPI_DEVFLAG_NMI 0x04 85#define ACPI_DEVFLAG_SYSMGT1 0x10 86#define ACPI_DEVFLAG_SYSMGT2 0x20 87#define ACPI_DEVFLAG_LINT0 0x40 88#define ACPI_DEVFLAG_LINT1 0x80 89#define ACPI_DEVFLAG_ATSDIS 0x10000000 90 91#define LOOP_TIMEOUT 100000 92/* 93 * ACPI table definitions 94 * 95 * These data structures are laid over the table to parse the important values 96 * out of it. 97 */ 98 99extern const struct iommu_ops amd_iommu_ops; 100 101/* 102 * structure describing one IOMMU in the ACPI table. Typically followed by one 103 * or more ivhd_entrys. 104 */ 105struct ivhd_header { 106 u8 type; 107 u8 flags; 108 u16 length; 109 u16 devid; 110 u16 cap_ptr; 111 u64 mmio_phys; 112 u16 pci_seg; 113 u16 info; 114 u32 efr_attr; 115 116 /* Following only valid on IVHD type 11h and 40h */ 117 u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */ 118 u64 res; 119} __attribute__((packed)); 120 121/* 122 * A device entry describing which devices a specific IOMMU translates and 123 * which requestor ids they use. 124 */ 125struct ivhd_entry { 126 u8 type; 127 u16 devid; 128 u8 flags; 129 u32 ext; 130 u32 hidh; 131 u64 cid; 132 u8 uidf; 133 u8 uidl; 134 u8 uid; 135} __attribute__((packed)); 136 137/* 138 * An AMD IOMMU memory definition structure. It defines things like exclusion 139 * ranges for devices and regions that should be unity mapped. 140 */ 141struct ivmd_header { 142 u8 type; 143 u8 flags; 144 u16 length; 145 u16 devid; 146 u16 aux; 147 u64 resv; 148 u64 range_start; 149 u64 range_length; 150} __attribute__((packed)); 151 152bool amd_iommu_dump; 153bool amd_iommu_irq_remap __read_mostly; 154 155int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 156static int amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; 157 158static bool amd_iommu_detected; 159static bool __initdata amd_iommu_disabled; 160static int amd_iommu_target_ivhd_type; 161 162u16 amd_iommu_last_bdf; /* largest PCI device id we have 163 to handle */ 164LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings 165 we find in ACPI */ 166bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ 167 168LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the 169 system */ 170 171/* Array to assign indices to IOMMUs*/ 172struct amd_iommu *amd_iommus[MAX_IOMMUS]; 173 174/* Number of IOMMUs present in the system */ 175static int amd_iommus_present; 176 177/* IOMMUs have a non-present cache? */ 178bool amd_iommu_np_cache __read_mostly; 179bool amd_iommu_iotlb_sup __read_mostly = true; 180 181u32 amd_iommu_max_pasid __read_mostly = ~0; 182 183bool amd_iommu_v2_present __read_mostly; 184static bool amd_iommu_pc_present __read_mostly; 185 186bool amd_iommu_force_isolation __read_mostly; 187 188/* 189 * List of protection domains - used during resume 190 */ 191LIST_HEAD(amd_iommu_pd_list); 192spinlock_t amd_iommu_pd_lock; 193 194/* 195 * Pointer to the device table which is shared by all AMD IOMMUs 196 * it is indexed by the PCI device id or the HT unit id and contains 197 * information about the domain the device belongs to as well as the 198 * page table root pointer. 199 */ 200struct dev_table_entry *amd_iommu_dev_table; 201/* 202 * Pointer to a device table which the content of old device table 203 * will be copied to. It's only be used in kdump kernel. 204 */ 205static struct dev_table_entry *old_dev_tbl_cpy; 206 207/* 208 * The alias table is a driver specific data structure which contains the 209 * mappings of the PCI device ids to the actual requestor ids on the IOMMU. 210 * More than one device can share the same requestor id. 211 */ 212u16 *amd_iommu_alias_table; 213 214/* 215 * The rlookup table is used to find the IOMMU which is responsible 216 * for a specific device. It is also indexed by the PCI device id. 217 */ 218struct amd_iommu **amd_iommu_rlookup_table; 219EXPORT_SYMBOL(amd_iommu_rlookup_table); 220 221/* 222 * This table is used to find the irq remapping table for a given device id 223 * quickly. 224 */ 225struct irq_remap_table **irq_lookup_table; 226 227/* 228 * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap 229 * to know which ones are already in use. 230 */ 231unsigned long *amd_iommu_pd_alloc_bitmap; 232 233static u32 dev_table_size; /* size of the device table */ 234static u32 alias_table_size; /* size of the alias table */ 235static u32 rlookup_table_size; /* size if the rlookup table */ 236 237enum iommu_init_state { 238 IOMMU_START_STATE, 239 IOMMU_IVRS_DETECTED, 240 IOMMU_ACPI_FINISHED, 241 IOMMU_ENABLED, 242 IOMMU_PCI_INIT, 243 IOMMU_INTERRUPTS_EN, 244 IOMMU_DMA_OPS, 245 IOMMU_INITIALIZED, 246 IOMMU_NOT_FOUND, 247 IOMMU_INIT_ERROR, 248 IOMMU_CMDLINE_DISABLED, 249}; 250 251/* Early ioapic and hpet maps from kernel command line */ 252#define EARLY_MAP_SIZE 4 253static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE]; 254static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE]; 255static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE]; 256 257static int __initdata early_ioapic_map_size; 258static int __initdata early_hpet_map_size; 259static int __initdata early_acpihid_map_size; 260 261static bool __initdata cmdline_maps; 262 263static enum iommu_init_state init_state = IOMMU_START_STATE; 264 265static int amd_iommu_enable_interrupts(void); 266static int __init iommu_go_to_state(enum iommu_init_state state); 267static void init_device_table_dma(void); 268 269static bool amd_iommu_pre_enabled = true; 270 271bool translation_pre_enabled(struct amd_iommu *iommu) 272{ 273 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); 274} 275EXPORT_SYMBOL(translation_pre_enabled); 276 277static void clear_translation_pre_enabled(struct amd_iommu *iommu) 278{ 279 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 280} 281 282static void init_translation_status(struct amd_iommu *iommu) 283{ 284 u64 ctrl; 285 286 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 287 if (ctrl & (1<<CONTROL_IOMMU_EN)) 288 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED; 289} 290 291static inline void update_last_devid(u16 devid) 292{ 293 if (devid > amd_iommu_last_bdf) 294 amd_iommu_last_bdf = devid; 295} 296 297static inline unsigned long tbl_size(int entry_size) 298{ 299 unsigned shift = PAGE_SHIFT + 300 get_order(((int)amd_iommu_last_bdf + 1) * entry_size); 301 302 return 1UL << shift; 303} 304 305int amd_iommu_get_num_iommus(void) 306{ 307 return amd_iommus_present; 308} 309 310/* Access to l1 and l2 indexed register spaces */ 311 312static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) 313{ 314 u32 val; 315 316 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 317 pci_read_config_dword(iommu->dev, 0xfc, &val); 318 return val; 319} 320 321static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val) 322{ 323 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31)); 324 pci_write_config_dword(iommu->dev, 0xfc, val); 325 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16)); 326} 327 328static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address) 329{ 330 u32 val; 331 332 pci_write_config_dword(iommu->dev, 0xf0, address); 333 pci_read_config_dword(iommu->dev, 0xf4, &val); 334 return val; 335} 336 337static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val) 338{ 339 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8)); 340 pci_write_config_dword(iommu->dev, 0xf4, val); 341} 342 343/**************************************************************************** 344 * 345 * AMD IOMMU MMIO register space handling functions 346 * 347 * These functions are used to program the IOMMU device registers in 348 * MMIO space required for that driver. 349 * 350 ****************************************************************************/ 351 352/* 353 * This function set the exclusion range in the IOMMU. DMA accesses to the 354 * exclusion range are passed through untranslated 355 */ 356static void iommu_set_exclusion_range(struct amd_iommu *iommu) 357{ 358 u64 start = iommu->exclusion_start & PAGE_MASK; 359 u64 limit = (start + iommu->exclusion_length) & PAGE_MASK; 360 u64 entry; 361 362 if (!iommu->exclusion_start) 363 return; 364 365 entry = start | MMIO_EXCL_ENABLE_MASK; 366 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET, 367 &entry, sizeof(entry)); 368 369 entry = limit; 370 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET, 371 &entry, sizeof(entry)); 372} 373 374/* Programs the physical address of the device table into the IOMMU hardware */ 375static void iommu_set_device_table(struct amd_iommu *iommu) 376{ 377 u64 entry; 378 379 BUG_ON(iommu->mmio_base == NULL); 380 381 entry = iommu_virt_to_phys(amd_iommu_dev_table); 382 entry |= (dev_table_size >> 12) - 1; 383 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET, 384 &entry, sizeof(entry)); 385} 386 387/* Generic functions to enable/disable certain features of the IOMMU. */ 388static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) 389{ 390 u64 ctrl; 391 392 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 393 ctrl |= (1ULL << bit); 394 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 395} 396 397static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) 398{ 399 u64 ctrl; 400 401 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 402 ctrl &= ~(1ULL << bit); 403 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 404} 405 406static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout) 407{ 408 u64 ctrl; 409 410 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET); 411 ctrl &= ~CTRL_INV_TO_MASK; 412 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK; 413 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); 414} 415 416/* Function to enable the hardware */ 417static void iommu_enable(struct amd_iommu *iommu) 418{ 419 iommu_feature_enable(iommu, CONTROL_IOMMU_EN); 420} 421 422static void iommu_disable(struct amd_iommu *iommu) 423{ 424 /* Disable command buffer */ 425 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 426 427 /* Disable event logging and event interrupts */ 428 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN); 429 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 430 431 /* Disable IOMMU GA_LOG */ 432 iommu_feature_disable(iommu, CONTROL_GALOG_EN); 433 iommu_feature_disable(iommu, CONTROL_GAINT_EN); 434 435 /* Disable IOMMU hardware itself */ 436 iommu_feature_disable(iommu, CONTROL_IOMMU_EN); 437} 438 439/* 440 * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in 441 * the system has one. 442 */ 443static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end) 444{ 445 if (!request_mem_region(address, end, "amd_iommu")) { 446 pr_err("AMD-Vi: Can not reserve memory region %llx-%llx for mmio\n", 447 address, end); 448 pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); 449 return NULL; 450 } 451 452 return (u8 __iomem *)ioremap_nocache(address, end); 453} 454 455static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu) 456{ 457 if (iommu->mmio_base) 458 iounmap(iommu->mmio_base); 459 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end); 460} 461 462static inline u32 get_ivhd_header_size(struct ivhd_header *h) 463{ 464 u32 size = 0; 465 466 switch (h->type) { 467 case 0x10: 468 size = 24; 469 break; 470 case 0x11: 471 case 0x40: 472 size = 40; 473 break; 474 } 475 return size; 476} 477 478/**************************************************************************** 479 * 480 * The functions below belong to the first pass of AMD IOMMU ACPI table 481 * parsing. In this pass we try to find out the highest device id this 482 * code has to handle. Upon this information the size of the shared data 483 * structures is determined later. 484 * 485 ****************************************************************************/ 486 487/* 488 * This function calculates the length of a given IVHD entry 489 */ 490static inline int ivhd_entry_length(u8 *ivhd) 491{ 492 u32 type = ((struct ivhd_entry *)ivhd)->type; 493 494 if (type < 0x80) { 495 return 0x04 << (*ivhd >> 6); 496 } else if (type == IVHD_DEV_ACPI_HID) { 497 /* For ACPI_HID, offset 21 is uid len */ 498 return *((u8 *)ivhd + 21) + 22; 499 } 500 return 0; 501} 502 503/* 504 * After reading the highest device id from the IOMMU PCI capability header 505 * this function looks if there is a higher device id defined in the ACPI table 506 */ 507static int __init find_last_devid_from_ivhd(struct ivhd_header *h) 508{ 509 u8 *p = (void *)h, *end = (void *)h; 510 struct ivhd_entry *dev; 511 512 u32 ivhd_size = get_ivhd_header_size(h); 513 514 if (!ivhd_size) { 515 pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type); 516 return -EINVAL; 517 } 518 519 p += ivhd_size; 520 end += h->length; 521 522 while (p < end) { 523 dev = (struct ivhd_entry *)p; 524 switch (dev->type) { 525 case IVHD_DEV_ALL: 526 /* Use maximum BDF value for DEV_ALL */ 527 update_last_devid(0xffff); 528 break; 529 case IVHD_DEV_SELECT: 530 case IVHD_DEV_RANGE_END: 531 case IVHD_DEV_ALIAS: 532 case IVHD_DEV_EXT_SELECT: 533 /* all the above subfield types refer to device ids */ 534 update_last_devid(dev->devid); 535 break; 536 default: 537 break; 538 } 539 p += ivhd_entry_length(p); 540 } 541 542 WARN_ON(p != end); 543 544 return 0; 545} 546 547static int __init check_ivrs_checksum(struct acpi_table_header *table) 548{ 549 int i; 550 u8 checksum = 0, *p = (u8 *)table; 551 552 for (i = 0; i < table->length; ++i) 553 checksum += p[i]; 554 if (checksum != 0) { 555 /* ACPI table corrupt */ 556 pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n"); 557 return -ENODEV; 558 } 559 560 return 0; 561} 562 563/* 564 * Iterate over all IVHD entries in the ACPI table and find the highest device 565 * id which we need to handle. This is the first of three functions which parse 566 * the ACPI table. So we check the checksum here. 567 */ 568static int __init find_last_devid_acpi(struct acpi_table_header *table) 569{ 570 u8 *p = (u8 *)table, *end = (u8 *)table; 571 struct ivhd_header *h; 572 573 p += IVRS_HEADER_LENGTH; 574 575 end += table->length; 576 while (p < end) { 577 h = (struct ivhd_header *)p; 578 if (h->type == amd_iommu_target_ivhd_type) { 579 int ret = find_last_devid_from_ivhd(h); 580 581 if (ret) 582 return ret; 583 } 584 p += h->length; 585 } 586 WARN_ON(p != end); 587 588 return 0; 589} 590 591/**************************************************************************** 592 * 593 * The following functions belong to the code path which parses the ACPI table 594 * the second time. In this ACPI parsing iteration we allocate IOMMU specific 595 * data structures, initialize the device/alias/rlookup table and also 596 * basically initialize the hardware. 597 * 598 ****************************************************************************/ 599 600/* 601 * Allocates the command buffer. This buffer is per AMD IOMMU. We can 602 * write commands to that buffer later and the IOMMU will execute them 603 * asynchronously 604 */ 605static int __init alloc_command_buffer(struct amd_iommu *iommu) 606{ 607 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 608 get_order(CMD_BUFFER_SIZE)); 609 610 return iommu->cmd_buf ? 0 : -ENOMEM; 611} 612 613/* 614 * This function resets the command buffer if the IOMMU stopped fetching 615 * commands from it. 616 */ 617void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) 618{ 619 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 620 621 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); 622 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); 623 iommu->cmd_buf_head = 0; 624 iommu->cmd_buf_tail = 0; 625 626 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); 627} 628 629/* 630 * This function writes the command buffer address to the hardware and 631 * enables it. 632 */ 633static void iommu_enable_command_buffer(struct amd_iommu *iommu) 634{ 635 u64 entry; 636 637 BUG_ON(iommu->cmd_buf == NULL); 638 639 entry = iommu_virt_to_phys(iommu->cmd_buf); 640 entry |= MMIO_CMD_SIZE_512; 641 642 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, 643 &entry, sizeof(entry)); 644 645 amd_iommu_reset_cmd_buffer(iommu); 646} 647 648/* 649 * This function disables the command buffer 650 */ 651static void iommu_disable_command_buffer(struct amd_iommu *iommu) 652{ 653 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); 654} 655 656static void __init free_command_buffer(struct amd_iommu *iommu) 657{ 658 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE)); 659} 660 661/* allocates the memory where the IOMMU will log its events to */ 662static int __init alloc_event_buffer(struct amd_iommu *iommu) 663{ 664 iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 665 get_order(EVT_BUFFER_SIZE)); 666 667 return iommu->evt_buf ? 0 : -ENOMEM; 668} 669 670static void iommu_enable_event_buffer(struct amd_iommu *iommu) 671{ 672 u64 entry; 673 674 BUG_ON(iommu->evt_buf == NULL); 675 676 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK; 677 678 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET, 679 &entry, sizeof(entry)); 680 681 /* set head and tail to zero manually */ 682 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET); 683 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET); 684 685 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN); 686} 687 688/* 689 * This function disables the event log buffer 690 */ 691static void iommu_disable_event_buffer(struct amd_iommu *iommu) 692{ 693 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN); 694} 695 696static void __init free_event_buffer(struct amd_iommu *iommu) 697{ 698 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE)); 699} 700 701/* allocates the memory where the IOMMU will log its events to */ 702static int __init alloc_ppr_log(struct amd_iommu *iommu) 703{ 704 iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 705 get_order(PPR_LOG_SIZE)); 706 707 return iommu->ppr_log ? 0 : -ENOMEM; 708} 709 710static void iommu_enable_ppr_log(struct amd_iommu *iommu) 711{ 712 u64 entry; 713 714 if (iommu->ppr_log == NULL) 715 return; 716 717 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512; 718 719 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET, 720 &entry, sizeof(entry)); 721 722 /* set head and tail to zero manually */ 723 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 724 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 725 726 iommu_feature_enable(iommu, CONTROL_PPFLOG_EN); 727 iommu_feature_enable(iommu, CONTROL_PPR_EN); 728} 729 730static void __init free_ppr_log(struct amd_iommu *iommu) 731{ 732 if (iommu->ppr_log == NULL) 733 return; 734 735 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); 736} 737 738static void free_ga_log(struct amd_iommu *iommu) 739{ 740#ifdef CONFIG_IRQ_REMAP 741 if (iommu->ga_log) 742 free_pages((unsigned long)iommu->ga_log, 743 get_order(GA_LOG_SIZE)); 744 if (iommu->ga_log_tail) 745 free_pages((unsigned long)iommu->ga_log_tail, 746 get_order(8)); 747#endif 748} 749 750static int iommu_ga_log_enable(struct amd_iommu *iommu) 751{ 752#ifdef CONFIG_IRQ_REMAP 753 u32 status, i; 754 755 if (!iommu->ga_log) 756 return -EINVAL; 757 758 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 759 760 /* Check if already running */ 761 if (status & (MMIO_STATUS_GALOG_RUN_MASK)) 762 return 0; 763 764 iommu_feature_enable(iommu, CONTROL_GAINT_EN); 765 iommu_feature_enable(iommu, CONTROL_GALOG_EN); 766 767 for (i = 0; i < LOOP_TIMEOUT; ++i) { 768 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); 769 if (status & (MMIO_STATUS_GALOG_RUN_MASK)) 770 break; 771 } 772 773 if (i >= LOOP_TIMEOUT) 774 return -EINVAL; 775#endif /* CONFIG_IRQ_REMAP */ 776 return 0; 777} 778 779#ifdef CONFIG_IRQ_REMAP 780static int iommu_init_ga_log(struct amd_iommu *iommu) 781{ 782 u64 entry; 783 784 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 785 return 0; 786 787 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 788 get_order(GA_LOG_SIZE)); 789 if (!iommu->ga_log) 790 goto err_out; 791 792 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 793 get_order(8)); 794 if (!iommu->ga_log_tail) 795 goto err_out; 796 797 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; 798 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, 799 &entry, sizeof(entry)); 800 entry = (iommu_virt_to_phys(iommu->ga_log_tail) & 801 (BIT_ULL(52)-1)) & ~7ULL; 802 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, 803 &entry, sizeof(entry)); 804 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); 805 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET); 806 807 return 0; 808err_out: 809 free_ga_log(iommu); 810 return -EINVAL; 811} 812#endif /* CONFIG_IRQ_REMAP */ 813 814static int iommu_init_ga(struct amd_iommu *iommu) 815{ 816 int ret = 0; 817 818#ifdef CONFIG_IRQ_REMAP 819 /* Note: We have already checked GASup from IVRS table. 820 * Now, we need to make sure that GAMSup is set. 821 */ 822 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) && 823 !iommu_feature(iommu, FEATURE_GAM_VAPIC)) 824 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA; 825 826 ret = iommu_init_ga_log(iommu); 827#endif /* CONFIG_IRQ_REMAP */ 828 829 return ret; 830} 831 832static void iommu_enable_xt(struct amd_iommu *iommu) 833{ 834#ifdef CONFIG_IRQ_REMAP 835 /* 836 * XT mode (32-bit APIC destination ID) requires 837 * GA mode (128-bit IRTE support) as a prerequisite. 838 */ 839 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) && 840 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 841 iommu_feature_enable(iommu, CONTROL_XT_EN); 842#endif /* CONFIG_IRQ_REMAP */ 843} 844 845static void iommu_enable_gt(struct amd_iommu *iommu) 846{ 847 if (!iommu_feature(iommu, FEATURE_GT)) 848 return; 849 850 iommu_feature_enable(iommu, CONTROL_GT_EN); 851} 852 853/* sets a specific bit in the device table entry. */ 854static void set_dev_entry_bit(u16 devid, u8 bit) 855{ 856 int i = (bit >> 6) & 0x03; 857 int _bit = bit & 0x3f; 858 859 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit); 860} 861 862static int get_dev_entry_bit(u16 devid, u8 bit) 863{ 864 int i = (bit >> 6) & 0x03; 865 int _bit = bit & 0x3f; 866 867 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit; 868} 869 870 871static bool copy_device_table(void) 872{ 873 u64 int_ctl, int_tab_len, entry = 0, last_entry = 0; 874 struct dev_table_entry *old_devtb = NULL; 875 u32 lo, hi, devid, old_devtb_size; 876 phys_addr_t old_devtb_phys; 877 struct amd_iommu *iommu; 878 u16 dom_id, dte_v, irq_v; 879 gfp_t gfp_flag; 880 u64 tmp; 881 882 if (!amd_iommu_pre_enabled) 883 return false; 884 885 pr_warn("Translation is already enabled - trying to copy translation structures\n"); 886 for_each_iommu(iommu) { 887 /* All IOMMUs should use the same device table with the same size */ 888 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET); 889 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); 890 entry = (((u64) hi) << 32) + lo; 891 if (last_entry && last_entry != entry) { 892 pr_err("IOMMU:%d should use the same dev table as others!\n", 893 iommu->index); 894 return false; 895 } 896 last_entry = entry; 897 898 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; 899 if (old_devtb_size != dev_table_size) { 900 pr_err("The device table size of IOMMU:%d is not expected!\n", 901 iommu->index); 902 return false; 903 } 904 } 905 906 /* 907 * When SME is enabled in the first kernel, the entry includes the 908 * memory encryption mask(sme_me_mask), we must remove the memory 909 * encryption mask to obtain the true physical address in kdump kernel. 910 */ 911 old_devtb_phys = __sme_clr(entry) & PAGE_MASK; 912 913 if (old_devtb_phys >= 0x100000000ULL) { 914 pr_err("The address of old device table is above 4G, not trustworthy!\n"); 915 return false; 916 } 917 old_devtb = (sme_active() && is_kdump_kernel()) 918 ? (__force void *)ioremap_encrypted(old_devtb_phys, 919 dev_table_size) 920 : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); 921 922 if (!old_devtb) 923 return false; 924 925 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32; 926 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, 927 get_order(dev_table_size)); 928 if (old_dev_tbl_cpy == NULL) { 929 pr_err("Failed to allocate memory for copying old device table!\n"); 930 return false; 931 } 932 933 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 934 old_dev_tbl_cpy[devid] = old_devtb[devid]; 935 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK; 936 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V; 937 938 if (dte_v && dom_id) { 939 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0]; 940 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1]; 941 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap); 942 /* If gcr3 table existed, mask it out */ 943 if (old_devtb[devid].data[0] & DTE_FLAG_GV) { 944 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B; 945 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C; 946 old_dev_tbl_cpy[devid].data[1] &= ~tmp; 947 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A; 948 tmp |= DTE_FLAG_GV; 949 old_dev_tbl_cpy[devid].data[0] &= ~tmp; 950 } 951 } 952 953 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; 954 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; 955 int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK; 956 if (irq_v && (int_ctl || int_tab_len)) { 957 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) || 958 (int_tab_len != DTE_IRQ_TABLE_LEN)) { 959 pr_err("Wrong old irq remapping flag: %#x\n", devid); 960 return false; 961 } 962 963 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2]; 964 } 965 } 966 memunmap(old_devtb); 967 968 return true; 969} 970 971void amd_iommu_apply_erratum_63(u16 devid) 972{ 973 int sysmgt; 974 975 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | 976 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); 977 978 if (sysmgt == 0x01) 979 set_dev_entry_bit(devid, DEV_ENTRY_IW); 980} 981 982/* Writes the specific IOMMU for a device into the rlookup table */ 983static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) 984{ 985 amd_iommu_rlookup_table[devid] = iommu; 986} 987 988/* 989 * This function takes the device specific flags read from the ACPI 990 * table and sets up the device table entry with that information 991 */ 992static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, 993 u16 devid, u32 flags, u32 ext_flags) 994{ 995 if (flags & ACPI_DEVFLAG_INITPASS) 996 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS); 997 if (flags & ACPI_DEVFLAG_EXTINT) 998 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS); 999 if (flags & ACPI_DEVFLAG_NMI) 1000 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS); 1001 if (flags & ACPI_DEVFLAG_SYSMGT1) 1002 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1); 1003 if (flags & ACPI_DEVFLAG_SYSMGT2) 1004 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2); 1005 if (flags & ACPI_DEVFLAG_LINT0) 1006 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS); 1007 if (flags & ACPI_DEVFLAG_LINT1) 1008 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); 1009 1010 amd_iommu_apply_erratum_63(devid); 1011 1012 set_iommu_for_device(iommu, devid); 1013} 1014 1015static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line) 1016{ 1017 struct devid_map *entry; 1018 struct list_head *list; 1019 1020 if (type == IVHD_SPECIAL_IOAPIC) 1021 list = &ioapic_map; 1022 else if (type == IVHD_SPECIAL_HPET) 1023 list = &hpet_map; 1024 else 1025 return -EINVAL; 1026 1027 list_for_each_entry(entry, list, list) { 1028 if (!(entry->id == id && entry->cmd_line)) 1029 continue; 1030 1031 pr_info("AMD-Vi: Command-line override present for %s id %d - ignoring\n", 1032 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id); 1033 1034 *devid = entry->devid; 1035 1036 return 0; 1037 } 1038 1039 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1040 if (!entry) 1041 return -ENOMEM; 1042 1043 entry->id = id; 1044 entry->devid = *devid; 1045 entry->cmd_line = cmd_line; 1046 1047 list_add_tail(&entry->list, list); 1048 1049 return 0; 1050} 1051 1052static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid, 1053 bool cmd_line) 1054{ 1055 struct acpihid_map_entry *entry; 1056 struct list_head *list = &acpihid_map; 1057 1058 list_for_each_entry(entry, list, list) { 1059 if (strcmp(entry->hid, hid) || 1060 (*uid && *entry->uid && strcmp(entry->uid, uid)) || 1061 !entry->cmd_line) 1062 continue; 1063 1064 pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n", 1065 hid, uid); 1066 *devid = entry->devid; 1067 return 0; 1068 } 1069 1070 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 1071 if (!entry) 1072 return -ENOMEM; 1073 1074 memcpy(entry->uid, uid, strlen(uid)); 1075 memcpy(entry->hid, hid, strlen(hid)); 1076 entry->devid = *devid; 1077 entry->cmd_line = cmd_line; 1078 entry->root_devid = (entry->devid & (~0x7)); 1079 1080 pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n", 1081 entry->cmd_line ? "cmd" : "ivrs", 1082 entry->hid, entry->uid, entry->root_devid); 1083 1084 list_add_tail(&entry->list, list); 1085 return 0; 1086} 1087 1088static int __init add_early_maps(void) 1089{ 1090 int i, ret; 1091 1092 for (i = 0; i < early_ioapic_map_size; ++i) { 1093 ret = add_special_device(IVHD_SPECIAL_IOAPIC, 1094 early_ioapic_map[i].id, 1095 &early_ioapic_map[i].devid, 1096 early_ioapic_map[i].cmd_line); 1097 if (ret) 1098 return ret; 1099 } 1100 1101 for (i = 0; i < early_hpet_map_size; ++i) { 1102 ret = add_special_device(IVHD_SPECIAL_HPET, 1103 early_hpet_map[i].id, 1104 &early_hpet_map[i].devid, 1105 early_hpet_map[i].cmd_line); 1106 if (ret) 1107 return ret; 1108 } 1109 1110 for (i = 0; i < early_acpihid_map_size; ++i) { 1111 ret = add_acpi_hid_device(early_acpihid_map[i].hid, 1112 early_acpihid_map[i].uid, 1113 &early_acpihid_map[i].devid, 1114 early_acpihid_map[i].cmd_line); 1115 if (ret) 1116 return ret; 1117 } 1118 1119 return 0; 1120} 1121 1122/* 1123 * Reads the device exclusion range from ACPI and initializes the IOMMU with 1124 * it 1125 */ 1126static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) 1127{ 1128 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; 1129 1130 if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) 1131 return; 1132 1133 if (iommu) { 1134 /* 1135 * We only can configure exclusion ranges per IOMMU, not 1136 * per device. But we can enable the exclusion range per 1137 * device. This is done here 1138 */ 1139 set_dev_entry_bit(devid, DEV_ENTRY_EX); 1140 iommu->exclusion_start = m->range_start; 1141 iommu->exclusion_length = m->range_length; 1142 } 1143} 1144 1145/* 1146 * Takes a pointer to an AMD IOMMU entry in the ACPI table and 1147 * initializes the hardware and our data structures with it. 1148 */ 1149static int __init init_iommu_from_acpi(struct amd_iommu *iommu, 1150 struct ivhd_header *h) 1151{ 1152 u8 *p = (u8 *)h; 1153 u8 *end = p, flags = 0; 1154 u16 devid = 0, devid_start = 0, devid_to = 0; 1155 u32 dev_i, ext_flags = 0; 1156 bool alias = false; 1157 struct ivhd_entry *e; 1158 u32 ivhd_size; 1159 int ret; 1160 1161 1162 ret = add_early_maps(); 1163 if (ret) 1164 return ret; 1165 1166 /* 1167 * First save the recommended feature enable bits from ACPI 1168 */ 1169 iommu->acpi_flags = h->flags; 1170 1171 /* 1172 * Done. Now parse the device entries 1173 */ 1174 ivhd_size = get_ivhd_header_size(h); 1175 if (!ivhd_size) { 1176 pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type); 1177 return -EINVAL; 1178 } 1179 1180 p += ivhd_size; 1181 1182 end += h->length; 1183 1184 1185 while (p < end) { 1186 e = (struct ivhd_entry *)p; 1187 switch (e->type) { 1188 case IVHD_DEV_ALL: 1189 1190 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags); 1191 1192 for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i) 1193 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0); 1194 break; 1195 case IVHD_DEV_SELECT: 1196 1197 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x " 1198 "flags: %02x\n", 1199 PCI_BUS_NUM(e->devid), 1200 PCI_SLOT(e->devid), 1201 PCI_FUNC(e->devid), 1202 e->flags); 1203 1204 devid = e->devid; 1205 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1206 break; 1207 case IVHD_DEV_SELECT_RANGE_START: 1208 1209 DUMP_printk(" DEV_SELECT_RANGE_START\t " 1210 "devid: %02x:%02x.%x flags: %02x\n", 1211 PCI_BUS_NUM(e->devid), 1212 PCI_SLOT(e->devid), 1213 PCI_FUNC(e->devid), 1214 e->flags); 1215 1216 devid_start = e->devid; 1217 flags = e->flags; 1218 ext_flags = 0; 1219 alias = false; 1220 break; 1221 case IVHD_DEV_ALIAS: 1222 1223 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x " 1224 "flags: %02x devid_to: %02x:%02x.%x\n", 1225 PCI_BUS_NUM(e->devid), 1226 PCI_SLOT(e->devid), 1227 PCI_FUNC(e->devid), 1228 e->flags, 1229 PCI_BUS_NUM(e->ext >> 8), 1230 PCI_SLOT(e->ext >> 8), 1231 PCI_FUNC(e->ext >> 8)); 1232 1233 devid = e->devid; 1234 devid_to = e->ext >> 8; 1235 set_dev_entry_from_acpi(iommu, devid , e->flags, 0); 1236 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0); 1237 amd_iommu_alias_table[devid] = devid_to; 1238 break; 1239 case IVHD_DEV_ALIAS_RANGE: 1240 1241 DUMP_printk(" DEV_ALIAS_RANGE\t\t " 1242 "devid: %02x:%02x.%x flags: %02x " 1243 "devid_to: %02x:%02x.%x\n", 1244 PCI_BUS_NUM(e->devid), 1245 PCI_SLOT(e->devid), 1246 PCI_FUNC(e->devid), 1247 e->flags, 1248 PCI_BUS_NUM(e->ext >> 8), 1249 PCI_SLOT(e->ext >> 8), 1250 PCI_FUNC(e->ext >> 8)); 1251 1252 devid_start = e->devid; 1253 flags = e->flags; 1254 devid_to = e->ext >> 8; 1255 ext_flags = 0; 1256 alias = true; 1257 break; 1258 case IVHD_DEV_EXT_SELECT: 1259 1260 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x " 1261 "flags: %02x ext: %08x\n", 1262 PCI_BUS_NUM(e->devid), 1263 PCI_SLOT(e->devid), 1264 PCI_FUNC(e->devid), 1265 e->flags, e->ext); 1266 1267 devid = e->devid; 1268 set_dev_entry_from_acpi(iommu, devid, e->flags, 1269 e->ext); 1270 break; 1271 case IVHD_DEV_EXT_SELECT_RANGE: 1272 1273 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: " 1274 "%02x:%02x.%x flags: %02x ext: %08x\n", 1275 PCI_BUS_NUM(e->devid), 1276 PCI_SLOT(e->devid), 1277 PCI_FUNC(e->devid), 1278 e->flags, e->ext); 1279 1280 devid_start = e->devid; 1281 flags = e->flags; 1282 ext_flags = e->ext; 1283 alias = false; 1284 break; 1285 case IVHD_DEV_RANGE_END: 1286 1287 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n", 1288 PCI_BUS_NUM(e->devid), 1289 PCI_SLOT(e->devid), 1290 PCI_FUNC(e->devid)); 1291 1292 devid = e->devid; 1293 for (dev_i = devid_start; dev_i <= devid; ++dev_i) { 1294 if (alias) { 1295 amd_iommu_alias_table[dev_i] = devid_to; 1296 set_dev_entry_from_acpi(iommu, 1297 devid_to, flags, ext_flags); 1298 } 1299 set_dev_entry_from_acpi(iommu, dev_i, 1300 flags, ext_flags); 1301 } 1302 break; 1303 case IVHD_DEV_SPECIAL: { 1304 u8 handle, type; 1305 const char *var; 1306 u16 devid; 1307 int ret; 1308 1309 handle = e->ext & 0xff; 1310 devid = (e->ext >> 8) & 0xffff; 1311 type = (e->ext >> 24) & 0xff; 1312 1313 if (type == IVHD_SPECIAL_IOAPIC) 1314 var = "IOAPIC"; 1315 else if (type == IVHD_SPECIAL_HPET) 1316 var = "HPET"; 1317 else 1318 var = "UNKNOWN"; 1319 1320 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n", 1321 var, (int)handle, 1322 PCI_BUS_NUM(devid), 1323 PCI_SLOT(devid), 1324 PCI_FUNC(devid)); 1325 1326 ret = add_special_device(type, handle, &devid, false); 1327 if (ret) 1328 return ret; 1329 1330 /* 1331 * add_special_device might update the devid in case a 1332 * command-line override is present. So call 1333 * set_dev_entry_from_acpi after add_special_device. 1334 */ 1335 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1336 1337 break; 1338 } 1339 case IVHD_DEV_ACPI_HID: { 1340 u16 devid; 1341 u8 hid[ACPIHID_HID_LEN] = {0}; 1342 u8 uid[ACPIHID_UID_LEN] = {0}; 1343 int ret; 1344 1345 if (h->type != 0x40) { 1346 pr_err(FW_BUG "Invalid IVHD device type %#x\n", 1347 e->type); 1348 break; 1349 } 1350 1351 memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1); 1352 hid[ACPIHID_HID_LEN - 1] = '\0'; 1353 1354 if (!(*hid)) { 1355 pr_err(FW_BUG "Invalid HID.\n"); 1356 break; 1357 } 1358 1359 switch (e->uidf) { 1360 case UID_NOT_PRESENT: 1361 1362 if (e->uidl != 0) 1363 pr_warn(FW_BUG "Invalid UID length.\n"); 1364 1365 break; 1366 case UID_IS_INTEGER: 1367 1368 sprintf(uid, "%d", e->uid); 1369 1370 break; 1371 case UID_IS_CHARACTER: 1372 1373 memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1); 1374 uid[ACPIHID_UID_LEN - 1] = '\0'; 1375 1376 break; 1377 default: 1378 break; 1379 } 1380 1381 devid = e->devid; 1382 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n", 1383 hid, uid, 1384 PCI_BUS_NUM(devid), 1385 PCI_SLOT(devid), 1386 PCI_FUNC(devid)); 1387 1388 flags = e->flags; 1389 1390 ret = add_acpi_hid_device(hid, uid, &devid, false); 1391 if (ret) 1392 return ret; 1393 1394 /* 1395 * add_special_device might update the devid in case a 1396 * command-line override is present. So call 1397 * set_dev_entry_from_acpi after add_special_device. 1398 */ 1399 set_dev_entry_from_acpi(iommu, devid, e->flags, 0); 1400 1401 break; 1402 } 1403 default: 1404 break; 1405 } 1406 1407 p += ivhd_entry_length(p); 1408 } 1409 1410 return 0; 1411} 1412 1413static void __init free_iommu_one(struct amd_iommu *iommu) 1414{ 1415 free_command_buffer(iommu); 1416 free_event_buffer(iommu); 1417 free_ppr_log(iommu); 1418 free_ga_log(iommu); 1419 iommu_unmap_mmio_space(iommu); 1420} 1421 1422static void __init free_iommu_all(void) 1423{ 1424 struct amd_iommu *iommu, *next; 1425 1426 for_each_iommu_safe(iommu, next) { 1427 list_del(&iommu->list); 1428 free_iommu_one(iommu); 1429 kfree(iommu); 1430 } 1431} 1432 1433/* 1434 * Family15h Model 10h-1fh erratum 746 (IOMMU Logging May Stall Translations) 1435 * Workaround: 1436 * BIOS should disable L2B micellaneous clock gating by setting 1437 * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b 1438 */ 1439static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) 1440{ 1441 u32 value; 1442 1443 if ((boot_cpu_data.x86 != 0x15) || 1444 (boot_cpu_data.x86_model < 0x10) || 1445 (boot_cpu_data.x86_model > 0x1f)) 1446 return; 1447 1448 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1449 pci_read_config_dword(iommu->dev, 0xf4, &value); 1450 1451 if (value & BIT(2)) 1452 return; 1453 1454 /* Select NB indirect register 0x90 and enable writing */ 1455 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8)); 1456 1457 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4); 1458 pr_info("AMD-Vi: Applying erratum 746 workaround for IOMMU at %s\n", 1459 dev_name(&iommu->dev->dev)); 1460 1461 /* Clear the enable writing bit */ 1462 pci_write_config_dword(iommu->dev, 0xf0, 0x90); 1463} 1464 1465/* 1466 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission) 1467 * Workaround: 1468 * BIOS should enable ATS write permission check by setting 1469 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b 1470 */ 1471static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu) 1472{ 1473 u32 value; 1474 1475 if ((boot_cpu_data.x86 != 0x15) || 1476 (boot_cpu_data.x86_model < 0x30) || 1477 (boot_cpu_data.x86_model > 0x3f)) 1478 return; 1479 1480 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */ 1481 value = iommu_read_l2(iommu, 0x47); 1482 1483 if (value & BIT(0)) 1484 return; 1485 1486 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */ 1487 iommu_write_l2(iommu, 0x47, value | BIT(0)); 1488 1489 pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n", 1490 dev_name(&iommu->dev->dev)); 1491} 1492 1493/* 1494 * This function clues the initialization function for one IOMMU 1495 * together and also allocates the command buffer and programs the 1496 * hardware. It does NOT enable the IOMMU. This is done afterwards. 1497 */ 1498static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) 1499{ 1500 int ret; 1501 1502 raw_spin_lock_init(&iommu->lock); 1503 1504 /* Add IOMMU to internal data structures */ 1505 list_add_tail(&iommu->list, &amd_iommu_list); 1506 iommu->index = amd_iommus_present++; 1507 1508 if (unlikely(iommu->index >= MAX_IOMMUS)) { 1509 WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); 1510 return -ENOSYS; 1511 } 1512 1513 /* Index is fine - add IOMMU to the array */ 1514 amd_iommus[iommu->index] = iommu; 1515 1516 /* 1517 * Copy data from ACPI table entry to the iommu struct 1518 */ 1519 iommu->devid = h->devid; 1520 iommu->cap_ptr = h->cap_ptr; 1521 iommu->pci_seg = h->pci_seg; 1522 iommu->mmio_phys = h->mmio_phys; 1523 1524 switch (h->type) { 1525 case 0x10: 1526 /* Check if IVHD EFR contains proper max banks/counters */ 1527 if ((h->efr_attr != 0) && 1528 ((h->efr_attr & (0xF << 13)) != 0) && 1529 ((h->efr_attr & (0x3F << 17)) != 0)) 1530 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; 1531 else 1532 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; 1533 if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) 1534 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; 1535 if (((h->efr_attr & (0x1 << IOMMU_FEAT_XTSUP_SHIFT)) == 0)) 1536 amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; 1537 break; 1538 case 0x11: 1539 case 0x40: 1540 if (h->efr_reg & (1 << 9)) 1541 iommu->mmio_phys_end = MMIO_REG_END_OFFSET; 1542 else 1543 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; 1544 if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) 1545 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; 1546 if (((h->efr_reg & (0x1 << IOMMU_EFR_XTSUP_SHIFT)) == 0)) 1547 amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE; 1548 break; 1549 default: 1550 return -EINVAL; 1551 } 1552 1553 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys, 1554 iommu->mmio_phys_end); 1555 if (!iommu->mmio_base) 1556 return -ENOMEM; 1557 1558 if (alloc_command_buffer(iommu)) 1559 return -ENOMEM; 1560 1561 if (alloc_event_buffer(iommu)) 1562 return -ENOMEM; 1563 1564 iommu->int_enabled = false; 1565 1566 init_translation_status(iommu); 1567 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) { 1568 iommu_disable(iommu); 1569 clear_translation_pre_enabled(iommu); 1570 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n", 1571 iommu->index); 1572 } 1573 if (amd_iommu_pre_enabled) 1574 amd_iommu_pre_enabled = translation_pre_enabled(iommu); 1575 1576 ret = init_iommu_from_acpi(iommu, h); 1577 if (ret) 1578 return ret; 1579 1580 ret = amd_iommu_create_irq_domain(iommu); 1581 if (ret) 1582 return ret; 1583 1584 /* 1585 * Make sure IOMMU is not considered to translate itself. The IVRS 1586 * table tells us so, but this is a lie! 1587 */ 1588 amd_iommu_rlookup_table[iommu->devid] = NULL; 1589 1590 return 0; 1591} 1592 1593/** 1594 * get_highest_supported_ivhd_type - Look up the appropriate IVHD type 1595 * @ivrs Pointer to the IVRS header 1596 * 1597 * This function search through all IVDB of the maximum supported IVHD 1598 */ 1599static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs) 1600{ 1601 u8 *base = (u8 *)ivrs; 1602 struct ivhd_header *ivhd = (struct ivhd_header *) 1603 (base + IVRS_HEADER_LENGTH); 1604 u8 last_type = ivhd->type; 1605 u16 devid = ivhd->devid; 1606 1607 while (((u8 *)ivhd - base < ivrs->length) && 1608 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) { 1609 u8 *p = (u8 *) ivhd; 1610 1611 if (ivhd->devid == devid) 1612 last_type = ivhd->type; 1613 ivhd = (struct ivhd_header *)(p + ivhd->length); 1614 } 1615 1616 return last_type; 1617} 1618 1619/* 1620 * Iterates over all IOMMU entries in the ACPI table, allocates the 1621 * IOMMU structure and initializes it with init_iommu_one() 1622 */ 1623static int __init init_iommu_all(struct acpi_table_header *table) 1624{ 1625 u8 *p = (u8 *)table, *end = (u8 *)table; 1626 struct ivhd_header *h; 1627 struct amd_iommu *iommu; 1628 int ret; 1629 1630 end += table->length; 1631 p += IVRS_HEADER_LENGTH; 1632 1633 while (p < end) { 1634 h = (struct ivhd_header *)p; 1635 if (*p == amd_iommu_target_ivhd_type) { 1636 1637 DUMP_printk("device: %02x:%02x.%01x cap: %04x " 1638 "seg: %d flags: %01x info %04x\n", 1639 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid), 1640 PCI_FUNC(h->devid), h->cap_ptr, 1641 h->pci_seg, h->flags, h->info); 1642 DUMP_printk(" mmio-addr: %016llx\n", 1643 h->mmio_phys); 1644 1645 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL); 1646 if (iommu == NULL) 1647 return -ENOMEM; 1648 1649 ret = init_iommu_one(iommu, h); 1650 if (ret) 1651 return ret; 1652 } 1653 p += h->length; 1654 1655 } 1656 WARN_ON(p != end); 1657 1658 return 0; 1659} 1660 1661static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, 1662 u8 fxn, u64 *value, bool is_write); 1663 1664static void init_iommu_perf_ctr(struct amd_iommu *iommu) 1665{ 1666 u64 val = 0xabcd, val2 = 0; 1667 1668 if (!iommu_feature(iommu, FEATURE_PC)) 1669 return; 1670 1671 amd_iommu_pc_present = true; 1672 1673 /* Check if the performance counters can be written to */ 1674 if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || 1675 (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || 1676 (val != val2)) { 1677 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); 1678 amd_iommu_pc_present = false; 1679 return; 1680 } 1681 1682 pr_info("AMD-Vi: IOMMU performance counters supported\n"); 1683 1684 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); 1685 iommu->max_banks = (u8) ((val >> 12) & 0x3f); 1686 iommu->max_counters = (u8) ((val >> 7) & 0xf); 1687} 1688 1689static ssize_t amd_iommu_show_cap(struct device *dev, 1690 struct device_attribute *attr, 1691 char *buf) 1692{ 1693 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 1694 return sprintf(buf, "%x\n", iommu->cap); 1695} 1696static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL); 1697 1698static ssize_t amd_iommu_show_features(struct device *dev, 1699 struct device_attribute *attr, 1700 char *buf) 1701{ 1702 struct amd_iommu *iommu = dev_to_amd_iommu(dev); 1703 return sprintf(buf, "%llx\n", iommu->features); 1704} 1705static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL); 1706 1707static struct attribute *amd_iommu_attrs[] = { 1708 &dev_attr_cap.attr, 1709 &dev_attr_features.attr, 1710 NULL, 1711}; 1712 1713static struct attribute_group amd_iommu_group = { 1714 .name = "amd-iommu", 1715 .attrs = amd_iommu_attrs, 1716}; 1717 1718static const struct attribute_group *amd_iommu_groups[] = { 1719 &amd_iommu_group, 1720 NULL, 1721}; 1722 1723static int __init iommu_init_pci(struct amd_iommu *iommu) 1724{ 1725 int cap_ptr = iommu->cap_ptr; 1726 u32 range, misc, low, high; 1727 int ret; 1728 1729 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid), 1730 iommu->devid & 0xff); 1731 if (!iommu->dev) 1732 return -ENODEV; 1733 1734 /* Prevent binding other PCI device drivers to IOMMU devices */ 1735 iommu->dev->match_driver = false; 1736 1737 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, 1738 &iommu->cap); 1739 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, 1740 &range); 1741 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET, 1742 &misc); 1743 1744 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) 1745 amd_iommu_iotlb_sup = false; 1746 1747 /* read extended feature bits */ 1748 low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); 1749 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); 1750 1751 iommu->features = ((u64)high << 32) | low; 1752 1753 if (iommu_feature(iommu, FEATURE_GT)) { 1754 int glxval; 1755 u32 max_pasid; 1756 u64 pasmax; 1757 1758 pasmax = iommu->features & FEATURE_PASID_MASK; 1759 pasmax >>= FEATURE_PASID_SHIFT; 1760 max_pasid = (1 << (pasmax + 1)) - 1; 1761 1762 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid); 1763 1764 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK); 1765 1766 glxval = iommu->features & FEATURE_GLXVAL_MASK; 1767 glxval >>= FEATURE_GLXVAL_SHIFT; 1768 1769 if (amd_iommu_max_glx_val == -1) 1770 amd_iommu_max_glx_val = glxval; 1771 else 1772 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval); 1773 } 1774 1775 if (iommu_feature(iommu, FEATURE_GT) && 1776 iommu_feature(iommu, FEATURE_PPR)) { 1777 iommu->is_iommu_v2 = true; 1778 amd_iommu_v2_present = true; 1779 } 1780 1781 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu)) 1782 return -ENOMEM; 1783 1784 ret = iommu_init_ga(iommu); 1785 if (ret) 1786 return ret; 1787 1788 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) 1789 amd_iommu_np_cache = true; 1790 1791 init_iommu_perf_ctr(iommu); 1792 1793 if (is_rd890_iommu(iommu->dev)) { 1794 int i, j; 1795 1796 iommu->root_pdev = 1797 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number, 1798 PCI_DEVFN(0, 0)); 1799 1800 /* 1801 * Some rd890 systems may not be fully reconfigured by the 1802 * BIOS, so it's necessary for us to store this information so 1803 * it can be reprogrammed on resume 1804 */ 1805 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4, 1806 &iommu->stored_addr_lo); 1807 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8, 1808 &iommu->stored_addr_hi); 1809 1810 /* Low bit locks writes to configuration space */ 1811 iommu->stored_addr_lo &= ~1; 1812 1813 for (i = 0; i < 6; i++) 1814 for (j = 0; j < 0x12; j++) 1815 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j); 1816 1817 for (i = 0; i < 0x83; i++) 1818 iommu->stored_l2[i] = iommu_read_l2(iommu, i); 1819 } 1820 1821 amd_iommu_erratum_746_workaround(iommu); 1822 amd_iommu_ats_write_check_workaround(iommu); 1823 1824 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, 1825 amd_iommu_groups, "ivhd%d", iommu->index); 1826 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops); 1827 iommu_device_register(&iommu->iommu); 1828 1829 return pci_enable_device(iommu->dev); 1830} 1831 1832static void print_iommu_info(void) 1833{ 1834 static const char * const feat_str[] = { 1835 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", 1836 "IA", "GA", "HE", "PC" 1837 }; 1838 struct amd_iommu *iommu; 1839 1840 for_each_iommu(iommu) { 1841 int i; 1842 1843 pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n", 1844 dev_name(&iommu->dev->dev), iommu->cap_ptr); 1845 1846 if (iommu->cap & (1 << IOMMU_CAP_EFR)) { 1847 pr_info("AMD-Vi: Extended features (%#llx):\n", 1848 iommu->features); 1849 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { 1850 if (iommu_feature(iommu, (1ULL << i))) 1851 pr_cont(" %s", feat_str[i]); 1852 } 1853 1854 if (iommu->features & FEATURE_GAM_VAPIC) 1855 pr_cont(" GA_vAPIC"); 1856 1857 pr_cont("\n"); 1858 } 1859 } 1860 if (irq_remapping_enabled) { 1861 pr_info("AMD-Vi: Interrupt remapping enabled\n"); 1862 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 1863 pr_info("AMD-Vi: virtual APIC enabled\n"); 1864 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE) 1865 pr_info("AMD-Vi: X2APIC enabled\n"); 1866 } 1867} 1868 1869static int __init amd_iommu_init_pci(void) 1870{ 1871 struct amd_iommu *iommu; 1872 int ret = 0; 1873 1874 for_each_iommu(iommu) { 1875 ret = iommu_init_pci(iommu); 1876 if (ret) 1877 break; 1878 } 1879 1880 /* 1881 * Order is important here to make sure any unity map requirements are 1882 * fulfilled. The unity mappings are created and written to the device 1883 * table during the amd_iommu_init_api() call. 1884 * 1885 * After that we call init_device_table_dma() to make sure any 1886 * uninitialized DTE will block DMA, and in the end we flush the caches 1887 * of all IOMMUs to make sure the changes to the device table are 1888 * active. 1889 */ 1890 ret = amd_iommu_init_api(); 1891 1892 init_device_table_dma(); 1893 1894 for_each_iommu(iommu) 1895 iommu_flush_all_caches(iommu); 1896 1897 if (!ret) 1898 print_iommu_info(); 1899 1900 return ret; 1901} 1902 1903/**************************************************************************** 1904 * 1905 * The following functions initialize the MSI interrupts for all IOMMUs 1906 * in the system. It's a bit challenging because there could be multiple 1907 * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per 1908 * pci_dev. 1909 * 1910 ****************************************************************************/ 1911 1912static int iommu_setup_msi(struct amd_iommu *iommu) 1913{ 1914 int r; 1915 1916 r = pci_enable_msi(iommu->dev); 1917 if (r) 1918 return r; 1919 1920 r = request_threaded_irq(iommu->dev->irq, 1921 amd_iommu_int_handler, 1922 amd_iommu_int_thread, 1923 0, "AMD-Vi", 1924 iommu); 1925 1926 if (r) { 1927 pci_disable_msi(iommu->dev); 1928 return r; 1929 } 1930 1931 iommu->int_enabled = true; 1932 1933 return 0; 1934} 1935 1936static int iommu_init_msi(struct amd_iommu *iommu) 1937{ 1938 int ret; 1939 1940 if (iommu->int_enabled) 1941 goto enable_faults; 1942 1943 if (iommu->dev->msi_cap) 1944 ret = iommu_setup_msi(iommu); 1945 else 1946 ret = -ENODEV; 1947 1948 if (ret) 1949 return ret; 1950 1951enable_faults: 1952 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN); 1953 1954 if (iommu->ppr_log != NULL) 1955 iommu_feature_enable(iommu, CONTROL_PPFINT_EN); 1956 1957 iommu_ga_log_enable(iommu); 1958 1959 return 0; 1960} 1961 1962/**************************************************************************** 1963 * 1964 * The next functions belong to the third pass of parsing the ACPI 1965 * table. In this last pass the memory mapping requirements are 1966 * gathered (like exclusion and unity mapping ranges). 1967 * 1968 ****************************************************************************/ 1969 1970static void __init free_unity_maps(void) 1971{ 1972 struct unity_map_entry *entry, *next; 1973 1974 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) { 1975 list_del(&entry->list); 1976 kfree(entry); 1977 } 1978} 1979 1980/* called when we find an exclusion range definition in ACPI */ 1981static int __init init_exclusion_range(struct ivmd_header *m) 1982{ 1983 int i; 1984 1985 switch (m->type) { 1986 case ACPI_IVMD_TYPE: 1987 set_device_exclusion_range(m->devid, m); 1988 break; 1989 case ACPI_IVMD_TYPE_ALL: 1990 for (i = 0; i <= amd_iommu_last_bdf; ++i) 1991 set_device_exclusion_range(i, m); 1992 break; 1993 case ACPI_IVMD_TYPE_RANGE: 1994 for (i = m->devid; i <= m->aux; ++i) 1995 set_device_exclusion_range(i, m); 1996 break; 1997 default: 1998 break; 1999 } 2000 2001 return 0; 2002} 2003 2004/* called for unity map ACPI definition */ 2005static int __init init_unity_map_range(struct ivmd_header *m) 2006{ 2007 struct unity_map_entry *e = NULL; 2008 char *s; 2009 2010 e = kzalloc(sizeof(*e), GFP_KERNEL); 2011 if (e == NULL) 2012 return -ENOMEM; 2013 2014 switch (m->type) { 2015 default: 2016 kfree(e); 2017 return 0; 2018 case ACPI_IVMD_TYPE: 2019 s = "IVMD_TYPEi\t\t\t"; 2020 e->devid_start = e->devid_end = m->devid; 2021 break; 2022 case ACPI_IVMD_TYPE_ALL: 2023 s = "IVMD_TYPE_ALL\t\t"; 2024 e->devid_start = 0; 2025 e->devid_end = amd_iommu_last_bdf; 2026 break; 2027 case ACPI_IVMD_TYPE_RANGE: 2028 s = "IVMD_TYPE_RANGE\t\t"; 2029 e->devid_start = m->devid; 2030 e->devid_end = m->aux; 2031 break; 2032 } 2033 e->address_start = PAGE_ALIGN(m->range_start); 2034 e->address_end = e->address_start + PAGE_ALIGN(m->range_length); 2035 e->prot = m->flags >> 1; 2036 2037 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" 2038 " range_start: %016llx range_end: %016llx flags: %x\n", s, 2039 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), 2040 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end), 2041 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end), 2042 e->address_start, e->address_end, m->flags); 2043 2044 list_add_tail(&e->list, &amd_iommu_unity_map); 2045 2046 return 0; 2047} 2048 2049/* iterates over all memory definitions we find in the ACPI table */ 2050static int __init init_memory_definitions(struct acpi_table_header *table) 2051{ 2052 u8 *p = (u8 *)table, *end = (u8 *)table; 2053 struct ivmd_header *m; 2054 2055 end += table->length; 2056 p += IVRS_HEADER_LENGTH; 2057 2058 while (p < end) { 2059 m = (struct ivmd_header *)p; 2060 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2061 init_exclusion_range(m); 2062 else if (m->flags & IVMD_FLAG_UNITY_MAP) 2063 init_unity_map_range(m); 2064 2065 p += m->length; 2066 } 2067 2068 return 0; 2069} 2070 2071/* 2072 * Init the device table to not allow DMA access for devices 2073 */ 2074static void init_device_table_dma(void) 2075{ 2076 u32 devid; 2077 2078 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 2079 set_dev_entry_bit(devid, DEV_ENTRY_VALID); 2080 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); 2081 } 2082} 2083 2084static void __init uninit_device_table_dma(void) 2085{ 2086 u32 devid; 2087 2088 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 2089 amd_iommu_dev_table[devid].data[0] = 0ULL; 2090 amd_iommu_dev_table[devid].data[1] = 0ULL; 2091 } 2092} 2093 2094static void init_device_table(void) 2095{ 2096 u32 devid; 2097 2098 if (!amd_iommu_irq_remap) 2099 return; 2100 2101 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) 2102 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); 2103} 2104 2105static void iommu_init_flags(struct amd_iommu *iommu) 2106{ 2107 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? 2108 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) : 2109 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN); 2110 2111 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ? 2112 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) : 2113 iommu_feature_disable(iommu, CONTROL_PASSPW_EN); 2114 2115 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ? 2116 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) : 2117 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN); 2118 2119 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ? 2120 iommu_feature_enable(iommu, CONTROL_ISOC_EN) : 2121 iommu_feature_disable(iommu, CONTROL_ISOC_EN); 2122 2123 /* 2124 * make IOMMU memory accesses cache coherent 2125 */ 2126 iommu_feature_enable(iommu, CONTROL_COHERENT_EN); 2127 2128 /* Set IOTLB invalidation timeout to 1s */ 2129 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S); 2130} 2131 2132static void iommu_apply_resume_quirks(struct amd_iommu *iommu) 2133{ 2134 int i, j; 2135 u32 ioc_feature_control; 2136 struct pci_dev *pdev = iommu->root_pdev; 2137 2138 /* RD890 BIOSes may not have completely reconfigured the iommu */ 2139 if (!is_rd890_iommu(iommu->dev) || !pdev) 2140 return; 2141 2142 /* 2143 * First, we need to ensure that the iommu is enabled. This is 2144 * controlled by a register in the northbridge 2145 */ 2146 2147 /* Select Northbridge indirect register 0x75 and enable writing */ 2148 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); 2149 pci_read_config_dword(pdev, 0x64, &ioc_feature_control); 2150 2151 /* Enable the iommu */ 2152 if (!(ioc_feature_control & 0x1)) 2153 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); 2154 2155 /* Restore the iommu BAR */ 2156 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 2157 iommu->stored_addr_lo); 2158 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8, 2159 iommu->stored_addr_hi); 2160 2161 /* Restore the l1 indirect regs for each of the 6 l1s */ 2162 for (i = 0; i < 6; i++) 2163 for (j = 0; j < 0x12; j++) 2164 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]); 2165 2166 /* Restore the l2 indirect regs */ 2167 for (i = 0; i < 0x83; i++) 2168 iommu_write_l2(iommu, i, iommu->stored_l2[i]); 2169 2170 /* Lock PCI setup registers */ 2171 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 2172 iommu->stored_addr_lo | 1); 2173} 2174 2175static void iommu_enable_ga(struct amd_iommu *iommu) 2176{ 2177#ifdef CONFIG_IRQ_REMAP 2178 switch (amd_iommu_guest_ir) { 2179 case AMD_IOMMU_GUEST_IR_VAPIC: 2180 iommu_feature_enable(iommu, CONTROL_GAM_EN); 2181 /* Fall through */ 2182 case AMD_IOMMU_GUEST_IR_LEGACY_GA: 2183 iommu_feature_enable(iommu, CONTROL_GA_EN); 2184 iommu->irte_ops = &irte_128_ops; 2185 break; 2186 default: 2187 iommu->irte_ops = &irte_32_ops; 2188 break; 2189 } 2190#endif 2191} 2192 2193static void early_enable_iommu(struct amd_iommu *iommu) 2194{ 2195 iommu_disable(iommu); 2196 iommu_init_flags(iommu); 2197 iommu_set_device_table(iommu); 2198 iommu_enable_command_buffer(iommu); 2199 iommu_enable_event_buffer(iommu); 2200 iommu_set_exclusion_range(iommu); 2201 iommu_enable_ga(iommu); 2202 iommu_enable_xt(iommu); 2203 iommu_enable(iommu); 2204 iommu_flush_all_caches(iommu); 2205} 2206 2207/* 2208 * This function finally enables all IOMMUs found in the system after 2209 * they have been initialized. 2210 * 2211 * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy 2212 * the old content of device table entries. Not this case or copy failed, 2213 * just continue as normal kernel does. 2214 */ 2215static void early_enable_iommus(void) 2216{ 2217 struct amd_iommu *iommu; 2218 2219 2220 if (!copy_device_table()) { 2221 /* 2222 * If come here because of failure in copying device table from old 2223 * kernel with all IOMMUs enabled, print error message and try to 2224 * free allocated old_dev_tbl_cpy. 2225 */ 2226 if (amd_iommu_pre_enabled) 2227 pr_err("Failed to copy DEV table from previous kernel.\n"); 2228 if (old_dev_tbl_cpy != NULL) 2229 free_pages((unsigned long)old_dev_tbl_cpy, 2230 get_order(dev_table_size)); 2231 2232 for_each_iommu(iommu) { 2233 clear_translation_pre_enabled(iommu); 2234 early_enable_iommu(iommu); 2235 } 2236 } else { 2237 pr_info("Copied DEV table from previous kernel.\n"); 2238 free_pages((unsigned long)amd_iommu_dev_table, 2239 get_order(dev_table_size)); 2240 amd_iommu_dev_table = old_dev_tbl_cpy; 2241 for_each_iommu(iommu) { 2242 iommu_disable_command_buffer(iommu); 2243 iommu_disable_event_buffer(iommu); 2244 iommu_enable_command_buffer(iommu); 2245 iommu_enable_event_buffer(iommu); 2246 iommu_enable_ga(iommu); 2247 iommu_enable_xt(iommu); 2248 iommu_set_device_table(iommu); 2249 iommu_flush_all_caches(iommu); 2250 } 2251 } 2252 2253#ifdef CONFIG_IRQ_REMAP 2254 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 2255 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP); 2256#endif 2257} 2258 2259static void enable_iommus_v2(void) 2260{ 2261 struct amd_iommu *iommu; 2262 2263 for_each_iommu(iommu) { 2264 iommu_enable_ppr_log(iommu); 2265 iommu_enable_gt(iommu); 2266 } 2267} 2268 2269static void enable_iommus(void) 2270{ 2271 early_enable_iommus(); 2272 2273 enable_iommus_v2(); 2274} 2275 2276static void disable_iommus(void) 2277{ 2278 struct amd_iommu *iommu; 2279 2280 for_each_iommu(iommu) 2281 iommu_disable(iommu); 2282 2283#ifdef CONFIG_IRQ_REMAP 2284 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir)) 2285 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP); 2286#endif 2287} 2288 2289/* 2290 * Suspend/Resume support 2291 * disable suspend until real resume implemented 2292 */ 2293 2294static void amd_iommu_resume(void) 2295{ 2296 struct amd_iommu *iommu; 2297 2298 for_each_iommu(iommu) 2299 iommu_apply_resume_quirks(iommu); 2300 2301 /* re-load the hardware */ 2302 enable_iommus(); 2303 2304 amd_iommu_enable_interrupts(); 2305} 2306 2307static int amd_iommu_suspend(void) 2308{ 2309 /* disable IOMMUs to go out of the way for BIOS */ 2310 disable_iommus(); 2311 2312 return 0; 2313} 2314 2315static struct syscore_ops amd_iommu_syscore_ops = { 2316 .suspend = amd_iommu_suspend, 2317 .resume = amd_iommu_resume, 2318}; 2319 2320static void __init free_iommu_resources(void) 2321{ 2322 kmemleak_free(irq_lookup_table); 2323 free_pages((unsigned long)irq_lookup_table, 2324 get_order(rlookup_table_size)); 2325 irq_lookup_table = NULL; 2326 2327 kmem_cache_destroy(amd_iommu_irq_cache); 2328 amd_iommu_irq_cache = NULL; 2329 2330 free_pages((unsigned long)amd_iommu_rlookup_table, 2331 get_order(rlookup_table_size)); 2332 amd_iommu_rlookup_table = NULL; 2333 2334 free_pages((unsigned long)amd_iommu_alias_table, 2335 get_order(alias_table_size)); 2336 amd_iommu_alias_table = NULL; 2337 2338 free_pages((unsigned long)amd_iommu_dev_table, 2339 get_order(dev_table_size)); 2340 amd_iommu_dev_table = NULL; 2341 2342 free_iommu_all(); 2343 2344#ifdef CONFIG_GART_IOMMU 2345 /* 2346 * We failed to initialize the AMD IOMMU - try fallback to GART 2347 * if possible. 2348 */ 2349 gart_iommu_init(); 2350 2351#endif 2352} 2353 2354/* SB IOAPIC is always on this device in AMD systems */ 2355#define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) 2356 2357static bool __init check_ioapic_information(void) 2358{ 2359 const char *fw_bug = FW_BUG; 2360 bool ret, has_sb_ioapic; 2361 int idx; 2362 2363 has_sb_ioapic = false; 2364 ret = false; 2365 2366 /* 2367 * If we have map overrides on the kernel command line the 2368 * messages in this function might not describe firmware bugs 2369 * anymore - so be careful 2370 */ 2371 if (cmdline_maps) 2372 fw_bug = ""; 2373 2374 for (idx = 0; idx < nr_ioapics; idx++) { 2375 int devid, id = mpc_ioapic_id(idx); 2376 2377 devid = get_ioapic_devid(id); 2378 if (devid < 0) { 2379 pr_err("%sAMD-Vi: IOAPIC[%d] not in IVRS table\n", 2380 fw_bug, id); 2381 ret = false; 2382 } else if (devid == IOAPIC_SB_DEVID) { 2383 has_sb_ioapic = true; 2384 ret = true; 2385 } 2386 } 2387 2388 if (!has_sb_ioapic) { 2389 /* 2390 * We expect the SB IOAPIC to be listed in the IVRS 2391 * table. The system timer is connected to the SB IOAPIC 2392 * and if we don't have it in the list the system will 2393 * panic at boot time. This situation usually happens 2394 * when the BIOS is buggy and provides us the wrong 2395 * device id for the IOAPIC in the system. 2396 */ 2397 pr_err("%sAMD-Vi: No southbridge IOAPIC found\n", fw_bug); 2398 } 2399 2400 if (!ret) 2401 pr_err("AMD-Vi: Disabling interrupt remapping\n"); 2402 2403 return ret; 2404} 2405 2406static void __init free_dma_resources(void) 2407{ 2408 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, 2409 get_order(MAX_DOMAIN_ID/8)); 2410 amd_iommu_pd_alloc_bitmap = NULL; 2411 2412 free_unity_maps(); 2413} 2414 2415/* 2416 * This is the hardware init function for AMD IOMMU in the system. 2417 * This function is called either from amd_iommu_init or from the interrupt 2418 * remapping setup code. 2419 * 2420 * This function basically parses the ACPI table for AMD IOMMU (IVRS) 2421 * four times: 2422 * 2423 * 1 pass) Discover the most comprehensive IVHD type to use. 2424 * 2425 * 2 pass) Find the highest PCI device id the driver has to handle. 2426 * Upon this information the size of the data structures is 2427 * determined that needs to be allocated. 2428 * 2429 * 3 pass) Initialize the data structures just allocated with the 2430 * information in the ACPI table about available AMD IOMMUs 2431 * in the system. It also maps the PCI devices in the 2432 * system to specific IOMMUs 2433 * 2434 * 4 pass) After the basic data structures are allocated and 2435 * initialized we update them with information about memory 2436 * remapping requirements parsed out of the ACPI table in 2437 * this last pass. 2438 * 2439 * After everything is set up the IOMMUs are enabled and the necessary 2440 * hotplug and suspend notifiers are registered. 2441 */ 2442static int __init early_amd_iommu_init(void) 2443{ 2444 struct acpi_table_header *ivrs_base; 2445 acpi_status status; 2446 int i, remap_cache_sz, ret = 0; 2447 2448 if (!amd_iommu_detected) 2449 return -ENODEV; 2450 2451 status = acpi_get_table("IVRS", 0, &ivrs_base); 2452 if (status == AE_NOT_FOUND) 2453 return -ENODEV; 2454 else if (ACPI_FAILURE(status)) { 2455 const char *err = acpi_format_exception(status); 2456 pr_err("AMD-Vi: IVRS table error: %s\n", err); 2457 return -EINVAL; 2458 } 2459 2460 /* 2461 * Validate checksum here so we don't need to do it when 2462 * we actually parse the table 2463 */ 2464 ret = check_ivrs_checksum(ivrs_base); 2465 if (ret) 2466 goto out; 2467 2468 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); 2469 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); 2470 2471 /* 2472 * First parse ACPI tables to find the largest Bus/Dev/Func 2473 * we need to handle. Upon this information the shared data 2474 * structures for the IOMMUs in the system will be allocated 2475 */ 2476 ret = find_last_devid_acpi(ivrs_base); 2477 if (ret) 2478 goto out; 2479 2480 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE); 2481 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE); 2482 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE); 2483 2484 /* Device table - directly used by all IOMMUs */ 2485 ret = -ENOMEM; 2486 amd_iommu_dev_table = (void *)__get_free_pages( 2487 GFP_KERNEL | __GFP_ZERO | GFP_DMA32, 2488 get_order(dev_table_size)); 2489 if (amd_iommu_dev_table == NULL) 2490 goto out; 2491 2492 /* 2493 * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the 2494 * IOMMU see for that device 2495 */ 2496 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL, 2497 get_order(alias_table_size)); 2498 if (amd_iommu_alias_table == NULL) 2499 goto out; 2500 2501 /* IOMMU rlookup table - find the IOMMU for a specific device */ 2502 amd_iommu_rlookup_table = (void *)__get_free_pages( 2503 GFP_KERNEL | __GFP_ZERO, 2504 get_order(rlookup_table_size)); 2505 if (amd_iommu_rlookup_table == NULL) 2506 goto out; 2507 2508 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( 2509 GFP_KERNEL | __GFP_ZERO, 2510 get_order(MAX_DOMAIN_ID/8)); 2511 if (amd_iommu_pd_alloc_bitmap == NULL) 2512 goto out; 2513 2514 /* 2515 * let all alias entries point to itself 2516 */ 2517 for (i = 0; i <= amd_iommu_last_bdf; ++i) 2518 amd_iommu_alias_table[i] = i; 2519 2520 /* 2521 * never allocate domain 0 because its used as the non-allocated and 2522 * error value placeholder 2523 */ 2524 __set_bit(0, amd_iommu_pd_alloc_bitmap); 2525 2526 spin_lock_init(&amd_iommu_pd_lock); 2527 2528 /* 2529 * now the data structures are allocated and basically initialized 2530 * start the real acpi table scan 2531 */ 2532 ret = init_iommu_all(ivrs_base); 2533 if (ret) 2534 goto out; 2535 2536 /* Disable any previously enabled IOMMUs */ 2537 if (!is_kdump_kernel() || amd_iommu_disabled) 2538 disable_iommus(); 2539 2540 if (amd_iommu_irq_remap) 2541 amd_iommu_irq_remap = check_ioapic_information(); 2542 2543 if (amd_iommu_irq_remap) { 2544 /* 2545 * Interrupt remapping enabled, create kmem_cache for the 2546 * remapping tables. 2547 */ 2548 ret = -ENOMEM; 2549 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir)) 2550 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32); 2551 else 2552 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); 2553 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", 2554 remap_cache_sz, 2555 IRQ_TABLE_ALIGNMENT, 2556 0, NULL); 2557 if (!amd_iommu_irq_cache) 2558 goto out; 2559 2560 irq_lookup_table = (void *)__get_free_pages( 2561 GFP_KERNEL | __GFP_ZERO, 2562 get_order(rlookup_table_size)); 2563 kmemleak_alloc(irq_lookup_table, rlookup_table_size, 2564 1, GFP_KERNEL); 2565 if (!irq_lookup_table) 2566 goto out; 2567 } 2568 2569 ret = init_memory_definitions(ivrs_base); 2570 if (ret) 2571 goto out; 2572 2573 /* init the device table */ 2574 init_device_table(); 2575 2576out: 2577 /* Don't leak any ACPI memory */ 2578 acpi_put_table(ivrs_base); 2579 ivrs_base = NULL; 2580 2581 return ret; 2582} 2583 2584static int amd_iommu_enable_interrupts(void) 2585{ 2586 struct amd_iommu *iommu; 2587 int ret = 0; 2588 2589 for_each_iommu(iommu) { 2590 ret = iommu_init_msi(iommu); 2591 if (ret) 2592 goto out; 2593 } 2594 2595out: 2596 return ret; 2597} 2598 2599static bool detect_ivrs(void) 2600{ 2601 struct acpi_table_header *ivrs_base; 2602 acpi_status status; 2603 2604 status = acpi_get_table("IVRS", 0, &ivrs_base); 2605 if (status == AE_NOT_FOUND) 2606 return false; 2607 else if (ACPI_FAILURE(status)) { 2608 const char *err = acpi_format_exception(status); 2609 pr_err("AMD-Vi: IVRS table error: %s\n", err); 2610 return false; 2611 } 2612 2613 acpi_put_table(ivrs_base); 2614 2615 /* Make sure ACS will be enabled during PCI probe */ 2616 pci_request_acs(); 2617 2618 return true; 2619} 2620 2621/**************************************************************************** 2622 * 2623 * AMD IOMMU Initialization State Machine 2624 * 2625 ****************************************************************************/ 2626 2627static int __init state_next(void) 2628{ 2629 int ret = 0; 2630 2631 switch (init_state) { 2632 case IOMMU_START_STATE: 2633 if (!detect_ivrs()) { 2634 init_state = IOMMU_NOT_FOUND; 2635 ret = -ENODEV; 2636 } else { 2637 init_state = IOMMU_IVRS_DETECTED; 2638 } 2639 break; 2640 case IOMMU_IVRS_DETECTED: 2641 ret = early_amd_iommu_init(); 2642 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; 2643 if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { 2644 pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n"); 2645 free_dma_resources(); 2646 free_iommu_resources(); 2647 init_state = IOMMU_CMDLINE_DISABLED; 2648 ret = -EINVAL; 2649 } 2650 break; 2651 case IOMMU_ACPI_FINISHED: 2652 early_enable_iommus(); 2653 x86_platform.iommu_shutdown = disable_iommus; 2654 init_state = IOMMU_ENABLED; 2655 break; 2656 case IOMMU_ENABLED: 2657 register_syscore_ops(&amd_iommu_syscore_ops); 2658 ret = amd_iommu_init_pci(); 2659 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT; 2660 enable_iommus_v2(); 2661 break; 2662 case IOMMU_PCI_INIT: 2663 ret = amd_iommu_enable_interrupts(); 2664 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 2665 break; 2666 case IOMMU_INTERRUPTS_EN: 2667 ret = amd_iommu_init_dma_ops(); 2668 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; 2669 break; 2670 case IOMMU_DMA_OPS: 2671 init_state = IOMMU_INITIALIZED; 2672 break; 2673 case IOMMU_INITIALIZED: 2674 /* Nothing to do */ 2675 break; 2676 case IOMMU_NOT_FOUND: 2677 case IOMMU_INIT_ERROR: 2678 case IOMMU_CMDLINE_DISABLED: 2679 /* Error states => do nothing */ 2680 ret = -EINVAL; 2681 break; 2682 default: 2683 /* Unknown state */ 2684 BUG(); 2685 } 2686 2687 return ret; 2688} 2689 2690static int __init iommu_go_to_state(enum iommu_init_state state) 2691{ 2692 int ret = -EINVAL; 2693 2694 while (init_state != state) { 2695 if (init_state == IOMMU_NOT_FOUND || 2696 init_state == IOMMU_INIT_ERROR || 2697 init_state == IOMMU_CMDLINE_DISABLED) 2698 break; 2699 ret = state_next(); 2700 } 2701 2702 return ret; 2703} 2704 2705#ifdef CONFIG_IRQ_REMAP 2706int __init amd_iommu_prepare(void) 2707{ 2708 int ret; 2709 2710 amd_iommu_irq_remap = true; 2711 2712 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED); 2713 if (ret) 2714 return ret; 2715 return amd_iommu_irq_remap ? 0 : -ENODEV; 2716} 2717 2718int __init amd_iommu_enable(void) 2719{ 2720 int ret; 2721 2722 ret = iommu_go_to_state(IOMMU_ENABLED); 2723 if (ret) 2724 return ret; 2725 2726 irq_remapping_enabled = 1; 2727 return amd_iommu_xt_mode; 2728} 2729 2730void amd_iommu_disable(void) 2731{ 2732 amd_iommu_suspend(); 2733} 2734 2735int amd_iommu_reenable(int mode) 2736{ 2737 amd_iommu_resume(); 2738 2739 return 0; 2740} 2741 2742int __init amd_iommu_enable_faulting(void) 2743{ 2744 /* We enable MSI later when PCI is initialized */ 2745 return 0; 2746} 2747#endif 2748 2749/* 2750 * This is the core init function for AMD IOMMU hardware in the system. 2751 * This function is called from the generic x86 DMA layer initialization 2752 * code. 2753 */ 2754static int __init amd_iommu_init(void) 2755{ 2756 struct amd_iommu *iommu; 2757 int ret; 2758 2759 ret = iommu_go_to_state(IOMMU_INITIALIZED); 2760 if (ret) { 2761 free_dma_resources(); 2762 if (!irq_remapping_enabled) { 2763 disable_iommus(); 2764 free_iommu_resources(); 2765 } else { 2766 uninit_device_table_dma(); 2767 for_each_iommu(iommu) 2768 iommu_flush_all_caches(iommu); 2769 } 2770 } 2771 2772 for_each_iommu(iommu) 2773 amd_iommu_debugfs_setup(iommu); 2774 2775 return ret; 2776} 2777 2778static bool amd_iommu_sme_check(void) 2779{ 2780 if (!sme_active() || (boot_cpu_data.x86 != 0x17)) 2781 return true; 2782 2783 /* For Fam17h, a specific level of support is required */ 2784 if (boot_cpu_data.microcode >= 0x08001205) 2785 return true; 2786 2787 if ((boot_cpu_data.microcode >= 0x08001126) && 2788 (boot_cpu_data.microcode <= 0x080011ff)) 2789 return true; 2790 2791 pr_notice("AMD-Vi: IOMMU not currently supported when SME is active\n"); 2792 2793 return false; 2794} 2795 2796/**************************************************************************** 2797 * 2798 * Early detect code. This code runs at IOMMU detection time in the DMA 2799 * layer. It just looks if there is an IVRS ACPI table to detect AMD 2800 * IOMMUs 2801 * 2802 ****************************************************************************/ 2803int __init amd_iommu_detect(void) 2804{ 2805 int ret; 2806 2807 if (no_iommu || (iommu_detected && !gart_iommu_aperture)) 2808 return -ENODEV; 2809 2810 if (!amd_iommu_sme_check()) 2811 return -ENODEV; 2812 2813 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); 2814 if (ret) 2815 return ret; 2816 2817 amd_iommu_detected = true; 2818 iommu_detected = 1; 2819 x86_init.iommu.iommu_init = amd_iommu_init; 2820 2821 return 1; 2822} 2823 2824/**************************************************************************** 2825 * 2826 * Parsing functions for the AMD IOMMU specific kernel command line 2827 * options. 2828 * 2829 ****************************************************************************/ 2830 2831static int __init parse_amd_iommu_dump(char *str) 2832{ 2833 amd_iommu_dump = true; 2834 2835 return 1; 2836} 2837 2838static int __init parse_amd_iommu_intr(char *str) 2839{ 2840 for (; *str; ++str) { 2841 if (strncmp(str, "legacy", 6) == 0) { 2842 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; 2843 break; 2844 } 2845 if (strncmp(str, "vapic", 5) == 0) { 2846 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC; 2847 break; 2848 } 2849 } 2850 return 1; 2851} 2852 2853static int __init parse_amd_iommu_options(char *str) 2854{ 2855 for (; *str; ++str) { 2856 if (strncmp(str, "fullflush", 9) == 0) 2857 amd_iommu_unmap_flush = true; 2858 if (strncmp(str, "off", 3) == 0) 2859 amd_iommu_disabled = true; 2860 if (strncmp(str, "force_isolation", 15) == 0) 2861 amd_iommu_force_isolation = true; 2862 } 2863 2864 return 1; 2865} 2866 2867static int __init parse_ivrs_ioapic(char *str) 2868{ 2869 unsigned int bus, dev, fn; 2870 int ret, id, i; 2871 u16 devid; 2872 2873 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); 2874 2875 if (ret != 4) { 2876 pr_err("AMD-Vi: Invalid command line: ivrs_ioapic%s\n", str); 2877 return 1; 2878 } 2879 2880 if (early_ioapic_map_size == EARLY_MAP_SIZE) { 2881 pr_err("AMD-Vi: Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n", 2882 str); 2883 return 1; 2884 } 2885 2886 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); 2887 2888 cmdline_maps = true; 2889 i = early_ioapic_map_size++; 2890 early_ioapic_map[i].id = id; 2891 early_ioapic_map[i].devid = devid; 2892 early_ioapic_map[i].cmd_line = true; 2893 2894 return 1; 2895} 2896 2897static int __init parse_ivrs_hpet(char *str) 2898{ 2899 unsigned int bus, dev, fn; 2900 int ret, id, i; 2901 u16 devid; 2902 2903 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn); 2904 2905 if (ret != 4) { 2906 pr_err("AMD-Vi: Invalid command line: ivrs_hpet%s\n", str); 2907 return 1; 2908 } 2909 2910 if (early_hpet_map_size == EARLY_MAP_SIZE) { 2911 pr_err("AMD-Vi: Early HPET map overflow - ignoring ivrs_hpet%s\n", 2912 str); 2913 return 1; 2914 } 2915 2916 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); 2917 2918 cmdline_maps = true; 2919 i = early_hpet_map_size++; 2920 early_hpet_map[i].id = id; 2921 early_hpet_map[i].devid = devid; 2922 early_hpet_map[i].cmd_line = true; 2923 2924 return 1; 2925} 2926 2927static int __init parse_ivrs_acpihid(char *str) 2928{ 2929 u32 bus, dev, fn; 2930 char *hid, *uid, *p; 2931 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0}; 2932 int ret, i; 2933 2934 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid); 2935 if (ret != 4) { 2936 pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str); 2937 return 1; 2938 } 2939 2940 p = acpiid; 2941 hid = strsep(&p, ":"); 2942 uid = p; 2943 2944 if (!hid || !(*hid) || !uid) { 2945 pr_err("AMD-Vi: Invalid command line: hid or uid\n"); 2946 return 1; 2947 } 2948 2949 i = early_acpihid_map_size++; 2950 memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); 2951 memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); 2952 early_acpihid_map[i].devid = 2953 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7); 2954 early_acpihid_map[i].cmd_line = true; 2955 2956 return 1; 2957} 2958 2959__setup("amd_iommu_dump", parse_amd_iommu_dump); 2960__setup("amd_iommu=", parse_amd_iommu_options); 2961__setup("amd_iommu_intr=", parse_amd_iommu_intr); 2962__setup("ivrs_ioapic", parse_ivrs_ioapic); 2963__setup("ivrs_hpet", parse_ivrs_hpet); 2964__setup("ivrs_acpihid", parse_ivrs_acpihid); 2965 2966IOMMU_INIT_FINISH(amd_iommu_detect, 2967 gart_iommu_hole_init, 2968 NULL, 2969 NULL); 2970 2971bool amd_iommu_v2_supported(void) 2972{ 2973 return amd_iommu_v2_present; 2974} 2975EXPORT_SYMBOL(amd_iommu_v2_supported); 2976 2977struct amd_iommu *get_amd_iommu(unsigned int idx) 2978{ 2979 unsigned int i = 0; 2980 struct amd_iommu *iommu; 2981 2982 for_each_iommu(iommu) 2983 if (i++ == idx) 2984 return iommu; 2985 return NULL; 2986} 2987EXPORT_SYMBOL(get_amd_iommu); 2988 2989/**************************************************************************** 2990 * 2991 * IOMMU EFR Performance Counter support functionality. This code allows 2992 * access to the IOMMU PC functionality. 2993 * 2994 ****************************************************************************/ 2995 2996u8 amd_iommu_pc_get_max_banks(unsigned int idx) 2997{ 2998 struct amd_iommu *iommu = get_amd_iommu(idx); 2999 3000 if (iommu) 3001 return iommu->max_banks; 3002 3003 return 0; 3004} 3005EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); 3006 3007bool amd_iommu_pc_supported(void) 3008{ 3009 return amd_iommu_pc_present; 3010} 3011EXPORT_SYMBOL(amd_iommu_pc_supported); 3012 3013u8 amd_iommu_pc_get_max_counters(unsigned int idx) 3014{ 3015 struct amd_iommu *iommu = get_amd_iommu(idx); 3016 3017 if (iommu) 3018 return iommu->max_counters; 3019 3020 return 0; 3021} 3022EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); 3023 3024static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, 3025 u8 fxn, u64 *value, bool is_write) 3026{ 3027 u32 offset; 3028 u32 max_offset_lim; 3029 3030 /* Make sure the IOMMU PC resource is available */ 3031 if (!amd_iommu_pc_present) 3032 return -ENODEV; 3033 3034 /* Check for valid iommu and pc register indexing */ 3035 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) 3036 return -ENODEV; 3037 3038 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn); 3039 3040 /* Limit the offset to the hw defined mmio region aperture */ 3041 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | 3042 (iommu->max_counters << 8) | 0x28); 3043 if ((offset < MMIO_CNTR_REG_OFFSET) || 3044 (offset > max_offset_lim)) 3045 return -EINVAL; 3046 3047 if (is_write) { 3048 u64 val = *value & GENMASK_ULL(47, 0); 3049 3050 writel((u32)val, iommu->mmio_base + offset); 3051 writel((val >> 32), iommu->mmio_base + offset + 4); 3052 } else { 3053 *value = readl(iommu->mmio_base + offset + 4); 3054 *value <<= 32; 3055 *value |= readl(iommu->mmio_base + offset); 3056 *value &= GENMASK_ULL(47, 0); 3057 } 3058 3059 return 0; 3060} 3061 3062int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3063{ 3064 if (!iommu) 3065 return -EINVAL; 3066 3067 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); 3068} 3069EXPORT_SYMBOL(amd_iommu_pc_get_reg); 3070 3071int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) 3072{ 3073 if (!iommu) 3074 return -EINVAL; 3075 3076 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); 3077} 3078EXPORT_SYMBOL(amd_iommu_pc_set_reg);