Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.8, 1477 lines, 37 kB
#define pr_fmt(fmt)	"DMAR-IR: " fmt

#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/intel-iommu.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/crash_dump.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/irq_remapping.h>
#include <asm/pci-direct.h>
#include <asm/msidef.h>

#include "irq_remapping.h"

enum irq_mode {
	IRQ_REMAPPING,
	IRQ_POSTING,
};

struct ioapic_scope {
	struct intel_iommu *iommu;
	unsigned int id;
	unsigned int bus;	/* PCI bus number */
	unsigned int devfn;	/* PCI devfn number */
};

struct hpet_scope {
	struct intel_iommu *iommu;
	u8 id;
	unsigned int bus;
	unsigned int devfn;
};

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
	enum irq_mode mode;
};

struct intel_ir_data {
	struct irq_2_iommu	irq_2_iommu;
	struct irte		irte_entry;
	union {
		struct msi_msg	msi_entry;
	};
};

#define IR_X2APIC_MODE(mode)	(mode ? (1 << 11) : 0)
#define IRTE_DEST(dest)		((eim_mode) ? dest : dest << 8)

static int __read_mostly eim_mode;
static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];

/*
 * Lock ordering:
 * ->dmar_global_lock
 *	->irq_2_ir_lock
 *		->qi->q_lock
 *			->iommu->register_lock
 * Note:
 * intel_irq_remap_ops.{supported,prepare,enable,disable,reenable} are called
 * in single-threaded environment with interrupt disabled, so no need to take
 * the dmar_global_lock.
 */
static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static struct irq_domain_ops intel_ir_domain_ops;

static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
static int __init parse_ioapics_under_ir(void);

static bool ir_pre_enabled(struct intel_iommu *iommu)
{
	return (iommu->flags & VTD_FLAG_IRQ_REMAP_PRE_ENABLED);
}

static void clear_ir_pre_enabled(struct intel_iommu *iommu)
{
	iommu->flags &= ~VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static void init_ir_status(struct intel_iommu *iommu)
{
	u32 gsts;

	gsts = readl(iommu->reg + DMAR_GSTS_REG);
	if (gsts & DMA_GSTS_IRES)
		iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}

static int alloc_irte(struct intel_iommu *iommu, int irq,
		      struct irq_2_iommu *irq_iommu, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	unsigned int mask = 0;
	unsigned long flags;
	int index;

	if (!count || !irq_iommu)
		return -1;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		pr_err("Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	index = bitmap_find_free_region(table->bitmap,
					INTR_REMAP_TABLE_ENTRIES, mask);
	if (index < 0) {
		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
	} else {
		irq_iommu->iommu = iommu;
		irq_iommu->irte_index = index;
		irq_iommu->sub_handle = 0;
		irq_iommu->irte_mask = mask;
		irq_iommu->mode = IRQ_REMAPPING;
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return index;
}

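/*
 * Invalidate the Interrupt Entry Cache (IEC) through the queued
 * invalidation interface. Hardware may cache IRTEs, so every update of
 * an IRTE must be followed by an IEC flush before the new entry is
 * guaranteed to take effect.
 */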
static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	return qi_submit_sync(&desc, iommu);
}

static int modify_irte(struct irq_2_iommu *irq_iommu,
		       struct irte *irte_modified)
{
	struct intel_iommu *iommu;
	unsigned long flags;
	struct irte *irte;
	int rc, index;

	if (!irq_iommu)
		return -1;

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
	if ((irte->pst == 1) || (irte_modified->pst == 1)) {
		bool ret;

		ret = cmpxchg_double(&irte->low, &irte->high,
				     irte->low, irte->high,
				     irte_modified->low, irte_modified->high);
		/*
		 * We use cmpxchg16 to atomically update the 128-bit IRTE,
		 * and it cannot be updated by the hardware or other processors
		 * behind us, so the return value of cmpxchg16 should be the
		 * same as the old value.
		 */
		WARN_ON(!ret);
	} else
#endif
	{
		set_64bit(&irte->low, irte_modified->low);
		set_64bit(&irte->high, irte_modified->high);
	}
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	rc = qi_flush_iec(iommu, index, 0);

	/* Update iommu mode according to the IRTE mode */
	irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);

	return rc;
}

static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
			return ir_hpet[i].iommu;
	return NULL;
}

static struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
			return ir_ioapic[i].iommu;
	return NULL;
}

static struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct irte *start, *entry, *end;
	struct intel_iommu *iommu;
	int index;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index;

	start = iommu->ir_table->base + index;
	end = start + (1 << irq_iommu->irte_mask);

	for (entry = start; entry < end; entry++) {
		set_64bit(&entry->low, 0);
		set_64bit(&entry->high, 0);
	}
	bitmap_release_region(iommu->ir_table->bitmap, index,
			      irq_iommu->irte_mask);

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0	/* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1	/* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2	/* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0	/* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1	/* verify most significant 13 bits, ignore
				 * the third least significant bit
				 */
#define SQ_13_IGNORE_2	0x2	/* verify most significant 13 bits, ignore
				 * the second and third least significant bits
				 */
#define SQ_13_IGNORE_3	0x3	/* verify most significant 13 bits, ignore
				 * the three least significant bits
				 */

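/*
 * The SID field of an IRTE holds a PCI requester ID: bus number in
 * bits 15:8 and devfn in bits 7:0. The SVT and SQ fields above select
 * how strictly the hardware compares the requester ID of an interrupt
 * request against this SID before remapping it.
 */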
/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	if (disable_sourceid_checking)
		svt = SVT_NO_VERIFY;
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}

static int set_ioapic_sid(struct irte *irte, int apic)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_IO_APICS; i++) {
		if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
			sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of IOAPIC (%d)\n", apic);
		return -1;
	}

	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);

	return 0;
}

static int set_hpet_sid(struct irte *irte, u8 id)
{
	int i;
	u16 sid = 0;

	if (!irte)
		return -1;

	down_read(&dmar_global_lock);
	for (i = 0; i < MAX_HPET_TBS; i++) {
		if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
			sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
			break;
		}
	}
	up_read(&dmar_global_lock);

	if (sid == 0) {
		pr_warn("Failed to set source-id of HPET block (%d)\n", id);
		return -1;
	}

	/*
	 * Should really use SQ_ALL_16. Some platforms are broken.
	 * While we figure out the right quirks for these broken platforms, use
	 * SQ_13_IGNORE_3 for now.
	 */
	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

	return 0;
}

struct set_msi_sid_data {
	struct pci_dev *pdev;
	u16 alias;
};

static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct set_msi_sid_data *data = opaque;

	data->pdev = pdev;
	data->alias = alias;

	return 0;
}

static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct set_msi_sid_data data;

	if (!irte || !dev)
		return -1;

	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

	/*
	 * DMA alias provides us with a PCI device and alias. The only case
	 * where it will return an alias on a different bus than the
	 * device is the case of a PCIe-to-PCI bridge, where the alias is for
	 * the subordinate bus. In this case we can only verify the bus.
	 *
	 * If the alias device is on a different bus than our source device
	 * then we have a topology based alias, use it.
	 *
	 * Otherwise, the alias is for a device DMA quirk and we cannot
	 * assume that MSI uses the same requester ID. Therefore use the
	 * original device.
	 */
	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     PCI_DEVID(PCI_BUS_NUM(data.alias),
				       dev->bus->number));
	else if (data.pdev->bus->number != dev->bus->number)
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
	else
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     PCI_DEVID(dev->bus->number, dev->devfn));

	return 0;
}

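/*
 * In a kdump kernel the previous kernel may have left interrupt
 * remapping enabled. Copy its IRTEs into our table so that interrupts
 * from devices that were not reset keep working until their drivers
 * reinitialize them.
 */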
static int iommu_load_old_irte(struct intel_iommu *iommu)
{
	struct irte *old_ir_table;
	phys_addr_t irt_phys;
	unsigned int i;
	size_t size;
	u64 irta;

	if (!is_kdump_kernel()) {
		pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
			iommu->name);
		clear_ir_pre_enabled(iommu);
		iommu_disable_irq_remapping(iommu);
		return -EINVAL;
	}

	/* Check whether the old ir-table has the same size as ours */
	irta = dmar_readq(iommu->reg + DMAR_IRTA_REG);
	if ((irta & INTR_REMAP_TABLE_REG_SIZE_MASK)
	     != INTR_REMAP_TABLE_REG_SIZE)
		return -EINVAL;

	irt_phys = irta & VTD_PAGE_MASK;
	size = INTR_REMAP_TABLE_ENTRIES*sizeof(struct irte);

	/* Map the old IR table */
	old_ir_table = memremap(irt_phys, size, MEMREMAP_WB);
	if (!old_ir_table)
		return -ENOMEM;

	/* Copy data over */
	memcpy(iommu->ir_table->base, old_ir_table, size);

	__iommu_flush_cache(iommu, iommu->ir_table->base, size);

	/*
	 * Now check the table for used entries and mark those as
	 * allocated in the bitmap
	 */
	for (i = 0; i < INTR_REMAP_TABLE_ENTRIES; i++) {
		if (iommu->ir_table->base[i].present)
			bitmap_set(iommu->ir_table->bitmap, i, 1);
	}

	memunmap(old_ir_table);

	return 0;
}

static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
{
	unsigned long flags;
	u64 addr;
	u32 sts;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	writel(iommu->gcmd | DMA_GCMD_SIRTP, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * Global invalidation of interrupt entry cache to make sure the
	 * hardware uses the new irq remapping table.
	 */
	qi_global_iec(iommu);
}

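/*
 * Enabling interrupt remapping is a two step sequence:
 * iommu_set_irq_remapping() above latches the table address with the
 * SIRTP bit, and iommu_enable_irq_remapping() below sets IRE while
 * clearing CFI, so compatibility-format (non-remapped) interrupts are
 * blocked once remapping is active.
 */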
static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	iommu->gcmd |= DMA_GCMD_IRE;
	iommu->gcmd &= ~DMA_GCMD_CFI;	/* Block compatibility-format MSIs */
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	/*
	 * With CFI clear in the Global Command register, we should be
	 * protected from dangerous (i.e. compatibility) interrupts
	 * regardless of x2apic status. Check just to be sure.
	 */
	if (sts & DMA_GSTS_CFIS)
		WARN(1, KERN_WARNING
			"Compatibility-format IRQs enabled despite intr remapping;\n"
			"you are vulnerable to IRQ injection.\n");

	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
	struct ir_table *ir_table;
	struct page *pages;
	unsigned long *bitmap;

	if (iommu->ir_table)
		return 0;

	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
	if (!ir_table)
		return -ENOMEM;

	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
				 INTR_REMAP_PAGE_ORDER);
	if (!pages) {
		pr_err("IR%d: failed to allocate pages of order %d\n",
		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
		goto out_free_table;
	}

	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
			 sizeof(long), GFP_ATOMIC);
	if (bitmap == NULL) {
		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
		goto out_free_pages;
	}

	iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(),
						    0, INTR_REMAP_TABLE_ENTRIES,
						    NULL, &intel_ir_domain_ops,
						    iommu);
	if (!iommu->ir_domain) {
		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
		goto out_free_bitmap;
	}
	iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);

	ir_table->base = page_address(pages);
	ir_table->bitmap = bitmap;
	iommu->ir_table = ir_table;

	/*
	 * If the queued invalidation is already initialized,
	 * shouldn't disable it.
	 */
	if (!iommu->qi) {
		/*
		 * Clear previous faults.
		 */
		dmar_fault(-1, iommu);
		dmar_disable_qi(iommu);

		if (dmar_enable_qi(iommu)) {
			pr_err("Failed to enable queued invalidation\n");
			goto out_free_bitmap;
		}
	}

	init_ir_status(iommu);

	if (ir_pre_enabled(iommu)) {
		if (iommu_load_old_irte(iommu))
			pr_err("Failed to copy IR table for %s from previous kernel\n",
			       iommu->name);
		else
			pr_info("Copied IR table for %s from previous kernel\n",
				iommu->name);
	}

	iommu_set_irq_remapping(iommu, eim_mode);

	return 0;

out_free_bitmap:
	kfree(bitmap);
out_free_pages:
	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
	kfree(ir_table);

	iommu->ir_table = NULL;

	return -ENOMEM;
}

static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
{
	if (iommu && iommu->ir_table) {
		if (iommu->ir_msi_domain) {
			irq_domain_remove(iommu->ir_msi_domain);
			iommu->ir_msi_domain = NULL;
		}
		if (iommu->ir_domain) {
			irq_domain_remove(iommu->ir_domain);
			iommu->ir_domain = NULL;
		}
		free_pages((unsigned long)iommu->ir_table->base,
			   INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table->bitmap);
		kfree(iommu->ir_table);
		iommu->ir_table = NULL;
	}
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
{
	unsigned long flags;
	u32 sts;

	if (!ecap_ir_support(iommu->ecap))
		return;

	/*
	 * global invalidation of interrupt entry cache before disabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	raw_spin_lock_irqsave(&iommu->register_lock, flags);

	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (!(sts & DMA_GSTS_IRES))
		goto end;

	iommu->gcmd &= ~DMA_GCMD_IRE;
	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, !(sts & DMA_GSTS_IRES), sts);

end:
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int __init dmar_x2apic_optout(void)
{
	struct acpi_table_dmar *dmar;

	dmar = (struct acpi_table_dmar *)dmar_tbl;
	if (!dmar || no_x2apic_optout)
		return 0;
	return dmar->flags & DMAR_X2APIC_OPT_OUT;
}

static void __init intel_cleanup_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	for_each_iommu(iommu, drhd) {
		if (ecap_ir_support(iommu->ecap)) {
			iommu_disable_irq_remapping(iommu);
			intel_teardown_irq_remapping(iommu);
		}
	}

	if (x2apic_supported())
		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
}

static int __init intel_prepare_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int eim = 0;

	if (irq_remap_broken) {
		pr_warn("This system BIOS has enabled interrupt remapping\n"
			"on a chipset that contains an erratum making that\n"
			"feature unstable. To maintain system stability\n"
			"interrupt remapping is being disabled. Please\n"
			"contact your BIOS vendor for an update\n");
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		return -ENODEV;
	}

	if (dmar_table_init() < 0)
		return -ENODEV;

	if (!dmar_ir_support())
		return -ENODEV;

	if (parse_ioapics_under_ir()) {
		pr_info("Not enabling interrupt remapping\n");
		goto error;
	}

	/* First make sure all IOMMUs support IRQ remapping */
	for_each_iommu(iommu, drhd)
		if (!ecap_ir_support(iommu->ecap))
			goto error;

	/* Detect remapping mode: lapic or x2apic */
	if (x2apic_supported()) {
		eim = !dmar_x2apic_optout();
		if (!eim) {
			pr_info("x2apic is disabled because BIOS sets x2apic opt out bit.\n");
			pr_info("Use 'intremap=no_x2apic_optout' to override the BIOS setting.\n");
		}
	}

	for_each_iommu(iommu, drhd) {
		if (eim && !ecap_eim_support(iommu->ecap)) {
			pr_info("%s does not support EIM\n", iommu->name);
			eim = 0;
		}
	}

	eim_mode = eim;
	if (eim)
		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");

	/* Do the initializations early */
	for_each_iommu(iommu, drhd) {
		if (intel_setup_irq_remapping(iommu)) {
			pr_err("Failed to setup irq remapping for %s\n",
			       iommu->name);
			goto error;
		}
	}

	return 0;

error:
	intel_cleanup_irq_remapping();
	return -ENODEV;
}

/*
 * Set Posted-Interrupts capability.
 */
static inline void set_irq_posting_cap(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	if (!disable_irq_post) {
		/*
		 * If IRTE is in posted format, the 'pda' field goes across the
		 * 64-bit boundary, so we need to use cmpxchg16b to atomically
		 * update it. We only expose posted-interrupt when
		 * X86_FEATURE_CX16 is supported. Actually, hardware platforms
		 * supporting PI should have X86_FEATURE_CX16 support, this
		 * has been confirmed with Intel hardware guys.
		 */
		if (boot_cpu_has(X86_FEATURE_CX16))
			intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;

		for_each_iommu(iommu, drhd)
			if (!cap_pi_support(iommu->cap)) {
				intel_irq_remap_ops.capability &=
					~(1 << IRQ_POSTING_CAP);
				break;
			}
	}
}

static int __init intel_enable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool setup = false;

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ir_pre_enabled(iommu))
			iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	irq_remapping_enabled = 1;

	set_irq_posting_cap();

	pr_info("Enabled IRQ remapping in %s mode\n", eim_mode ? "x2apic" : "xapic");

	return eim_mode ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;

error:
	intel_cleanup_irq_remapping();
	return -1;
}

static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
				   struct intel_iommu *iommu,
				   struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_HPET_TBS; count++) {
		if (ir_hpet[count].iommu == iommu &&
		    ir_hpet[count].id == scope->enumeration_id)
			return 0;
		else if (ir_hpet[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max HPET blocks\n");
		return -ENOSPC;
	}

	ir_hpet[free].iommu = iommu;
	ir_hpet[free].id    = scope->enumeration_id;
	ir_hpet[free].bus   = bus;
	ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
	pr_info("HPET id %d under DRHD base 0x%Lx\n",
		scope->enumeration_id, drhd->address);

	return 0;
}

static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				     struct intel_iommu *iommu,
				     struct acpi_dmar_hardware_unit *drhd)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count, free = -1;

	bus = scope->bus;
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI
		 * subsystem isn't initialized yet.
		 */
		bus = read_pci_config_byte(bus, path->device, path->function,
					   PCI_SECONDARY_BUS);
		path++;
	}

	for (count = 0; count < MAX_IO_APICS; count++) {
		if (ir_ioapic[count].iommu == iommu &&
		    ir_ioapic[count].id == scope->enumeration_id)
			return 0;
		else if (ir_ioapic[count].iommu == NULL && free == -1)
			free = count;
	}
	if (free == -1) {
		pr_warn("Exceeded Max IO APICS\n");
		return -ENOSPC;
	}

	ir_ioapic[free].bus   = bus;
	ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
	ir_ioapic[free].iommu = iommu;
	ir_ioapic[free].id    = scope->enumeration_id;
	pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
		scope->enumeration_id, drhd->address, iommu->seq_id);

	return 0;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
				      struct intel_iommu *iommu)
{
	int ret = 0;
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;
	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end && ret == 0) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
			ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
		else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
			ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
		start += scope->length;
	}

	return ret;
}

static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
{
	int i;

	for (i = 0; i < MAX_HPET_TBS; i++)
		if (ir_hpet[i].iommu == iommu)
			ir_hpet[i].iommu = NULL;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].iommu == iommu)
			ir_ioapic[i].iommu = NULL;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
static int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	bool ir_supported = false;
	int ioapic_idx;

	for_each_iommu(iommu, drhd) {
		int ret;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		ret = ir_parse_ioapic_hpet_scope(drhd->hdr, iommu);
		if (ret)
			return ret;

		ir_supported = true;
	}

	if (!ir_supported)
		return -ENODEV;

	for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) {
		int ioapic_id = mpc_ioapic_id(ioapic_idx);

		if (!map_ioapic_to_ir(ioapic_id)) {
			pr_err(FW_BUG "ioapic %d has no mapping iommu, "
			       "interrupt remapping will be disabled\n",
			       ioapic_id);
			return -1;
		}
	}

	return 0;
}

static int __init ir_dev_scope_init(void)
{
	int ret;

	if (!irq_remapping_enabled)
		return 0;

	down_write(&dmar_global_lock);
	ret = dmar_dev_scope_init();
	up_write(&dmar_global_lock);

	return ret;
}
rootfs_initcall(ir_dev_scope_init);

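/*
 * disable_irq_remapping() and reenable_irq_remapping() are invoked
 * through intel_irq_remap_ops (see below), e.g. when remapping has to
 * be torn down and restored across suspend/resume.
 */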
static void disable_irq_remapping(void)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu = NULL;

	/*
	 * Disable Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		iommu_disable_irq_remapping(iommu);
	}

	/*
	 * Clear Posted-Interrupts capability.
	 */
	if (!disable_irq_post)
		intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
}

static int reenable_irq_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	bool setup = false;
	struct intel_iommu *iommu = NULL;

	for_each_iommu(iommu, drhd)
		if (iommu->qi)
			dmar_reenable_qi(iommu);

	/*
	 * Setup Interrupt-remapping for all the DRHD's now.
	 */
	for_each_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		/* Set up interrupt remapping for iommu. */
		iommu_set_irq_remapping(iommu, eim);
		iommu_enable_irq_remapping(iommu);
		setup = true;
	}

	if (!setup)
		goto error;

	set_irq_posting_cap();

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
{
	memset(irte, 0, sizeof(*irte));

	irte->present = 1;
	irte->dst_mode = apic->irq_dest_mode;
	/*
	 * Trigger mode in the IRTE will always be edge, and for IO-APIC, the
	 * actual level or edge trigger will be setup in the IO-APIC
	 * RTE. This will help simplify level triggered irq migration.
	 * For more details, see the comments (in io_apic.c) explaining IO-APIC
	 * irq migration in the presence of interrupt-remapping.
	 */
	irte->trigger_mode = 0;
	irte->dlvry_mode = apic->irq_delivery_mode;
	irte->vector = vector;
	irte->dest_id = IRTE_DEST(dest);
	irte->redir_hint = 1;
}

static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
{
	struct intel_iommu *iommu = NULL;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		iommu = map_ioapic_to_ir(info->ioapic_id);
		break;
	case X86_IRQ_ALLOC_TYPE_HPET:
		iommu = map_hpet_to_ir(info->hpet_id);
		break;
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		iommu = map_dev_to_ir(info->msi_dev);
		break;
	default:
		BUG_ON(1);
		break;
	}

	return iommu ? iommu->ir_domain : NULL;
}

static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
{
	struct intel_iommu *iommu;

	if (!info)
		return NULL;

	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		iommu = map_dev_to_ir(info->msi_dev);
		if (iommu)
			return iommu->ir_msi_domain;
		break;
	default:
		break;
	}

	return NULL;
}

struct irq_remap_ops intel_irq_remap_ops = {
	.prepare		= intel_prepare_irq_remapping,
	.enable			= intel_enable_irq_remapping,
	.disable		= disable_irq_remapping,
	.reenable		= reenable_irq_remapping,
	.enable_faulting	= enable_drhd_fault_handling,
	.get_ir_irq_domain	= intel_get_ir_irq_domain,
	.get_irq_domain		= intel_get_irq_domain,
};

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For both level and edge triggered, irq migration is a simple atomic
 * update (of vector and cpu destination) of the IRTE and a flush of the
 * hardware cache.
 *
 * For level triggered, we eliminate the io-apic RTE modification (with the
 * updated vector information), by using a virtual vector (io-apic pin number).
 * The real vector that is used to interrupt the cpu comes from the
 * interrupt-remapping table entry.
 *
 * As the migration is a simple atomic update of the IRTE, the same mechanism
 * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
static int
intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      bool force)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct irte *irte = &ir_data->irte_entry;
	struct irq_cfg *cfg = irqd_cfg(data);
	struct irq_data *parent = data->parent_data;
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;

	/*
	 * Atomically update the IRTE with the new destination and vector,
	 * then flush the interrupt entry cache.
	 */
	irte->vector = cfg->vector;
	irte->dest_id = IRTE_DEST(cfg->dest_apicid);

	/* Update the hardware only if the interrupt is in remapped mode. */
	if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
		modify_irte(&ir_data->irq_2_iommu, irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	send_cleanup_vector(cfg);

	return IRQ_SET_MASK_OK_DONE;
}

static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
				     struct msi_msg *msg)
{
	struct intel_ir_data *ir_data = irq_data->chip_data;

	*msg = ir_data->msi_entry;
}

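/*
 * Switch an IRTE between remapped and posted mode. A NULL vcpu_data
 * restores the cached remapped entry; otherwise the IRTE is rewritten
 * in posted format so the interrupt is delivered through the vcpu's
 * posted-interrupt descriptor instead of a host vector.
 */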
static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
{
	struct intel_ir_data *ir_data = data->chip_data;
	struct vcpu_data *vcpu_pi_info = info;

	/* stop posting interrupts, back to remapping mode */
	if (!vcpu_pi_info) {
		modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
	} else {
		struct irte irte_pi;

		/*
		 * We are not caching the posted interrupt entry. We
		 * copy the data from the remapped entry and modify
		 * the fields which are relevant for posted mode. The
		 * cached remapped entry is used for switching back to
		 * remapped mode.
		 */
		memset(&irte_pi, 0, sizeof(irte_pi));
		dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);

		/* Update the posted mode fields */
		irte_pi.p_pst = 1;
		irte_pi.p_urgent = 0;
		irte_pi.p_vector = vcpu_pi_info->vector;
		irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
				(32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
		irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
				~(-1UL << PDA_HIGH_BIT);

		modify_irte(&ir_data->irq_2_iommu, &irte_pi);
	}

	return 0;
}

static struct irq_chip intel_ir_chip = {
	.irq_ack		= ir_ack_apic_edge,
	.irq_set_affinity	= intel_ir_set_affinity,
	.irq_compose_msi_msg	= intel_ir_compose_msi_msg,
	.irq_set_vcpu_affinity	= intel_ir_set_vcpu_affinity,
};

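/*
 * The device side is programmed with the IRTE handle rather than with
 * real vector/destination bits: an IO-APIC RTE carries format=1 with
 * the 16-bit handle split across the index/index2 fields, while an MSI
 * address encodes the handle via the SHV and index bits and carries the
 * sub-handle in the data register.
 */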
static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
					     struct irq_cfg *irq_cfg,
					     struct irq_alloc_info *info,
					     int index, int sub_handle)
{
	struct IR_IO_APIC_route_entry *entry;
	struct irte *irte = &data->irte_entry;
	struct msi_msg *msg = &data->msi_entry;

	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
	switch (info->type) {
	case X86_IRQ_ALLOC_TYPE_IOAPIC:
		/* Set source-id of interrupt request */
		set_ioapic_sid(irte, info->ioapic_id);
		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
			info->ioapic_id, irte->present, irte->fpd,
			irte->dst_mode, irte->redir_hint,
			irte->trigger_mode, irte->dlvry_mode,
			irte->avail, irte->vector, irte->dest_id,
			irte->sid, irte->sq, irte->svt);

		entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
		info->ioapic_entry = NULL;
		memset(entry, 0, sizeof(*entry));
		entry->index2	= (index >> 15) & 0x1;
		entry->zero	= 0;
		entry->format	= 1;
		entry->index	= (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		entry->vector	= info->ioapic_pin;
		entry->mask	= 0;	/* enable IRQ */
		entry->trigger	= info->ioapic_trigger;
		entry->polarity	= info->ioapic_polarity;
		if (info->ioapic_trigger)
			entry->mask = 1; /* Mask level triggered irqs. */
		break;

	case X86_IRQ_ALLOC_TYPE_HPET:
	case X86_IRQ_ALLOC_TYPE_MSI:
	case X86_IRQ_ALLOC_TYPE_MSIX:
		if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
			set_hpet_sid(irte, info->hpet_id);
		else
			set_msi_sid(irte, info->msi_dev);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(index) |
				  MSI_ADDR_IR_INDEX2(index);
		break;

	default:
		BUG_ON(1);
		break;
	}
}

static void intel_free_irq_resources(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct intel_ir_data *data;
	struct irq_2_iommu *irq_iommu;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			data = irq_data->chip_data;
			irq_iommu = &data->irq_2_iommu;
			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
			clear_entries(irq_iommu);
			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
			irq_domain_reset_irq_data(irq_data);
			kfree(data);
		}
	}
}

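/*
 * One block of IRTEs is allocated for the whole request: multi-MSI gets
 * a power-of-two block and each vector uses its own sub-handle. The
 * resulting hwirq encodes both as (index << 16) + sub-handle.
 */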
static int intel_irq_remapping_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *arg)
{
	struct intel_iommu *iommu = domain->host_data;
	struct irq_alloc_info *info = arg;
	struct intel_ir_data *data, *ird;
	struct irq_data *irq_data;
	struct irq_cfg *irq_cfg;
	int i, ret, index;

	if (!info || !iommu)
		return -EINVAL;
	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
		return -EINVAL;

	/*
	 * With IRQ remapping enabled, we don't need contiguous CPU vectors
	 * to support multiple MSI interrupts.
	 */
	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
		info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret < 0)
		return ret;

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_free_parent;

	down_read(&dmar_global_lock);
	index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
	up_read(&dmar_global_lock);
	if (index < 0) {
		pr_warn("Failed to allocate IRTE\n");
		kfree(data);
		goto out_free_parent;
	}

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		irq_cfg = irqd_cfg(irq_data);
		if (!irq_data || !irq_cfg) {
			ret = -EINVAL;
			goto out_free_data;
		}

		if (i > 0) {
			ird = kzalloc(sizeof(*ird), GFP_KERNEL);
			if (!ird)
				goto out_free_data;
			/* Initialize the common data */
			ird->irq_2_iommu = data->irq_2_iommu;
			ird->irq_2_iommu.sub_handle = i;
		} else {
			ird = data;
		}

		irq_data->hwirq = (index << 16) + i;
		irq_data->chip_data = ird;
		irq_data->chip = &intel_ir_chip;
		intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
	}
	return 0;

out_free_data:
	intel_free_irq_resources(domain, virq, i);
out_free_parent:
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
	return ret;
}

static void intel_irq_remapping_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	intel_free_irq_resources(domain, virq, nr_irqs);
	irq_domain_free_irqs_common(domain, virq, nr_irqs);
}

static void intel_irq_remapping_activate(struct irq_domain *domain,
					 struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;

	modify_irte(&data->irq_2_iommu, &data->irte_entry);
}

static void intel_irq_remapping_deactivate(struct irq_domain *domain,
					   struct irq_data *irq_data)
{
	struct intel_ir_data *data = irq_data->chip_data;
	struct irte entry;

	memset(&entry, 0, sizeof(entry));
	modify_irte(&data->irq_2_iommu, &entry);
}

static struct irq_domain_ops intel_ir_domain_ops = {
	.alloc = intel_irq_remapping_alloc,
	.free = intel_irq_remapping_free,
	.activate = intel_irq_remapping_activate,
	.deactivate = intel_irq_remapping_deactivate,
};

/*
 * Support of Interrupt Remapping Unit Hotplug
 */
static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
{
	int ret;
	int eim = x2apic_enabled();

	if (eim && !ecap_eim_support(iommu->ecap)) {
		pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
			iommu->reg_phys, iommu->ecap);
		return -ENODEV;
	}

	if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
		pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
			iommu->reg_phys);
		return -ENODEV;
	}

	/* TODO: check all IOAPICs are covered by IOMMU */

	/* Setup Interrupt-remapping now. */
	ret = intel_setup_irq_remapping(iommu);
	if (ret) {
		pr_err("Failed to setup irq remapping for %s\n",
		       iommu->name);
		intel_teardown_irq_remapping(iommu);
		ir_remove_ioapic_hpet_scope(iommu);
	} else {
		iommu_enable_irq_remapping(iommu);
	}

	return ret;
}

int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{
	int ret = 0;
	struct intel_iommu *iommu = dmaru->iommu;

	if (!irq_remapping_enabled)
		return 0;
	if (iommu == NULL)
		return -EINVAL;
	if (!ecap_ir_support(iommu->ecap))
		return 0;
	if (irq_remapping_cap(IRQ_POSTING_CAP) &&
	    !cap_pi_support(iommu->cap))
		return -EBUSY;

	if (insert) {
		if (!iommu->ir_table)
			ret = dmar_ir_add(dmaru, iommu);
	} else {
		if (iommu->ir_table) {
			if (!bitmap_empty(iommu->ir_table->bitmap,
					  INTR_REMAP_TABLE_ENTRIES)) {
				ret = -EBUSY;
			} else {
				iommu_disable_irq_remapping(iommu);
				intel_teardown_irq_remapping(iommu);
				ir_remove_ioapic_hpet_scope(iommu);
			}
		}
	}

	return ret;
}