Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drivers/pci/intr_remapping.c at v2.6.36-rc4 (956 lines, 20 kB)
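/*
 * Intel VT-d interrupt remapping support: allocates and maintains the
 * per-IOMMU interrupt remapping table (IRTEs), performs source-id
 * validation for IOAPIC, HPET and MSI interrupt sources, and enables,
 * disables and re-enables remapping on each DRHD unit.
 */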
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/hpet.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"
#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static struct hpet_scope ir_hpet[MAX_HPET_TBS];
static int ir_ioapic_num, ir_hpet_num;
int intr_remapping_enabled;

static int disable_intremap;
static int disable_sourceid_checking;

static __init int setup_nointremap(char *str)
{
        disable_intremap = 1;
        return 0;
}
early_param("nointremap", setup_nointremap);

static __init int setup_intremap(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strncmp(str, "on", 2))
                disable_intremap = 0;
        else if (!strncmp(str, "off", 3))
                disable_intremap = 1;
        else if (!strncmp(str, "nosid", 5))
                disable_sourceid_checking = 1;

        return 0;
}
early_param("intremap", setup_intremap);

struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8 irte_mask;
};

#ifdef CONFIG_GENERIC_HARDIRQS
static struct irq_2_iommu *get_one_free_irq_2_iommu(int node)
{
        struct irq_2_iommu *iommu;

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        desc = irq_to_desc(irq);
        if (!desc) {
                printk(KERN_INFO "can not get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq));

        return desc->irq_2_iommu;
}

#else /* !CONFIG_GENERIC_HARDIRQS */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        if (!entry)
                return -1;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return 0;
}
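/*
 * Allocate @count contiguous IRTEs in @iommu's remapping table and bind
 * them to @irq. @count is rounded up to a power of two and the matching
 * sub-handle mask is recorded so the whole block can later be flushed
 * and freed in one go. Returns the base table index, or -1 on failure.
 */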
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        unsigned long flags;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return index;
}

static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        return qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);

        irq_iommu = irq_2_iommu_alloc(irq);

        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_2_iommu(irq)->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return 0;
}
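/*
 * Overwrite the IRTE backing @irq with @irte_modified, then invalidate
 * the interrupt entry cache for that index through the queued
 * invalidation interface.
 */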
int modify_irte(int irq, struct irte *irte_modified)
{
        int rc;
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit(&irte->low, irte_modified->low);
        set_64bit(&irte->high, irte_modified->high);
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        rc = qi_flush_iec(iommu, index, 0);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

int flush_irte(int irq)
{
        int rc;
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}

struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
{
        int i;

        for (i = 0; i < MAX_HPET_TBS; i++)
                if (ir_hpet[i].id == hpet_id)
                        return ir_hpet[i].iommu;
        return NULL;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

static int clear_entries(struct irq_2_iommu *irq_iommu)
{
        struct irte *start, *entry, *end;
        struct intel_iommu *iommu;
        int index;

        if (irq_iommu->sub_handle)
                return 0;

        iommu = irq_iommu->iommu;
        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);

        for (entry = start; entry < end; entry++) {
                set_64bit(&entry->low, 0);
                set_64bit(&entry->high, 0);
        }

        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}

int free_irte(int irq)
{
        int rc = 0;
        struct irq_2_iommu *irq_iommu;
        unsigned long flags;

        spin_lock_irqsave(&irq_2_ir_lock, flags);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock_irqrestore(&irq_2_ir_lock, flags);
                return -1;
        }

        rc = clear_entries(irq_iommu);

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock_irqrestore(&irq_2_ir_lock, flags);

        return rc;
}
/*
 * source validation type
 */
#define SVT_NO_VERIFY		0x0  /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS		0x2  /* verify bus of request-id */

/*
 * source-id qualifier
 */
#define SQ_ALL_16	0x0  /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1	0x1  /* verify most significant 13 bits, ignore
			      * the third least significant bit
			      */
#define SQ_13_IGNORE_2	0x2  /* verify most significant 13 bits, ignore
			      * the second and third least significant bits
			      */
#define SQ_13_IGNORE_3	0x3  /* verify most significant 13 bits, ignore
			      * the three least significant bits
			      */

/*
 * set SVT, SQ and SID fields of irte to verify
 * source ids of interrupt requests
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
                         unsigned int sq, unsigned int sid)
{
        if (disable_sourceid_checking)
                svt = SVT_NO_VERIFY;
        irte->svt = svt;
        irte->sq = sq;
        irte->sid = sid;
}

int set_ioapic_sid(struct irte *irte, int apic)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_IO_APICS; i++) {
                if (ir_ioapic[i].id == apic) {
                        sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
                return -1;
        }

        set_irte_sid(irte, 1, 0, sid);

        return 0;
}

int set_hpet_sid(struct irte *irte, u8 id)
{
        int i;
        u16 sid = 0;

        if (!irte)
                return -1;

        for (i = 0; i < MAX_HPET_TBS; i++) {
                if (ir_hpet[i].id == id) {
                        sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
                        break;
                }
        }

        if (sid == 0) {
                pr_warning("Failed to set source-id of HPET block (%d)\n", id);
                return -1;
        }

        /*
         * Should really use SQ_ALL_16. Some platforms are broken.
         * While we figure out the right quirks for these broken platforms, use
         * SQ_13_IGNORE_3 for now.
         */
        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_13_IGNORE_3, sid);

        return 0;
}

int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
        struct pci_dev *bridge;

        if (!irte || !dev)
                return -1;

        /* PCIe device or Root Complex integrated PCI device */
        if (pci_is_pcie(dev) || !dev->bus->parent) {
                set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                             (dev->bus->number << 8) | dev->devfn);
                return 0;
        }

        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
                if (pci_is_pcie(bridge)) /* this is a PCIe-to-PCI/PCIX bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
                        set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
                                (bridge->bus->number << 8) | bridge->devfn);
        }

        return 0;
}
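/*
 * Program DMAR_IRTA_REG with the physical address of the remapping table,
 * latch it with the SIRTP command, globally invalidate the interrupt
 * entry cache, and finally set DMA_GCMD_IRE to turn remapping on.
 */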
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        iommu->gcmd |= DMA_GCMD_SIRTP;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}


static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_ATOMIC);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

/*
 * Disable Interrupt Remapping.
 */
static void iommu_disable_intr_remapping(struct intel_iommu *iommu)
{
        unsigned long flags;
        u32 sts;

        if (!ecap_ir_support(iommu->ecap))
                return;

        /*
         * global invalidation of interrupt entry cache before disabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;

        iommu->gcmd &= ~DMA_GCMD_IRE;
        writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, !(sts & DMA_GSTS_IRES), sts);

end:
        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

int __init intr_remapping_supported(void)
{
        struct dmar_drhd_unit *drhd;

        if (disable_intremap)
                return 0;

        if (!dmar_ir_support())
                return 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        return 0;
        }

        return 1;
}

int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        if (parse_ioapics_under_ir() != 1) {
                printk(KERN_INFO "Not enabling interrupt remapping\n");
                return -1;
        }

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                /*
                 * If the queued invalidation is already initialized,
                 * shouldn't disable it.
                 */
                if (iommu->qi)
                        continue;

                /*
                 * Clear previous faults.
                 */
                dmar_fault(-1, iommu);

                /*
                 * Disable intr remapping and queued invalidation, if already
                 * enabled prior to OS handover.
                 */
                iommu_disable_intr_remapping(iommu);

                dmar_disable_qi(iommu);
        }

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }
        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
                                    struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }
        ir_hpet[ir_hpet_num].bus = bus;
        ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_hpet[ir_hpet_num].iommu = iommu;
        ir_hpet[ir_hpet_num].id = scope->enumeration_id;
        ir_hpet_num++;
}
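/*
 * Like ir_parse_one_hpet_scope() above: walk the ACPI device-scope PCI
 * path with direct config-space reads (the PCI core is not up yet) to
 * find the bus/devfn the IO-APIC sits behind, and record its owning
 * IOMMU.
 */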
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_pci_path *path;
        u8 bus;
        int count;

        bus = scope->bus;
        path = (struct acpi_dmar_pci_path *)(scope + 1);
        count = (scope->length - sizeof(struct acpi_dmar_device_scope))
                / sizeof(struct acpi_dmar_pci_path);

        while (--count > 0) {
                /*
                 * Access PCI directly because the PCI
                 * subsystem isn't initialized yet.
                 */
                bus = read_pci_config_byte(bus, path->dev, path->fn,
                                           PCI_SECONDARY_BUS);
                path++;
        }

        ir_ioapic[ir_ioapic_num].bus = bus;
        ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
        ir_ioapic[ir_ioapic_num].iommu = iommu;
        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
        ir_ioapic_num++;
}

static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
                                      struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx IOMMU %d\n", scope->enumeration_id,
                               drhd->address, iommu->seq_id);

                        ir_parse_one_ioapic_scope(scope, iommu);
                } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) {
                        if (ir_hpet_num == MAX_HPET_TBS) {
                                printk(KERN_WARNING "Exceeded Max HPET blocks\n");
                                return -1;
                        }

                        printk(KERN_INFO "HPET id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_parse_one_hpet_scope(scope, iommu);
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between IOAPICs and their Interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}

void disable_intr_remapping(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu = NULL;

        /*
         * Disable Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                iommu_disable_intr_remapping(iommu);
        }
}

int reenable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;
        struct intel_iommu *iommu = NULL;

        for_each_iommu(iommu, drhd)
                if (iommu->qi)
                        dmar_reenable_qi(iommu);

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_iommu(iommu, drhd) {
                if (!ecap_ir_support(iommu->ecap))
                        continue;

                /* Set up interrupt remapping for iommu. */
                iommu_set_intr_remapping(iommu, eim);
                setup = 1;
        }

        if (!setup)
                goto error;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}
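For orientation, a minimal sketch of how the helpers above compose on an MSI setup path. It is not part of the file: setup_msi_block() and teardown_msi_block() are hypothetical wrappers, and only the calls to alloc_irte(), set_msi_sid(), modify_irte() and free_irte(), plus the struct irte fields, come from the code above.

/* Hypothetical caller sketching the IRTE life cycle (kernel context assumed). */
static int setup_msi_block(struct intel_iommu *iommu, struct pci_dev *dev,
                           int irq, u16 nvec, u8 vector, u32 dest_id)
{
        struct irte irte;
        int index;

        /*
         * Reserve nvec contiguous IRTEs; alloc_irte() rounds nvec up to
         * a power of two and remembers the mask for later flush/free.
         */
        index = alloc_irte(iommu, irq, nvec);
        if (index < 0)
                return -ENOSPC;

        memset(&irte, 0, sizeof(irte));
        irte.present = 1;
        irte.vector  = vector;
        irte.dest_id = dest_id;

        /* Have requests from dev validated against its source-id. */
        set_msi_sid(&irte, dev);

        /* Write the entry and invalidate the interrupt entry cache. */
        if (modify_irte(irq, &irte))
                return -EIO;

        return index;
}

/* Teardown: free_irte() clears the whole block and flushes the IEC. */
static void teardown_msi_block(int irq)
{
        free_irte(irq);
}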