Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.29-rc3 (585 lines, 12 kB)
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <asm/smp.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

struct irq_2_iommu {
        struct intel_iommu *iommu;
        u16 irte_index;
        u16 sub_handle;
        u8 irte_mask;
};

#ifdef CONFIG_SPARSE_IRQ
static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
{
        struct irq_2_iommu *iommu;
        int node;

        node = cpu_to_node(cpu);

        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
        printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);

        return iommu;
}

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);

        if (WARN_ON_ONCE(!desc))
                return NULL;

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc;
        struct irq_2_iommu *irq_iommu;

        /*
         * alloc irq desc if not allocated already.
         */
        desc = irq_to_desc_alloc_cpu(irq, cpu);
        if (!desc) {
                printk(KERN_INFO "cannot get irq_desc for %d\n", irq);
                return NULL;
        }

        irq_iommu = desc->irq_2_iommu;

        if (!irq_iommu)
                desc->irq_2_iommu = get_one_free_irq_2_iommu(cpu);

        return desc->irq_2_iommu;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu_alloc_cpu(irq, boot_cpu_id);
}

#else /* !CONFIG_SPARSE_IRQ */

static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
        if (irq < nr_irqs)
                return &irq_2_iommuX[irq];

        return NULL;
}
static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
        return irq_2_iommu(irq);
}
#endif

static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
        struct irq_2_iommu *irq_iommu;

        irq_iommu = irq_2_iommu(irq);

        if (!irq_iommu)
                return NULL;

        if (!irq_iommu->iommu)
                return NULL;

        return irq_iommu;
}

int irq_remapped(int irq)
{
        return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        if (!entry)
                return -1;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        *entry = *(irq_iommu->iommu->ir_table->base + index);

        spin_unlock(&irq_2_ir_lock);
        return 0;
}

int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu;
        u16 index, start_index;
        unsigned int mask = 0;
        int i;

        if (!count)
                return -1;

#ifndef CONFIG_SPARSE_IRQ
        /* protect irq_2_iommu_alloc later */
        if (irq >= nr_irqs)
                return -1;
#endif

        /*
         * start the IRTE search from index 0.
         */
        index = start_index = 0;

        if (count > 1) {
                count = __roundup_pow_of_two(count);
                mask = ilog2(count);
        }

        if (mask > ecap_max_handle_mask(iommu->ecap)) {
                printk(KERN_ERR
                       "Requested mask %x exceeds the max invalidation handle"
                       " mask value %Lx\n", mask,
                       ecap_max_handle_mask(iommu->ecap));
                return -1;
        }

        spin_lock(&irq_2_ir_lock);
        do {
                for (i = index; i < index + count; i++)
                        if (table->base[i].present)
                                break;
                /* empty index found */
                if (i == index + count)
                        break;

                index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

                if (index == start_index) {
                        spin_unlock(&irq_2_ir_lock);
                        printk(KERN_ERR "can't allocate an IRTE\n");
                        return -1;
                }
        } while (1);

        for (i = index; i < index + count; i++)
                table->base[i].present = 1;

        irq_iommu = irq_2_iommu_alloc(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = mask;

        spin_unlock(&irq_2_ir_lock);

        return index;
}

static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
        struct qi_desc desc;

        desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
                   | QI_IEC_SELECTIVE;
        desc.high = 0;

        qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
        int index;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        *sub_handle = irq_iommu->sub_handle;
        index = irq_iommu->irte_index;
        spin_unlock(&irq_2_ir_lock);
        return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);

        irq_iommu = irq_2_iommu_alloc(irq);

        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                printk(KERN_ERR "can't allocate irq_2_iommu\n");
                return -1;
        }

        irq_iommu->iommu = iommu;
        irq_iommu->irte_index = index;
        irq_iommu->sub_handle = subhandle;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

int modify_irte(int irq, struct irte *irte_modified)
{
        int index;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
        __iommu_flush_cache(iommu, irte, sizeof(*irte));

        qi_flush_iec(iommu, index, 0);

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

int flush_irte(int irq)
{
        int index;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;

        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        spin_unlock(&irq_2_ir_lock);

        return 0;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
        int i;

        for (i = 0; i < MAX_IO_APICS; i++)
                if (ir_ioapic[i].id == apic)
                        return ir_ioapic[i].iommu;
        return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
        struct dmar_drhd_unit *drhd;

        drhd = dmar_find_matched_drhd_unit(dev);
        if (!drhd)
                return NULL;

        return drhd->iommu;
}

int free_irte(int irq)
{
        int index, i;
        struct irte *irte;
        struct intel_iommu *iommu;
        struct irq_2_iommu *irq_iommu;

        spin_lock(&irq_2_ir_lock);
        irq_iommu = valid_irq_2_iommu(irq);
        if (!irq_iommu) {
                spin_unlock(&irq_2_ir_lock);
                return -1;
        }

        iommu = irq_iommu->iommu;

        index = irq_iommu->irte_index + irq_iommu->sub_handle;
        irte = &iommu->ir_table->base[index];

        if (!irq_iommu->sub_handle) {
                /* clear each entry in the allocated range */
                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                        set_64bit((unsigned long *)(irte + i), 0);
                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
        }

        irq_iommu->iommu = NULL;
        irq_iommu->irte_index = 0;
        irq_iommu->sub_handle = 0;
        irq_iommu->irte_mask = 0;

        spin_unlock(&irq_2_ir_lock);

        return 0;
}

static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
        u64 addr;
        u32 cmd, sts;
        unsigned long flags;

        addr = virt_to_phys((void *)iommu->ir_table->base);

        spin_lock_irqsave(&iommu->register_lock, flags);

        dmar_writeq(iommu->reg + DMAR_IRTA_REG,
                    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

        /* Set interrupt-remapping table pointer */
        cmd = iommu->gcmd | DMA_GCMD_SIRTP;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRTPS), sts);
        spin_unlock_irqrestore(&iommu->register_lock, flags);

        /*
         * global invalidation of interrupt entry cache before enabling
         * interrupt-remapping.
         */
        qi_global_iec(iommu);

        spin_lock_irqsave(&iommu->register_lock, flags);

        /* Enable interrupt-remapping */
        cmd = iommu->gcmd | DMA_GCMD_IRE;
        iommu->gcmd |= DMA_GCMD_IRE;
        writel(cmd, iommu->reg + DMAR_GCMD_REG);

        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
                      readl, (sts & DMA_GSTS_IRES), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
}

static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
        struct ir_table *ir_table;
        struct page *pages;

        ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
                                             GFP_KERNEL);

        if (!iommu->ir_table)
                return -ENOMEM;

        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

        if (!pages) {
                printk(KERN_ERR "failed to allocate pages of order %d\n",
                       INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table);
                return -ENOMEM;
        }

        ir_table->base = page_address(pages);

        iommu_set_intr_remapping(iommu, mode);
        return 0;
}

int __init enable_intr_remapping(int eim)
{
        struct dmar_drhd_unit *drhd;
        int setup = 0;

        /*
         * check for the Interrupt-remapping support
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (eim && !ecap_eim_support(iommu->ecap)) {
                        printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD,"
                               " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
                        return -1;
                }
        }

        /*
         * Enable queued invalidation for all the DRHD's.
         */
        for_each_drhd_unit(drhd) {
                int ret;
                struct intel_iommu *iommu = drhd->iommu;
                ret = dmar_enable_qi(iommu);

                if (ret) {
                        printk(KERN_ERR "DRHD %Lx: failed to enable queued"
                               " invalidation, ecap %Lx, ret %d\n",
                               drhd->reg_base_addr, iommu->ecap, ret);
                        return -1;
                }
        }

        /*
         * Setup Interrupt-remapping for all the DRHD's now.
         */
        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (!ecap_ir_support(iommu->ecap))
                        continue;

                if (setup_intr_remapping(iommu, eim))
                        goto error;

                setup = 1;
        }

        if (!setup)
                goto error;

        intr_remapping_enabled = 1;

        return 0;

error:
        /*
         * handle error condition gracefully here!
         */
        return -1;
}

static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
                                 struct intel_iommu *iommu)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        void *start, *end;

        drhd = (struct acpi_dmar_hardware_unit *)header;

        start = (void *)(drhd + 1);
        end = ((void *)drhd) + header->length;

        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
                        if (ir_ioapic_num == MAX_IO_APICS) {
                                printk(KERN_WARNING "Exceeded Max IO APICS\n");
                                return -1;
                        }

                        printk(KERN_INFO "IOAPIC id %d under DRHD base"
                               " 0x%Lx\n", scope->enumeration_id,
                               drhd->address);

                        ir_ioapic[ir_ioapic_num].iommu = iommu;
                        ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
                        ir_ioapic_num++;
                }
                start += scope->length;
        }

        return 0;
}

/*
 * Finds the association between I/O APICs and their interrupt-remapping
 * hardware units.
 */
int __init parse_ioapics_under_ir(void)
{
        struct dmar_drhd_unit *drhd;
        int ir_supported = 0;

        for_each_drhd_unit(drhd) {
                struct intel_iommu *iommu = drhd->iommu;

                if (ecap_ir_support(iommu->ecap)) {
                        if (ir_parse_ioapic_scope(drhd->hdr, iommu))
                                return -1;

                        ir_supported = 1;
                }
        }

        if (ir_supported && ir_ioapic_num != nr_ioapics) {
                printk(KERN_WARNING
                       "Not all IO-APICs listed under remapping hardware\n");
                return -1;
        }

        return ir_supported;
}
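
For orientation, here is a minimal caller sketch of the IRTE API above: reserve an entry with alloc_irte(), program it with modify_irte(), and release it later with free_irte(). The function below is hypothetical and not part of this file; the struct irte bitfield names (present, vector, dest_id) come from linux/intel-iommu.h at this tag, and the vector and destination values are illustrative assumptions only.

/*
 * Hypothetical usage sketch (not part of intr_remapping.c): one way an
 * MSI setup path could drive the API above. Assumes the caller already
 * knows the destination APIC id and vector; error handling is minimal.
 */
static int example_msi_irte_setup(struct pci_dev *dev, int irq,
                                  u8 vector, u32 dest_id)
{
        struct intel_iommu *iommu;
        struct irte irte;
        int index;

        /* find the remapping hardware unit that covers this device */
        iommu = map_dev_to_ir(dev);
        if (!iommu)
                return -1;

        /* reserve a single IRTE and bind it to this irq */
        index = alloc_irte(iommu, irq, 1);
        if (index < 0)
                return -1;

        memset(&irte, 0, sizeof(irte));
        irte.present = 1;
        irte.vector  = vector;
        irte.dest_id = dest_id;

        /* write the entry and invalidate the interrupt entry cache */
        return modify_irte(irq, &irte);
}

Teardown would go through free_irte(irq), which clears the reserved entries, flushes the interrupt entry cache via qi_flush_iec(), and unbinds the irq from its IOMMU.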