Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v2.6.28-rc3 · 512 lines · 10 kB
#include <linux/interrupt.h>
#include <linux/dmar.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <asm/io_apic.h>
#include <linux/intel-iommu.h>
#include "intr_remapping.h"

static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num;
int intr_remapping_enabled;

struct irq_2_iommu {
	struct intel_iommu *iommu;
	u16 irte_index;
	u16 sub_handle;
	u8 irte_mask;
};

/* static table mapping each irq to the IOMMU and IRTE slot that serve it */
static struct irq_2_iommu irq_2_iommuX[NR_IRQS];

static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
{
	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
}

static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
{
	return irq_2_iommu(irq);
}

static DEFINE_SPINLOCK(irq_2_ir_lock);

static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
{
	struct irq_2_iommu *irq_iommu;

	irq_iommu = irq_2_iommu(irq);

	if (!irq_iommu)
		return NULL;

	if (!irq_iommu->iommu)
		return NULL;

	return irq_iommu;
}

int irq_remapped(int irq)
{
	return valid_irq_2_iommu(irq) != NULL;
}

int get_irte(int irq, struct irte *entry)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	if (!entry)
		return -1;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	*entry = *(irq_iommu->iommu->ir_table->base + index);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

/*
 * Allocate 'count' consecutive IRTEs (rounded up to a power of two) in
 * the interrupt-remapping table and record the base index for 'irq'.
 * Returns the base index, or -1 on failure.
 */
int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{
	struct ir_table *table = iommu->ir_table;
	struct irq_2_iommu *irq_iommu;
	u16 index, start_index;
	unsigned int mask = 0;
	int i;

	if (!count)
		return -1;

	/* protect irq_2_iommu_alloc later */
	if (irq >= nr_irqs)
		return -1;

	/*
	 * start the IRTE search from index 0.
	 */
	index = start_index = 0;

	if (count > 1) {
		count = __roundup_pow_of_two(count);
		mask = ilog2(count);
	}

	if (mask > ecap_max_handle_mask(iommu->ecap)) {
		printk(KERN_ERR
		       "Requested mask %x exceeds the max invalidation handle"
		       " mask value %Lx\n", mask,
		       ecap_max_handle_mask(iommu->ecap));
		return -1;
	}

	spin_lock(&irq_2_ir_lock);
	do {
		for (i = index; i < index + count; i++)
			if (table->base[i].present)
				break;
		/* empty index found */
		if (i == index + count)
			break;

		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;

		if (index == start_index) {
			spin_unlock(&irq_2_ir_lock);
			printk(KERN_ERR "can't allocate an IRTE\n");
			return -1;
		}
	} while (1);

	for (i = index; i < index + count; i++)
		table->base[i].present = 1;

	irq_iommu = irq_2_iommu_alloc(irq);
	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = mask;

	spin_unlock(&irq_2_ir_lock);

	return index;
}

/* invalidate the interrupt entry cache (IEC) via queued invalidation */
static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
{
	struct qi_desc desc;

	desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask)
		   | QI_IEC_SELECTIVE;
	desc.high = 0;

	qi_submit_sync(&desc, iommu);
}

int map_irq_to_irte_handle(int irq, u16 *sub_handle)
{
	int index;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	*sub_handle = irq_iommu->sub_handle;
	index = irq_iommu->irte_index;
	spin_unlock(&irq_2_ir_lock);
	return index;
}

int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);

	irq_iommu = irq_2_iommu_alloc(irq);

	irq_iommu->iommu = iommu;
	irq_iommu->irte_index = index;
	irq_iommu->sub_handle = subhandle;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
{
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_2_iommu(irq)->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

int modify_irte(int irq, struct irte *irte_modified)
{
	int index;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
	__iommu_flush_cache(iommu, irte, sizeof(*irte));

	qi_flush_iec(iommu, index, 0);

	spin_unlock(&irq_2_ir_lock);
	return 0;
}

int flush_irte(int irq)
{
	int index;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	spin_unlock(&irq_2_ir_lock);

	return 0;
}

struct intel_iommu *map_ioapic_to_ir(int apic)
{
	int i;

	for (i = 0; i < MAX_IO_APICS; i++)
		if (ir_ioapic[i].id == apic)
			return ir_ioapic[i].iommu;
	return NULL;
}

struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
{
	struct dmar_drhd_unit *drhd;

	drhd = dmar_find_matched_drhd_unit(dev);
	if (!drhd)
		return NULL;

	return drhd->iommu;
}

int free_irte(int irq)
{
	int index, i;
	struct irte *irte;
	struct intel_iommu *iommu;
	struct irq_2_iommu *irq_iommu;

	spin_lock(&irq_2_ir_lock);
	irq_iommu = valid_irq_2_iommu(irq);
	if (!irq_iommu) {
		spin_unlock(&irq_2_ir_lock);
		return -1;
	}

	iommu = irq_iommu->iommu;

	index = irq_iommu->irte_index + irq_iommu->sub_handle;
	irte = &iommu->ir_table->base[index];

	if (!irq_iommu->sub_handle) {
		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
			set_64bit((unsigned long *)irte, 0);
		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
	}

	irq_iommu->iommu = NULL;
	irq_iommu->irte_index = 0;
	irq_iommu->sub_handle = 0;
	irq_iommu->irte_mask = 0;

	spin_unlock(&irq_2_ir_lock);

	return 0;
}

/* point the hardware at the remap table, then enable interrupt-remapping */
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{
	u64 addr;
	u32 cmd, sts;
	unsigned long flags;

	addr = virt_to_phys((void *)iommu->ir_table->base);

	spin_lock_irqsave(&iommu->register_lock, flags);

	dmar_writeq(iommu->reg + DMAR_IRTA_REG,
		    (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE);

	/* Set interrupt-remapping table pointer */
	cmd = iommu->gcmd | DMA_GCMD_SIRTP;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRTPS), sts);
	spin_unlock_irqrestore(&iommu->register_lock, flags);

	/*
	 * global invalidation of interrupt entry cache before enabling
	 * interrupt-remapping.
	 */
	qi_global_iec(iommu);

	spin_lock_irqsave(&iommu->register_lock, flags);

	/* Enable interrupt-remapping */
	cmd = iommu->gcmd | DMA_GCMD_IRE;
	iommu->gcmd |= DMA_GCMD_IRE;
	writel(cmd, iommu->reg + DMAR_GCMD_REG);

	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		      readl, (sts & DMA_GSTS_IRES), sts);

	spin_unlock_irqrestore(&iommu->register_lock, flags);
}


static int setup_intr_remapping(struct intel_iommu *iommu, int mode)
{
	struct ir_table *ir_table;
	struct page *pages;

	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
					     GFP_KERNEL);

	if (!iommu->ir_table)
		return -ENOMEM;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER);

	if (!pages) {
		printk(KERN_ERR "failed to allocate pages of order %d\n",
		       INTR_REMAP_PAGE_ORDER);
		kfree(iommu->ir_table);
		return -ENOMEM;
	}

	ir_table->base = page_address(pages);

	iommu_set_intr_remapping(iommu, mode);
	return 0;
}

int __init enable_intr_remapping(int eim)
{
	struct dmar_drhd_unit *drhd;
	int setup = 0;

	/*
	 * check for Interrupt-remapping support
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (eim && !ecap_eim_support(iommu->ecap)) {
			printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, "
			       "ecap %Lx\n", drhd->reg_base_addr, iommu->ecap);
			return -1;
		}
	}

	/*
	 * Enable queued invalidation for all the DRHD's.
	 */
	for_each_drhd_unit(drhd) {
		int ret;
		struct intel_iommu *iommu = drhd->iommu;
		ret = dmar_enable_qi(iommu);

		if (ret) {
			printk(KERN_ERR "DRHD %Lx: failed to enable queued "
			       "invalidation, ecap %Lx, ret %d\n",
			       drhd->reg_base_addr, iommu->ecap, ret);
			return -1;
		}
	}

	/*
	 * Set up Interrupt-remapping for all the DRHD's now.
	 */
	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (!ecap_ir_support(iommu->ecap))
			continue;

		if (setup_intr_remapping(iommu, eim))
			goto error;

		setup = 1;
	}

	if (!setup)
		goto error;

	intr_remapping_enabled = 1;

	return 0;

error:
	/*
	 * handle error condition gracefully here!
	 */
	return -1;
}

static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
				 struct intel_iommu *iommu)
{
	struct acpi_dmar_hardware_unit *drhd;
	struct acpi_dmar_device_scope *scope;
	void *start, *end;

	drhd = (struct acpi_dmar_hardware_unit *)header;

	start = (void *)(drhd + 1);
	end = ((void *)drhd) + header->length;

	while (start < end) {
		scope = start;
		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
			if (ir_ioapic_num == MAX_IO_APICS) {
				printk(KERN_WARNING "Exceeded Max IO APICS\n");
				return -1;
			}

			printk(KERN_INFO "IOAPIC id %d under DRHD base"
			       " 0x%Lx\n", scope->enumeration_id,
			       drhd->address);

			ir_ioapic[ir_ioapic_num].iommu = iommu;
			ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
			ir_ioapic_num++;
		}
		start += scope->length;
	}

	return 0;
}

/*
 * Finds the association between each IOAPIC and its Interrupt-remapping
 * hardware unit.
 */
int __init parse_ioapics_under_ir(void)
{
	struct dmar_drhd_unit *drhd;
	int ir_supported = 0;

	for_each_drhd_unit(drhd) {
		struct intel_iommu *iommu = drhd->iommu;

		if (ecap_ir_support(iommu->ecap)) {
			if (ir_parse_ioapic_scope(drhd->hdr, iommu))
				return -1;

			ir_supported = 1;
		}
	}

	if (ir_supported && ir_ioapic_num != nr_ioapics) {
		printk(KERN_WARNING
		       "Not all IO-APIC's listed under remapping hardware\n");
		return -1;
	}

	return ir_supported;
}
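
For orientation, here is a minimal caller sketch, not part of the file above, showing the IRTE lifecycle these entry points expose: look up the device's IOMMU with map_dev_to_ir(), reserve an entry with alloc_irte(), publish it with modify_irte(), and release it with free_irte(). The helper name example_remap_one_irq and the zero-filled irte setup are illustrative assumptions; the real callers at this kernel version live in the x86 APIC/MSI setup code, where the dest_id encoding depends on xAPIC vs. x2APIC mode.

/*
 * Illustrative sketch only -- not part of intr_remapping.c. Assumes a
 * PCI device whose DRHD unit supports interrupt remapping and an irq
 * number below nr_irqs; the dest_id encoding is glossed over.
 */
static int example_remap_one_irq(struct pci_dev *dev, int irq,
				 u8 vector, u32 dest)
{
	struct intel_iommu *iommu;
	struct irte irte;
	int index;

	iommu = map_dev_to_ir(dev);		/* DRHD unit covering this device */
	if (!iommu)
		return -1;

	index = alloc_irte(iommu, irq, 1);	/* reserve one table entry */
	if (index < 0)
		return -1;

	memset(&irte, 0, sizeof(irte));
	irte.present = 1;			/* entry becomes valid for hardware */
	irte.vector = vector;
	irte.dest_id = dest;			/* encoding depends on APIC mode */
	modify_irte(irq, &irte);		/* write the entry and flush the IEC */

	/* ... program the device's MSI registers in remappable format ... */

	free_irte(irq);				/* release on interrupt teardown */
	return 0;
}

Note that modify_irte() both writes the entry with set_64bit() and queues a selective interrupt-entry-cache flush, so a single call suffices to make the new entry visible to hardware.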