Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.16-rc6 1129 lines 32 kB view raw
/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/config.h>
#include <linux/ioport.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>

#include <asm/errno.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "pci.h"
#include "msi.h"

/* Default destination CPU for freshly programmed MSI/MSI-X messages. */
#define MSI_TARGET_CPU		first_cpu(cpu_online_map)

/* Protects msi_desc[], vector_irq[] and the vector accounting counters. */
static DEFINE_SPINLOCK(msi_lock);
/* Per-vector MSI bookkeeping; NULL means the vector carries no MSI. */
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;

static int pci_msi_enable = 1;	/* cleared when MSI is globally disabled */
static int last_alloc_vector;
static int nr_released_vectors;
static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
static int nr_msix_devices;

#ifndef CONFIG_X86_IO_APIC
int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
#endif

/* Slab constructor: zero a freshly allocated cache object. */
static void msi_cache_ctor(void *p, kmem_cache_t *cache, unsigned long flags)
{
	/* NOTE(review): the size cleared here matches the object size passed
	 * to kmem_cache_create() in msi_cache_init() below. */
	memset(p, 0, NR_IRQS * sizeof(struct msi_desc));
}

/* Create the slab cache used by alloc_msi_entry(); 0 on success. */
static int msi_cache_init(void)
{
	msi_cachep = kmem_cache_create("msi_cache",
			NR_IRQS * sizeof(struct msi_desc),
			0, SLAB_HWCACHE_ALIGN, msi_cache_ctor, NULL);
	if (!msi_cachep)
		return -ENOMEM;

	return 0;
}

/*
 * msi_set_mask_bit - mask (flag=1) or unmask (flag=0) a single vector.
 * For MSI the mask bits live in config space; for MSI-X each table entry
 * has a memory-mapped vector-control word.
 */
static void msi_set_mask_bit(unsigned int vector, int flag)
{
	struct msi_desc *entry;

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev || !entry->mask_base)
		return;
	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int	pos;
		u32	mask_bits;

		/* For MSI, mask_base actually stores the config-space offset
		 * of the mask bits register (see msi_capability_init()). */
		pos = (long)entry->mask_base;
		pci_read_config_dword(entry->dev, pos, &mask_bits);
		mask_bits &= ~(1);
		mask_bits |= flag;
		pci_write_config_dword(entry->dev, pos, mask_bits);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET;
		writel(flag, entry->mask_base + offset);
		break;
	}
	default:
		break;
	}
}

#ifdef CONFIG_SMP
/*
 * set_msi_affinity - retarget @vector at the first CPU in @cpu_mask by
 * rewriting the destination-ID field of the low message address.
 */
static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
{
	struct msi_desc *entry;
	struct msg_address address;
	unsigned int irq = vector;
	unsigned int dest_cpu = first_cpu(cpu_mask);

	entry = (struct msi_desc *)msi_desc[vector];
	if (!entry || !entry->dev)
		return;

	switch (entry->msi_attrib.type) {
	case PCI_CAP_ID_MSI:
	{
		int pos;

		if (!(pos = pci_find_capability(entry->dev, PCI_CAP_ID_MSI)))
			return;

		/* Read-modify-write the address register in config space. */
		pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
			&address.lo_address.value);
		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
			MSI_TARGET_CPU_SHIFT);
		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
		pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
			address.lo_address.value);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	case PCI_CAP_ID_MSIX:
	{
		int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET;

		/* Same operation, but through the memory-mapped table. */
		address.lo_address.value = readl(entry->mask_base + offset);
		address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
		address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
			MSI_TARGET_CPU_SHIFT);
		entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
		writel(address.lo_address.value, entry->mask_base + offset);
		set_native_irq_info(irq, cpu_mask);
		break;
	}
	default:
		break;
	}
}
#else
#define set_msi_affinity NULL
#endif /* CONFIG_SMP */

static void mask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 1);
}

static void unmask_MSI_irq(unsigned int vector)
{
	msi_set_mask_bit(vector, 0);
}

/* Mark the vector active; a maskbit-less MSI has nothing to unmask. */
static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || !entry->dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return 0;
	}
	entry->msi_attrib.state = 1;	/* Mark it active */
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;	/* never anything pending */
}

/* Mark the vector active, then clear its hardware mask bit. */
static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
{
	startup_msi_irq_wo_maskbit(vector);
	unmask_MSI_irq(vector);
	return 0;	/* never anything pending */
}

static void shutdown_msi_irq(unsigned int vector)
{
	struct msi_desc *entry;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (entry && entry->dev)
		entry->msi_attrib.state = 0;	/* Mark it not active */
	spin_unlock_irqrestore(&msi_lock, flags);
}

static void end_msi_irq_wo_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	ack_APIC_irq();
}

/* End-of-interrupt for maskbit handlers: .ack masked, so unmask here. */
static void end_msi_irq_w_maskbit(unsigned int vector)
{
	move_native_irq(vector);
	unmask_MSI_irq(vector);
	ack_APIC_irq();
}

static void do_nothing(unsigned int vector)
{
}

/*
 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI-X Capability Structure.
 */
static struct hw_interrupt_type msix_irq_type = {
	.typename	= "PCI-MSI-X",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure with
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_w_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_w_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= unmask_MSI_irq,
	.disable	= mask_MSI_irq,
	.ack		= mask_MSI_irq,
	.end		= end_msi_irq_w_maskbit,
	.set_affinity	= set_msi_affinity
};

/*
 * Interrupt Type for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI Capability Structure without
 * Mask-and-Pending Bits.
 */
static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
	.typename	= "PCI-MSI",
	.startup	= startup_msi_irq_wo_maskbit,
	.shutdown	= shutdown_msi_irq,
	.enable		= do_nothing,
	.disable	= do_nothing,
	.ack		= do_nothing,
	.end		= end_msi_irq_wo_maskbit,
	.set_affinity	= set_msi_affinity
};

/* Fill in the data portion of an MSI message targeting @vector. */
static void msi_data_init(struct msg_data *msi_data,
			  unsigned int vector)
{
	memset(msi_data, 0, sizeof(struct msg_data));
	msi_data->vector = (u8)vector;
	msi_data->delivery_mode = MSI_DELIVERY_MODE;
	msi_data->level = MSI_LEVEL_MODE;
	msi_data->trigger = MSI_TRIGGER_MODE;
}

/* Fill in the address portion of an MSI message, aimed at MSI_TARGET_CPU. */
static void msi_address_init(struct msg_address *msi_address)
{
	unsigned int	dest_id;
	unsigned long	dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);

	memset(msi_address, 0, sizeof(struct msg_address));
	msi_address->hi_address = (u32)0;
	dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
	msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
	msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
	msi_address->lo_address.u.dest_id = dest_id;
	msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
}

static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);

/*
 * Allocate an interrupt vector for MSI use.  While fresh vectors remain
 * they come from assign_irq_vector(); once LAST_DEVICE_VECTOR has been
 * handed out we fall back to reusing vectors released by hot-removed
 * devices (vector_irq[] == 0).  Returns the vector or a negative errno.
 */
static int assign_msi_vector(void)
{
	static int new_vector_avail = 1;
	int vector;
	unsigned long flags;

	/*
	 * msi_lock is provided to ensure that successful allocation of MSI
	 * vector is assigned unique among drivers.
	 */
	spin_lock_irqsave(&msi_lock, flags);

	if (!new_vector_avail) {
		int free_vector = 0;

		/*
		 * vector_irq[] = -1 indicates that this specific vector is:
		 * - assigned for MSI (since MSI have no associated IRQ) or
		 * - assigned for legacy if less than 16, or
		 * - having no corresponding 1:1 vector-to-IOxAPIC IRQ mapping
		 * vector_irq[] = 0 indicates that this vector, previously
		 * assigned for MSI, is freed by hotplug removed operations.
		 * This vector will be reused for any subsequent hotplug added
		 * operations.
		 * vector_irq[] > 0 indicates that this vector is assigned for
		 * IOxAPIC IRQs. This vector and its value provides a 1-to-1
		 * vector-to-IOxAPIC IRQ mapping.
		 */
		for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
			if (vector_irq[vector] != 0)
				continue;
			free_vector = vector;
			/* Prefer a released vector with no stale msi_desc. */
			if (!msi_desc[vector])
				break;
			else
				continue;
		}
		if (!free_vector) {
			spin_unlock_irqrestore(&msi_lock, flags);
			return -EBUSY;
		}
		vector_irq[free_vector] = -1;
		nr_released_vectors--;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (msi_desc[free_vector] != NULL) {
			struct pci_dev *dev;
			int tail;

			/* free all linked vectors before re-assign */
			do {
				spin_lock_irqsave(&msi_lock, flags);
				dev = msi_desc[free_vector]->dev;
				tail = msi_desc[free_vector]->link.tail;
				spin_unlock_irqrestore(&msi_lock, flags);
				msi_free_vector(dev, tail, 1);
			} while (free_vector != tail);
		}

		return free_vector;
	}
	vector = assign_irq_vector(AUTO_ASSIGN);
	last_alloc_vector = vector;
	if (vector == LAST_DEVICE_VECTOR)
		new_vector_avail = 0;

	spin_unlock_irqrestore(&msi_lock, flags);
	return vector;
}

/* Allocate a vector and install the low-level interrupt gate for it. */
static int get_new_vector(void)
{
	int vector;

	if ((vector = assign_msi_vector()) > 0)
		set_intr_gate(vector, interrupt[vector]);

	return vector;
}

/*
 * One-shot MSI subsystem initialization: honour the MSI quirk, create
 * the slab cache and reserve an initial vector.  The result is cached in
 * the static @status so repeated calls are cheap.
 */
static int msi_init(void)
{
	static int status = -ENOMEM;

	if (!status)
		return status;

	if (pci_msi_quirk) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
		status = -EINVAL;
		return status;
	}

	if ((status = msi_cache_init()) < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: MSI cache init failed\n");
		return status;
	}
	last_alloc_vector = assign_irq_vector(AUTO_ASSIGN);
	if (last_alloc_vector < 0) {
		pci_msi_enable = 0;
		printk(KERN_WARNING "PCI: No interrupt vectors available for MSI\n");
		status = -EBUSY;
		return status;
	}
	/* Mark the reserved vector as released so it can be reused later. */
	vector_irq[last_alloc_vector] = 0;
	nr_released_vectors++;

	return status;
}

static int get_msi_vector(struct pci_dev *dev)
{
	return get_new_vector();
}

/* Allocate and zero one msi_desc from the slab cache; NULL on failure. */
static struct msi_desc* alloc_msi_entry(void)
{
	struct msi_desc *entry;

	entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
	if (!entry)
		return NULL;

	memset(entry, 0, sizeof(struct msi_desc));
	entry->link.tail = entry->link.head = 0;	/* single message */
	entry->dev = NULL;

	return entry;
}

/* Publish @entry as the owner of @vector under msi_lock. */
static void attach_msi_entry(struct msi_desc *entry, int vector)
{
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	msi_desc[vector] = entry;
	spin_unlock_irqrestore(&msi_lock, flags);
}

/* Install the matching MSI/MSI-X hw_interrupt_type on irq_desc[pos]. */
static void irq_handler_init(int cap_id, int pos, int mask)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_desc[pos].lock, flags);
	if (cap_id == PCI_CAP_ID_MSIX)
		irq_desc[pos].handler = &msix_irq_type;
	else {
		if (!mask)
			irq_desc[pos].handler = &msi_irq_wo_maskbit_type;
		else
			irq_desc[pos].handler = &msi_irq_w_maskbit_type;
	}
	spin_unlock_irqrestore(&irq_desc[pos].lock, flags);
}

/*
 * Turn on MSI or MSI-X in the device's capability control register and
 * disable INTx assertion for PCI Express endpoints.
 */
static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_enable(control, 1);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_enable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 0);	/* disable intx */
	}
}
/*
 * Turn off MSI or MSI-X in the device's capability control register and
 * re-enable INTx assertion for PCI Express endpoints.
 */
void disable_msi_mode(struct pci_dev *dev, int pos, int type)
{
	u16 control;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (type == PCI_CAP_ID_MSI) {
		/* Set enabled bits to single MSI & enable MSI_enable bit */
		msi_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	} else {
		msix_disable(control);
		pci_write_config_word(dev, msi_control_reg(pos), control);
	}
	if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
		/* PCI Express Endpoint device detected */
		pci_intx(dev, 1);	/* enable intx */
	}
}

/*
 * Look up a vector of @type previously assigned to @dev (matched on the
 * saved default_vector == dev->irq).  On success dev->irq is overwritten
 * with that vector and 0 is returned; -EACCES means no match.
 */
static int msi_lookup_vector(struct pci_dev *dev, int type)
{
	int vector;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	for (vector = FIRST_DEVICE_VECTOR; vector < NR_IRQS; vector++) {
		if (!msi_desc[vector] || msi_desc[vector]->dev != dev ||
			msi_desc[vector]->msi_attrib.type != type ||
			msi_desc[vector]->msi_attrib.default_vector != dev->irq)
			continue;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* This pre-assigned MSI vector for this device
		   already exists. Override dev->irq with this vector */
		dev->irq = vector;
		return 0;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return -EACCES;
}

/* Count MSI/MSI-X capable devices at bus scan time for vector budgeting. */
void pci_scan_msi_device(struct pci_dev *dev)
{
	if (!dev)
		return;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX) > 0)
		nr_msix_devices++;
	else if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0)
		nr_reserved_vectors++;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with a single
 * MSI vector, regardless of device function is capable of handling
 * multiple messages. A return of zero indicates the successful setup
 * of an entry zero with the new MSI vector or non-zero for otherwise.
 **/
static int msi_capability_init(struct pci_dev *dev)
{
	struct msi_desc *entry;
	struct msg_address address;
	struct msg_data data;
	int pos, vector;
	u16 control;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	/* MSI Entry Initialization */
	if (!(entry = alloc_msi_entry()))
		return -ENOMEM;

	if ((vector = get_msi_vector(dev)) < 0) {
		kmem_cache_free(msi_cachep, entry);
		return -EBUSY;
	}
	/* Single-message device: the link list is just this vector. */
	entry->link.head = vector;
	entry->link.tail = vector;
	entry->msi_attrib.type = PCI_CAP_ID_MSI;
	entry->msi_attrib.state = 0;			/* Mark it not active */
	entry->msi_attrib.entry_nr = 0;
	entry->msi_attrib.maskbit = is_mask_bit_support(control);
	entry->msi_attrib.default_vector = dev->irq;	/* Save IOAPIC IRQ */
	dev->irq = vector;
	entry->dev = dev;
	if (is_mask_bit_support(control)) {
		/* Stash the config-space offset of the mask register in
		 * mask_base; msi_set_mask_bit() casts it back out. */
		entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos,
				is_64bit_address(control));
	}
	/* Replace with MSI handler */
	irq_handler_init(PCI_CAP_ID_MSI, vector, entry->msi_attrib.maskbit);
	/* Configure MSI capability structure */
	msi_address_init(&address);
	msi_data_init(&data, vector);
	entry->msi_attrib.current_cpu = ((address.lo_address.u.dest_id >>
				MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
	pci_write_config_dword(dev, msi_lower_address_reg(pos),
			address.lo_address.value);
	if (is_64bit_address(control)) {
		pci_write_config_dword(dev,
			msi_upper_address_reg(pos), address.hi_address);
		pci_write_config_word(dev,
			msi_data_reg(pos, 1), *((u32*)&data));
	} else
		pci_write_config_word(dev,
			msi_data_reg(pos, 0), *((u32*)&data));
	if (entry->msi_attrib.maskbit) {
		unsigned int maskbits, temp;
		/* All MSIs are unmasked by default, Mask them all */
		pci_read_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			&maskbits);
		/* Mask bits for every implemented message: 2^multi - 1. */
		temp = (1 << multi_msi_capable(control));
		temp = ((temp - 1) & ~temp);
		maskbits |= temp;
		pci_write_config_dword(dev,
			msi_mask_bits_reg(pos, is_64bit_address(control)),
			maskbits);
	}
	attach_msi_entry(entry, vector);
	/* Set MSI enabled bits	 */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);

	return 0;
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X vector. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated vectors or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	struct msi_desc *head = NULL, *tail = NULL, *entry = NULL;
	struct msg_address address;
	struct msg_data data;
	int vector, pos, i, j, nr_entries, temp = 0;
	u32 phys_addr, table_offset;
	u16 control;
	u8 bir;
	void __iomem *base;

	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	/* Request & Map MSI-X table region */
	pci_read_config_word(dev, msi_control_reg(pos), &control);
	nr_entries = multi_msix_capable(control);
	pci_read_config_dword(dev, msix_table_offset_reg(pos),
		&table_offset);
	/* Table location = BAR[bir] + offset (low bits of the dword). */
	bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
	phys_addr = pci_resource_start (dev, bir);
	phys_addr += (u32)(table_offset & ~PCI_MSIX_FLAGS_BIRMASK);
	base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
	if (base == NULL)
		return -ENOMEM;

	/* MSI-X Table Initialization */
	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry();
		if (!entry)
			break;
		if ((vector = get_msi_vector(dev)) < 0)
			break;

		j = entries[i].entry;
		entries[i].vector = vector;
		entry->msi_attrib.type = PCI_CAP_ID_MSIX;
		entry->msi_attrib.state = 0;		/* Mark it not active */
		entry->msi_attrib.entry_nr = j;
		entry->msi_attrib.maskbit = 1;
		entry->msi_attrib.default_vector = dev->irq;
		entry->dev = dev;
		entry->mask_base = base;
		if (!head) {
			entry->link.head = vector;
			entry->link.tail = vector;
			head = entry;
		} else {
			/* Append to the device's circular vector list. */
			entry->link.head = temp;
			entry->link.tail = tail->link.tail;
			tail->link.tail = vector;
			head->link.head = vector;
		}
		temp = vector;
		tail = entry;
		/* Replace with MSI-X handler */
		irq_handler_init(PCI_CAP_ID_MSIX, vector, 1);
		/* Configure MSI-X capability structure */
		msi_address_init(&address);
		msi_data_init(&data, vector);
		entry->msi_attrib.current_cpu =
			((address.lo_address.u.dest_id >>
			MSI_TARGET_CPU_SHIFT) & MSI_TARGET_CPU_MASK);
		writel(address.lo_address.value,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
		writel(address.hi_address,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
		writel(*(u32*)&data,
			base + j * PCI_MSIX_ENTRY_SIZE +
			PCI_MSIX_ENTRY_DATA_OFFSET);
		attach_msi_entry(entry, vector);
	}
	if (i != nvec) {
		/* Allocation failed part-way through: release everything
		 * granted so far and report busy. */
		i--;
		for (; i >= 0; i--) {
			vector = (entries + i)->vector;
			msi_free_vector(dev, vector, 0);
			(entries + i)->vector = 0;
		}
		return -EBUSY;
	}
	/* Set MSI-X enabled bits */
	enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);

	return 0;
}

/**
 * pci_enable_msi - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 *
 * Setup the MSI capability structure of device function with
 * a single MSI vector upon its software driver call to request for
 * MSI mode enabled on its hardware device function. A return of zero
 * indicates the successful setup of an entry zero with the new MSI
 * vector or non-zero for otherwise.
 **/
int pci_enable_msi(struct pci_dev* dev)
{
	int pos, temp, status = -EINVAL;
	u16 control;

	if (!pci_msi_enable || !dev)
		return status;

	if (dev->no_msi)
		return status;

	temp = dev->irq;

	if ((status = msi_init()) < 0)
		return status;

	if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSI_FLAGS_ENABLE)
		return 0;	/* Already in MSI mode */

	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		/* Lookup Success: re-activate a previously assigned vector. */
		unsigned long flags;

		spin_lock_irqsave(&msi_lock, flags);
		if (!vector_irq[dev->irq]) {
			msi_desc[dev->irq]->msi_attrib.state = 0;
			vector_irq[dev->irq] = -1;
			nr_released_vectors--;
			spin_unlock_irqrestore(&msi_lock, flags);
			enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
			return 0;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		dev->irq = temp;
	}
	/* Check whether driver already requested for MSI-X vectors */
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
			printk(KERN_INFO "PCI: %s: Can't enable MSI. "
			       "Device already has MSI-X vectors assigned\n",
			       pci_name(dev));
			dev->irq = temp;
			return -EINVAL;
	}
	status = msi_capability_init(dev);
	if (!status) {
		if (!pos)
			nr_reserved_vectors--;	/* Only MSI capable */
		else if (nr_msix_devices > 0)
			nr_msix_devices--;	/* Both MSI and MSI-X capable,
						   but choose enabling MSI */
	}

	return status;
}

void pci_disable_msi(struct pci_dev* dev)
{
	struct msi_desc *entry;
	int pos, default_vector;
	u16 control;
	unsigned long flags;

	if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSI)))
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSI_FLAGS_ENABLE))
		return;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[dev->irq];
	if (!entry || !entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return;
	}
	if (entry->msi_attrib.state) {
		/* Vector still in use: driver forgot free_irq(). */
		spin_unlock_irqrestore(&msi_lock, flags);
		printk(KERN_WARNING "PCI: %s: pci_disable_msi() called without "
		       "free_irq() on MSI vector %d\n",
		       pci_name(dev), dev->irq);
		BUG_ON(entry->msi_attrib.state > 0);
	} else {
		vector_irq[dev->irq] = 0; /* free it */
		nr_released_vectors++;
		default_vector = entry->msi_attrib.default_vector;
		spin_unlock_irqrestore(&msi_lock, flags);
		/* Restore dev->irq to its default pin-assertion vector */
		dev->irq = default_vector;
		disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
					PCI_CAP_ID_MSI);
	}
}

/*
 * Release @vector owned by @dev.  With @reassign set the vector stays
 * marked busy (it will be handed to another device); otherwise it goes
 * back to the released pool.  Releasing the last MSI-X vector of a
 * device also unmaps its MSI-X table.
 */
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
{
	struct msi_desc *entry;
	int head, entry_nr, type;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	entry = msi_desc[vector];
	if (!entry || entry->dev != dev) {
		spin_unlock_irqrestore(&msi_lock, flags);
		return -EINVAL;
	}
	type = entry->msi_attrib.type;
	entry_nr = entry->msi_attrib.entry_nr;
	head = entry->link.head;
	base = entry->mask_base;
	/* Unlink from the device's circular vector list. */
	msi_desc[entry->link.head]->link.tail = entry->link.tail;
	msi_desc[entry->link.tail]->link.head = entry->link.head;
	entry->dev = NULL;
	if (!reassign) {
		vector_irq[vector] = 0;
		nr_released_vectors++;
	}
	msi_desc[vector] = NULL;
	spin_unlock_irqrestore(&msi_lock, flags);

	kmem_cache_free(msi_cachep, entry);

	if (type == PCI_CAP_ID_MSIX) {
		if (!reassign)
			writel(1, base +
				entry_nr * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET);

		if (head == vector) {
			/*
			 * Detect last MSI-X vector to be released.
			 * Release the MSI-X memory-mapped table.
			 */
			int pos, nr_entries;
			u32 phys_addr, table_offset;
			u16 control;
			u8 bir;

			pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			nr_entries = multi_msix_capable(control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			phys_addr = pci_resource_start (dev, bir);
			phys_addr += (u32)(table_offset &
				~PCI_MSIX_FLAGS_BIRMASK);
			iounmap(base);
		}
	}

	return 0;
}

/*
 * Re-route an existing circular list of MSI-X vectors (starting at @head)
 * onto the table entries requested in @entries.  Returns 0 with
 * entries[].vector filled in, or -EINVAL with *nvec reduced to the number
 * of vectors actually linked when the caller asked for too many.
 */
static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
{
	int vector = head, tail = 0;
	int i, j = 0, nr_entries = 0;
	void __iomem *base;
	unsigned long flags;

	spin_lock_irqsave(&msi_lock, flags);
	/* Count linked vectors; remember the one matching entries[0]. */
	while (head != tail) {
		nr_entries++;
		tail = msi_desc[vector]->link.tail;
		if (entries[0].entry == msi_desc[vector]->msi_attrib.entry_nr)
			j = vector;
		vector = tail;
	}
	if (*nvec > nr_entries) {
		spin_unlock_irqrestore(&msi_lock, flags);
		*nvec = nr_entries;
		return -EINVAL;
	}
	vector = ((j > 0) ? j : head);
	for (i = 0; i < *nvec; i++) {
		j = msi_desc[vector]->msi_attrib.entry_nr;
		msi_desc[vector]->msi_attrib.state = 0;	/* Mark it not active */
		vector_irq[vector] = -1;		/* Mark it busy */
		nr_released_vectors--;
		entries[i].vector = vector;
		if (j != (entries + i)->entry) {
			/* Requested entry differs from the currently
			 * programmed slot: copy address/data over. */
			base = msi_desc[vector]->mask_base;
			msi_desc[vector]->msi_attrib.entry_nr =
				(entries + i)->entry;
			writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET);
			writel( readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET), base +
				(entries + i)->entry * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET);
			writel( (readl(base + j * PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET) & 0xff00) | vector,
				base + (entries+i)->entry*PCI_MSIX_ENTRY_SIZE +
				PCI_MSIX_ENTRY_DATA_OFFSET);
		}
		vector = msi_desc[vector]->link.tail;
	}
	spin_unlock_irqrestore(&msi_lock, flags);

	return 0;
}
/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @nvec: number of MSI-X vectors requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested vectors upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X vectors. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of vectors available. Driver should use the returned value to re-send
 * its request.
 **/
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
{
	int status, pos, nr_entries, free_vectors;
	int i, j, temp;
	u16 control;
	unsigned long flags;

	if (!pci_msi_enable || !dev || !entries)
		return -EINVAL;

	if ((status = msi_init()) < 0)
		return status;

	if (!(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
		return -EINVAL;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (control & PCI_MSIX_FLAGS_ENABLE)
		return -EINVAL;		/* Already in MSI-X mode */

	nr_entries = multi_msix_capable(control);
	if (nvec > nr_entries)
		return -EINVAL;

	/* Check for any invalid entries */
	for (i = 0; i < nvec; i++) {
		if (entries[i].entry >= nr_entries)
			return -EINVAL;		/* invalid entry */
		for (j = i + 1; j < nvec; j++) {
			if (entries[i].entry == entries[j].entry)
				return -EINVAL;	/* duplicate entry */
		}
	}
	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		/* Lookup Success: reuse the previously assigned vectors. */
		nr_entries = nvec;
		/* Reroute MSI-X table */
		if (reroute_msix_table(dev->irq, entries, &nr_entries)) {
			/* #requested > #previous-assigned */
			dev->irq = temp;
			return nr_entries;
		}
		dev->irq = temp;
		enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
		return 0;
	}
	/* Check whether driver already requested for MSI vector */
	if (pci_find_capability(dev, PCI_CAP_ID_MSI) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
		       "Device already has an MSI vector assigned\n",
		       pci_name(dev));
		dev->irq = temp;
		return -EINVAL;
	}

	spin_lock_irqsave(&msi_lock, flags);
	/*
	 * msi_lock is provided to ensure that enough vectors resources are
	 * available before granting.
	 */
	free_vectors = pci_vector_resources(last_alloc_vector,
				nr_released_vectors);
	/* Ensure that each MSI/MSI-X device has one vector reserved by
	   default to avoid any MSI-X driver to take all available
	   resources */
	free_vectors -= nr_reserved_vectors;
	/* Find the average of free vectors among MSI-X devices */
	if (nr_msix_devices > 0)
		free_vectors /= nr_msix_devices;
	spin_unlock_irqrestore(&msi_lock, flags);

	if (nvec > free_vectors) {
		/* Positive return tells the driver how many it may retry
		 * with; -EBUSY means nothing is available at all. */
		if (free_vectors > 0)
			return free_vectors;
		else
			return -EBUSY;
	}

	status = msix_capability_init(dev, entries, nvec);
	if (!status && nr_msix_devices > 0)
		nr_msix_devices--;

	return status;
}

void pci_disable_msix(struct pci_dev* dev)
{
	int pos, temp;
	u16 control;

	if (!dev || !(pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)))
		return;

	pci_read_config_word(dev, msi_control_reg(pos), &control);
	if (!(control & PCI_MSIX_FLAGS_ENABLE))
		return;

	temp = dev->irq;
	if (!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int state, vector, head, tail = 0, warning = 0;
		unsigned long flags;

		vector = head = dev->irq;
		spin_lock_irqsave(&msi_lock, flags);
		/* Walk the device's circular vector list, freeing every
		 * vector that is no longer active. */
		while (head != tail) {
			state = msi_desc[vector]->msi_attrib.state;
			if (state)
				warning = 1;
			else {
				vector_irq[vector] = 0; /* free it */
				nr_released_vectors++;
			}
			tail = msi_desc[vector]->link.tail;
			vector = tail;
		}
		spin_unlock_irqrestore(&msi_lock, flags);
		if (warning) {
			dev->irq = temp;
			printk(KERN_WARNING "PCI: %s: pci_disable_msix() called without "
			       "free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		} else {
			dev->irq = temp;
			disable_msi_mode(dev,
				pci_find_capability(dev, PCI_CAP_ID_MSIX),
				PCI_CAP_ID_MSIX);

		}
	}
}

/**
 * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
 * @dev: pointer to the pci_dev data structure of MSI(X) device function
 *
 * Being called during hotplug remove, from which the device function
 * is hot-removed. All previous assigned MSI/MSI-X vectors, if
 * allocated for this device function, are reclaimed to unused state,
 * which may be used later on.
 **/
void msi_remove_pci_irq_vectors(struct pci_dev* dev)
{
	int state, pos, temp;
	unsigned long flags;

	if (!pci_msi_enable || !dev)
		return;

	temp = dev->irq;		/* Save IOAPIC IRQ */
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSI)) {
		spin_lock_irqsave(&msi_lock, flags);
		state = msi_desc[dev->irq]->msi_attrib.state;
		spin_unlock_irqrestore(&msi_lock, flags);
		if (state) {
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on MSI vector %d\n",
			       pci_name(dev), dev->irq);
			BUG_ON(state > 0);
		} else /* Release MSI vector assigned to this device */
			msi_free_vector(dev, dev->irq, 0);
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
	if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSIX)) > 0 &&
		!msi_lookup_vector(dev, PCI_CAP_ID_MSIX)) {
		int vector, head, tail = 0, warning = 0;
		void __iomem *base = NULL;

		vector = head = dev->irq;
		while (head != tail) {
			spin_lock_irqsave(&msi_lock, flags);
			state = msi_desc[vector]->msi_attrib.state;
			tail = msi_desc[vector]->link.tail;
			base = msi_desc[vector]->mask_base;
			spin_unlock_irqrestore(&msi_lock, flags);
			if (state)
				warning = 1;
			else if (vector != head) /* Release MSI-X vector */
				msi_free_vector(dev, vector, 0);
			vector = tail;
		}
		/* Release the list head last so msi_free_vector() can tell
		 * it is the final vector and unmap the MSI-X table. */
		msi_free_vector(dev, vector, 0);
		if (warning) {
			/* Force to release the MSI-X memory-mapped table */
			u32 phys_addr, table_offset;
			u16 control;
			u8 bir;

			pci_read_config_word(dev, msi_control_reg(pos),
				&control);
			pci_read_config_dword(dev, msix_table_offset_reg(pos),
				&table_offset);
			bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
			phys_addr = pci_resource_start (dev, bir);
			phys_addr += (u32)(table_offset &
				~PCI_MSIX_FLAGS_BIRMASK);
			iounmap(base);
			printk(KERN_WARNING "PCI: %s: msi_remove_pci_irq_vectors() "
			       "called without free_irq() on all MSI-X vectors\n",
			       pci_name(dev));
			BUG_ON(warning > 0);
		}
		dev->irq = temp;		/* Restore IOAPIC IRQ */
	}
}

EXPORT_SYMBOL(pci_enable_msi);
EXPORT_SYMBOL(pci_disable_msi);
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);