Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v4.11-rc2 1536 lines 40 kB view raw
/*
 * File: msi.c
 * Purpose: PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 * Copyright (C) 2016 Christoph Hellwig.
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/acpi_iort.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>

#include "pci.h"

/* Global MSI on/off switch; cleared by pci_no_msi() (see pci_msi_enabled()). */
static int pci_msi_enable = 1;
int pci_msi_ignore_mask;

/* MSI-X Message Control: table size field is N-1 encoded. */
#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
/*
 * Prefer a hierarchical MSI irqdomain attached to the device; fall back
 * to the legacy arch hooks when none is present.
 */
static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct irq_domain *domain;

	domain = dev_get_msi_domain(&dev->dev);
	if (domain && irq_domain_is_hierarchy(domain))
		return msi_domain_alloc_irqs(domain, &dev->dev, nvec);

	return arch_setup_msi_irqs(dev, nvec, type);
}

static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct irq_domain *domain;

	domain = dev_get_msi_domain(&dev->dev);
	if (domain && irq_domain_is_hierarchy(domain))
		msi_domain_free_irqs(domain, &dev->dev);
	else
		arch_teardown_msi_irqs(dev);
}
#else
#define pci_msi_setup_msi_irqs		arch_setup_msi_irqs
#define pci_msi_teardown_msi_irqs	arch_teardown_msi_irqs
#endif

/* Arch hooks */

int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_controller *chip = dev->bus->msi;
	int err;

	if (!chip || !chip->setup_irq)
		return -EINVAL;

	err = chip->setup_irq(chip, dev, desc);
	if (err < 0)
		return err;

	irq_set_chip_data(desc->irq, chip);

	return 0;
}

void __weak arch_teardown_msi_irq(unsigned int irq)
{
	struct msi_controller *chip = irq_get_chip_data(irq);

	if (!chip || !chip->teardown_irq)
		return;

	chip->teardown_irq(chip, irq);
}

int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_controller *chip = dev->bus->msi;
	struct msi_desc *entry;
	int ret;

	if (chip && chip->setup_irqs)
		return chip->setup_irqs(chip, dev, nvec, type);
	/*
	 * If an architecture wants to support multiple MSI, it needs to
	 * override arch_setup_msi_irqs()
	 */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	for_each_pci_msi_entry(entry, dev) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;
		/* Positive return means "no vector available": map to -ENOSPC. */
		if (ret > 0)
			return -ENOSPC;
	}

	return 0;
}

/*
 * We have a default implementation available as a separate non-weak
 * function, as it is used by the Xen x86 PCI code
 */
void default_teardown_msi_irqs(struct pci_dev *dev)
{
	int i;
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev)
		if (entry->irq)
			for (i = 0; i < entry->nvec_used; i++)
				arch_teardown_msi_irq(entry->irq + i);
}

void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
{
	return default_teardown_msi_irqs(dev);
}

/* Rewrite one descriptor's cached message to the hardware after resume. */
static void default_restore_msi_irq(struct pci_dev *dev, int irq)
{
	struct msi_desc *entry;

	entry = NULL;
	if (dev->msix_enabled) {
		/*
		 * NOTE(review): if no MSI-X entry matches @irq, the iterator
		 * leaves @entry non-NULL at loop exit — this assumes @irq
		 * always belongs to this device; confirm against callers.
		 */
		for_each_pci_msi_entry(entry, dev) {
			if (irq == entry->irq)
				break;
		}
	} else if (dev->msi_enabled) {
		entry = irq_get_msi_desc(irq);
	}

	if (entry)
		__pci_write_msi_msg(entry, &entry->msg);
}

void __weak arch_restore_msi_irqs(struct pci_dev *dev)
{
	return default_restore_msi_irqs(dev);
}

/* Bitmask covering 2^x MSI vectors (multi-message capable field encoding). */
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	u32 mask_bits = desc->masked;

	/* No-op when masking is globally disabled or unsupported by the device. */
	if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit)
		return 0;

	mask_bits &= ~mask;
	mask_bits |= flag;
	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
			       mask_bits);

	return mask_bits;
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag);
}

/* MMIO address of this descriptor's MSI-X table entry. */
static void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
	return desc->mask_base +
		desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
}

/*
 * This internal function does not flush PCI writes to the device.
 * All users must ensure that they read from the device before either
 * assuming that the device state is up to date, or returning out of this
 * file.  This saves a few milliseconds when initialising devices with lots
 * of MSI-X interrupts.
 */
u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
{
	u32 mask_bits = desc->masked;

	if (pci_msi_ignore_mask)
		return 0;

	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	if (flag)
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	writel(mask_bits, pci_msix_desc_addr(desc) + PCI_MSIX_ENTRY_VECTOR_CTRL);

	return mask_bits;
}

static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	desc->masked = __pci_msix_desc_mask_irq(desc, flag);
}

static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);

	if (desc->msi_attrib.is_msix) {
		msix_mask_irq(desc, flag);
		readl(desc->mask_base);		/* Flush write to device */
	} else {
		/* Multi-MSI: one descriptor covers several irqs; pick the bit. */
		unsigned offset = data->irq - desc->irq;
		msi_mask_irq(desc, 1 << offset, flag << offset);
	}
}

/**
 * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts
 * @data:	pointer to irqdata associated to that interrupt
 */
void pci_msi_mask_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 1);
}
EXPORT_SYMBOL_GPL(pci_msi_mask_irq);

/**
 * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts
 * @data:	pointer to irqdata associated to that interrupt
 */
void pci_msi_unmask_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 0);
}
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);

void default_restore_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev)
		default_restore_msi_irq(dev, entry->irq);
}

void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	/* Reading config/MMIO of a suspended device would return garbage. */
	BUG_ON(dev->current_state != PCI_D0);

	if (entry->msi_attrib.is_msix) {
		void __iomem *base = pci_msix_desc_addr(entry);

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 data;

		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				      &msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					      &msg->address_hi);
			pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
		}
		msg->data = data;
	}
}

void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	struct pci_dev *dev = msi_desc_to_pci_dev(entry);

	if (dev->current_state != PCI_D0) {
		/* Don't touch the hardware now */
	} else if (entry->msi_attrib.is_msix) {
		void __iomem *base = pci_msix_desc_addr(entry);

		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
	} else {
		int pos = dev->msi_cap;
		u16 msgctl;

		/* Refresh the multi-message-enable field alongside the message. */
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
		msgctl &= ~PCI_MSI_FLAGS_QSIZE;
		msgctl |= entry->msi_attrib.multiple << 4;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				       msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					       msg->address_hi);
			pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
					      msg->data);
		} else {
			pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
					      msg->data);
		}
	}
	/* Cache the message so arch_restore_msi_irqs() can replay it. */
	entry->msg = *msg;
}

void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__pci_write_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(pci_write_msi_msg);

/*
 * Tear down all MSI/MSI-X descriptors of @dev: free the irqs, the
 * descriptors themselves, the MSI-X table mapping and the sysfs entries.
 */
static void free_msi_irqs(struct pci_dev *dev)
{
	struct list_head *msi_list = dev_to_msi_list(&dev->dev);
	struct msi_desc *entry, *tmp;
	struct attribute **msi_attrs;
	struct device_attribute *dev_attr;
	int i, count = 0;

	for_each_pci_msi_entry(entry, dev)
		if (entry->irq)
			for (i = 0; i < entry->nvec_used; i++)
				BUG_ON(irq_has_action(entry->irq + i));

	pci_msi_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			/* All MSI-X entries share one mapping; unmap it once. */
			if (list_is_last(&entry->list, msi_list))
				iounmap(entry->mask_base);
		}

		list_del(&entry->list);
		free_msi_entry(entry);
	}

	if (dev->msi_irq_groups) {
		sysfs_remove_groups(&dev->dev.kobj, dev->msi_irq_groups);
		msi_attrs = dev->msi_irq_groups[0]->attrs;
		while (msi_attrs[count]) {
			dev_attr = container_of(msi_attrs[count],
						struct device_attribute, attr);
			kfree(dev_attr->attr.name);
			kfree(dev_attr);
			++count;
		}
		kfree(msi_attrs);
		kfree(dev->msi_irq_groups[0]);
		kfree(dev->msi_irq_groups);
		dev->msi_irq_groups = NULL;
	}
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

/* Re-program MSI state into the device, e.g. after power transitions. */
static void __pci_restore_msi_state(struct pci_dev *dev)
{
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = irq_get_msi_desc(dev->irq);

	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 0);
	arch_restore_msi_irqs(dev);

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
		     entry->masked);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!dev->msix_enabled)
		return;
	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));

	/* route the table */
	pci_intx_for_msi(dev, 0);
	/* Enable with MASKALL set so no vector fires while being restored. */
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);

	arch_restore_msi_irqs(dev);
	for_each_pci_msi_entry(entry, dev)
		msix_mask_irq(entry, entry->masked);

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}

void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);

/*
 * sysfs show() for /sys/.../msi_irqs/<irq>: prints "msi" or "msix".
 * The attribute's name is the irq number itself.
 */
static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct msi_desc *entry;
	unsigned long irq;
	int retval;

	retval = kstrtoul(attr->attr.name, 10, &irq);
	if (retval)
		return retval;

	entry = irq_get_msi_desc(irq);
	if (entry)
		return sprintf(buf, "%s\n",
				entry->msi_attrib.is_msix ? "msix" : "msi");

	return -ENODEV;
}

static int populate_msi_sysfs(struct pci_dev *pdev)
{
	struct attribute **msi_attrs;
	struct attribute *msi_attr;
	struct device_attribute *msi_dev_attr;
	struct attribute_group *msi_irq_group;
	const struct attribute_group **msi_irq_groups;
	struct msi_desc *entry;
	int ret = -ENOMEM;
	int num_msi = 0;
	int count = 0;
	int i;

	/* Determine how many msi entries we have */
	for_each_pci_msi_entry(entry, pdev)
		num_msi += entry->nvec_used;
	if (!num_msi)
		return 0;

	/* Dynamically create the MSI attributes for the PCI device */
	msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL);
	if (!msi_attrs)
		return -ENOMEM;
	for_each_pci_msi_entry(entry, pdev) {
		for (i = 0; i < entry->nvec_used; i++) {
			msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
			if (!msi_dev_attr)
				goto error_attrs;
			msi_attrs[count] = &msi_dev_attr->attr;

			sysfs_attr_init(&msi_dev_attr->attr);
			/* Attribute name is the irq number (parsed in show()). */
			msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
							    entry->irq + i);
			if (!msi_dev_attr->attr.name)
				goto error_attrs;
			msi_dev_attr->attr.mode = S_IRUGO;
			msi_dev_attr->show = msi_mode_show;
			++count;
		}
	}

	msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
	if (!msi_irq_group)
		goto error_attrs;
	msi_irq_group->name = "msi_irqs";
	msi_irq_group->attrs = msi_attrs;

	msi_irq_groups = kzalloc(sizeof(void *) * 2, GFP_KERNEL);
	if (!msi_irq_groups)
		goto error_irq_group;
	msi_irq_groups[0] = msi_irq_group;

	ret = sysfs_create_groups(&pdev->dev.kobj, msi_irq_groups);
	if (ret)
		goto error_irq_groups;
	pdev->msi_irq_groups = msi_irq_groups;

	return 0;

error_irq_groups:
	kfree(msi_irq_groups);
error_irq_group:
	kfree(msi_irq_group);
error_attrs:
	count = 0;
	msi_attr = msi_attrs[count];
	while (msi_attr) {
		msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
		kfree(msi_attr->name);
		kfree(msi_dev_attr);
		++count;
		msi_attr = msi_attrs[count];
	}
	kfree(msi_attrs);
	return ret;
}

/*
 * Allocate and initialize a single msi_desc covering @nvec MSI vectors,
 * reading the capability layout (64-bit, per-vector masking) from config
 * space.  Returns NULL on allocation failure.
 */
static struct msi_desc *
msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
{
	struct cpumask *masks = NULL;
	struct msi_desc *entry;
	u16 control;

	if (affd) {
		masks = irq_create_affinity_masks(nvec, affd);
		if (!masks)
			pr_err("Unable to allocate affinity masks, ignoring\n");
	}

	/* MSI Entry Initialization */
	entry = alloc_msi_entry(&dev->dev, nvec, masks);
	if (!entry)
		goto out;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	entry->msi_attrib.is_msix	= 0;
	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
	entry->msi_attrib.entry_nr	= 0;
	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
	entry->msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));

	/* Mask register position depends on the 64-bit address capability. */
	if (control & PCI_MSI_FLAGS_64BIT)
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
	else
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;

	/* Save the initial mask status */
	if (entry->msi_attrib.maskbit)
		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);

out:
	kfree(masks);
	return entry;
}

/* Reject 64-bit MSI addresses on devices flagged as unable to use them. */
static int msi_verify_entries(struct pci_dev *dev)
{
	struct msi_desc *entry;

	for_each_pci_msi_entry(entry, dev) {
		if (!dev->no_64bit_msi || !entry->msg.address_hi)
			continue;
		dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
			" tried to assign one above 4G\n");
		return -EIO;
	}
	return 0;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: number of interrupts to allocate
 * @affd: description of automatic irq affinity assignments (may be %NULL)
 *
 * Setup the MSI capability structure of the device with the requested
 * number of interrupts.  A return value of zero indicates the successful
 * setup of an entry with the new MSI irq.  A negative return value indicates
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
static int msi_capability_init(struct pci_dev *dev, int nvec,
			       const struct irq_affinity *affd)
{
	struct msi_desc *entry;
	int ret;
	unsigned mask;

	pci_msi_set_enable(dev, 0);	/* Disable MSI during set up */

	entry = msi_setup_entry(dev, nvec, affd);
	if (!entry)
		return -ENOMEM;

	/* All MSIs are unmasked by default, Mask them all */
	mask = msi_mask(entry->msi_attrib.multi_cap);
	msi_mask_irq(entry, mask, mask);

	list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));

	/* Configure MSI capability structure */
	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
	if (ret) {
		/* Undo the masking applied above before freeing. */
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	ret = msi_verify_entries(dev);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	ret = populate_msi_sysfs(dev);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits	*/
	pci_intx_for_msi(dev, 0);
	pci_msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	pcibios_free_irq(dev);
	dev->irq = entry->irq;
	return 0;
}

/* Map the MSI-X vector table BAR region; NULL if the BAR is unusable. */
static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
	resource_size_t phys_addr;
	u32 table_offset;
	unsigned long flags;
	u8 bir;

	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
			      &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
	flags = pci_resource_flags(dev, bir);
	if (!flags || (flags & IORESOURCE_UNSET))
		return NULL;

	table_offset &= PCI_MSIX_TABLE_OFFSET;
	phys_addr = pci_resource_start(dev, bir) + table_offset;

	return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

/*
 * Allocate one msi_desc per requested MSI-X vector.  @entries, when
 * non-NULL, supplies the table entry number for each vector; otherwise
 * vectors map to entries 0..nvec-1.
 */
static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
			      struct msix_entry *entries, int nvec,
			      const struct irq_affinity *affd)
{
	struct cpumask *curmsk, *masks = NULL;
	struct msi_desc *entry;
	int ret, i;

	if (affd) {
		masks = irq_create_affinity_masks(nvec, affd);
		if (!masks)
			pr_err("Unable to allocate affinity masks, ignoring\n");
	}

	for (i = 0, curmsk = masks; i < nvec; i++) {
		entry = alloc_msi_entry(&dev->dev, 1, curmsk);
		if (!entry) {
			/* First failure: nothing queued yet, just unmap. */
			if (!i)
				iounmap(base);
			else
				free_msi_irqs(dev);
			/* No enough memory. Don't try again */
			ret = -ENOMEM;
			goto out;
		}

		entry->msi_attrib.is_msix	= 1;
		entry->msi_attrib.is_64		= 1;
		if (entries)
			entry->msi_attrib.entry_nr = entries[i].entry;
		else
			entry->msi_attrib.entry_nr = i;
		entry->msi_attrib.default_irq	= dev->irq;
		entry->mask_base		= base;

		list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
		if (masks)
			curmsk++;
	}
	ret = 0;
out:
	kfree(masks);
	return ret;
}

/*
 * Report allocated irqs back through @entries and mask every vector,
 * caching each entry's initial Vector Control value.
 */
static void msix_program_entries(struct pci_dev *dev,
				 struct msix_entry *entries)
{
	struct msi_desc *entry;
	int i = 0;

	for_each_pci_msi_entry(entry, dev) {
		if (entries)
			entries[i++].vector = entry->irq;
		entry->masked = readl(pci_msix_desc_addr(entry) +
				PCI_MSIX_ENTRY_VECTOR_CTRL);
		msix_mask_irq(entry, 1);
	}
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 * @affd: Optional pointer to enable automatic affinity assignement
 *
 * Setup the MSI-X capability structure of device function with a
 * single MSI-X irq. A return of zero indicates the successful setup of
 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
				int nvec, const struct irq_affinity *affd)
{
	int ret;
	u16 control;
	void __iomem *base;

	/* Ensure MSI-X is disabled while it is set up */
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	/* Request & Map MSI-X table region */
	base = msix_map_region(dev, msix_table_size(control));
	if (!base)
		return -ENOMEM;

	ret = msix_setup_entries(dev, base, entries, nvec, affd);
	if (ret)
		return ret;

	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret)
		goto out_avail;

	/* Check if all MSI entries honor device restrictions */
	ret = msi_verify_entries(dev);
	if (ret)
		goto out_free;

	/*
	 * Some devices require MSI-X to be enabled before we can touch the
	 * MSI-X registers. We need to mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
	 */
	pci_msix_clear_and_set_ctrl(dev, 0,
				PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);

	msix_program_entries(dev, entries);

	ret = populate_msi_sysfs(dev);
	if (ret)
		goto out_free;

	/* Set MSI-X enabled bits and unmask the function */
	pci_intx_for_msi(dev, 0);
	dev->msix_enabled = 1;
	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);

	pcibios_free_irq(dev);
	return 0;

out_avail:
	if (ret < 0) {
		/*
		 * If we had some success, report the number of irqs
		 * we succeeded in setting up.
		 */
		struct msi_desc *entry;
		int avail = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (entry->irq != 0)
				avail++;
		}
		if (avail != 0)
			ret = avail;
	}

out_free:
	free_msi_irqs(dev);

	return ret;
}

/**
 * pci_msi_supported - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested ?
 *
 * Look at global flags, the device itself, and its parent buses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 1, else return 0.
 **/
static int pci_msi_supported(struct pci_dev *dev, int nvec)
{
	struct pci_bus *bus;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable)
		return 0;

	if (!dev || dev->no_msi || dev->current_state != PCI_D0)
		return 0;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return 0;

	/*
	 * Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return 0;

	return 1;
}

/**
 * pci_msi_vec_count - Return the number of MSI vectors a device can send
 * @dev: device to report about
 *
 * This function returns the number of MSI vectors a device requested via
 * Multiple Message Capable register. It returns a negative errno if the
 * device is not capable sending MSI interrupts. Otherwise, the call succeeds
 * and returns a power of two, up to a maximum of 2^5 (32), according to the
 * MSI specification.
 **/
int pci_msi_vec_count(struct pci_dev *dev)
{
	int ret;
	u16 msgctl;

	if (!dev->msi_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
	ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);

	return ret;
}
EXPORT_SYMBOL(pci_msi_vec_count);

/*
 * Disable MSI on @dev and fall back to the INTx pin, keeping the cached
 * mask state so a later restore can reapply it.  Descriptors are freed
 * separately by pci_disable_msi().
 */
void pci_msi_shutdown(struct pci_dev *dev)
{
	struct msi_desc *desc;
	u32 mask;

	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
	desc = first_pci_msi_entry(dev);

	pci_msi_set_enable(dev, 0);
	pci_intx_for_msi(dev, 1);
	dev->msi_enabled = 0;

	/* Return the device with MSI unmasked as initial states */
	mask = msi_mask(desc->msi_attrib.multi_cap);
	/* Keep cached state to be restored */
	__pci_msi_desc_mask_irq(desc, mask, ~mask);

	/* Restore dev->irq to its default pin-assertion irq */
	dev->irq = desc->msi_attrib.default_irq;
	pcibios_alloc_irq(dev);
}

void pci_disable_msi(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msi_enabled)
		return;

	pci_msi_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);

/**
 * pci_msix_vec_count - return the number of device's MSI-X table entries
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * This function returns the number of device's MSI-X table entries and
 * therefore the number of MSI-X vectors device is capable of sending.
 * It returns a negative errno if the device is not capable of sending MSI-X
 * interrupts.
 **/
int pci_msix_vec_count(struct pci_dev *dev)
{
	u16 control;

	if (!dev->msix_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	return msix_table_size(control);
}
EXPORT_SYMBOL(pci_msix_vec_count);

static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
			     int nvec, const struct irq_affinity *affd)
{
	int nr_entries;
	int i, j;

	if (!pci_msi_supported(dev, nvec))
		return -EINVAL;

	nr_entries = pci_msix_vec_count(dev);
	if (nr_entries < 0)
		return nr_entries;
	/* Positive return tells the caller how many vectors are available. */
	if (nvec > nr_entries)
		return nr_entries;

	if (entries) {
		/* Check for any invalid entries */
		for (i = 0; i < nvec; i++) {
			if (entries[i].entry >= nr_entries)
				return -EINVAL;		/* invalid entry */
			for (j = i + 1; j < nvec; j++) {
				if (entries[i].entry == entries[j].entry)
					return -EINVAL;	/* duplicate entry */
			}
		}
	}
	WARN_ON(!!dev->msix_enabled);

	/* Check whether driver already requested for MSI irq */
	if (dev->msi_enabled) {
		dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
		return -EINVAL;
	}
	return msix_capability_init(dev, entries, nvec, affd);
}

/**
 * pci_enable_msix - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries (optional)
 * @nvec: number of MSI-X irqs requested for allocation by device driver
 *
 * Setup the MSI-X capability structure of device function with the number
 * of requested irqs upon its software driver call to request for
 * MSI-X mode enabled on its hardware device function. A return of zero
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
 * Or a return of > 0 indicates that driver request is exceeding the number
 * of irqs or MSI-X vectors available. Driver should use the returned value to
 * re-send its request.
 **/
int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
	return __pci_enable_msix(dev, entries, nvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix);

/*
 * Disable MSI-X on @dev, masking every vector first (cached state kept
 * for restore).  Descriptors are freed separately by pci_disable_msix().
 */
void pci_msix_shutdown(struct pci_dev *dev)
{
	struct msi_desc *entry;

	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	/* Return the device with MSI-X masked as initial states */
	for_each_pci_msi_entry(entry, dev) {
		/* Keep cached states to be restored */
		__pci_msix_desc_mask_irq(entry, 1);
	}

	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
	pci_intx_for_msi(dev, 1);
	dev->msix_enabled = 0;
	pcibios_alloc_irq(dev);
}

void pci_disable_msix(struct pci_dev *dev)
{
	if (!pci_msi_enable || !dev || !dev->msix_enabled)
		return;

	pci_msix_shutdown(dev);
	free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);

void pci_no_msi(void)
{
	pci_msi_enable = 0;
}

/**
 * pci_msi_enabled - is MSI enabled?
 *
 * Returns true if MSI has not been disabled by the command-line option
 * pci=nomsi.
 **/
int pci_msi_enabled(void)
{
	return pci_msi_enable;
}
EXPORT_SYMBOL(pci_msi_enabled);

/*
 * Allocate between @minvec and @maxvec MSI vectors, retrying with the
 * count reported by msi_capability_init() (its positive return) until it
 * succeeds or drops below @minvec.  Returns the number allocated or a
 * negative errno.
 */
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
				  const struct irq_affinity *affd)
{
	int nvec;
	int rc;

	if (!pci_msi_supported(dev, minvec))
		return -EINVAL;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev,
			 "can't enable MSI (MSI-X already enabled)\n");
		return -EINVAL;
	}

	if (maxvec < minvec)
		return -ERANGE;

	nvec = pci_msi_vec_count(dev);
	if (nvec < 0)
		return nvec;
	if (nvec < minvec)
		return -ENOSPC;

	if (nvec > maxvec)
		nvec = maxvec;

	for (;;) {
		if (affd) {
			nvec = irq_calc_affinity_vectors(nvec, affd);
			if (nvec < minvec)
				return -ENOSPC;
		}

		rc = msi_capability_init(dev, nvec, affd);
		if (rc == 0)
			return nvec;

		if (rc < 0)
			return rc;
		if (rc < minvec)
			return -ENOSPC;

		/* Retry with the number of vectors the device can give us. */
		nvec = rc;
	}
}

/* deprecated, don't use */
int pci_enable_msi(struct pci_dev *dev)
{
	int rc = __pci_enable_msi_range(dev, 1, 1, NULL);
	if (rc < 0)
		return rc;
	return 0;
}
EXPORT_SYMBOL(pci_enable_msi);

/*
 * MSI-X analogue of __pci_enable_msi_range(): retry with the positive
 * "available" count returned by __pci_enable_msix() until success or
 * the count falls below @minvec.
 */
static int __pci_enable_msix_range(struct pci_dev *dev,
				   struct msix_entry *entries, int minvec,
				   int maxvec, const struct irq_affinity *affd)
{
	int rc, nvec = maxvec;

	if (maxvec < minvec)
		return -ERANGE;

	for (;;) {
		if (affd) {
			nvec = irq_calc_affinity_vectors(nvec, affd);
			if (nvec < minvec)
				return -ENOSPC;
		}

		rc = __pci_enable_msix(dev, entries, nvec, affd);
		if (rc == 0)
			return nvec;

		if (rc < 0)
			return rc;
		if (rc < minvec)
			return -ENOSPC;

		nvec = rc;
	}
}

/**
 * pci_enable_msix_range - configure device's MSI-X capability structure
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of MSI-X entries
 * @minvec: minimum number of MSI-X irqs requested
 * @maxvec: maximum number of MSI-X irqs requested
 *
 * Setup the MSI-X capability structure of device function with a maximum
 * possible number of interrupts in the range between @minvec and @maxvec
 * upon its software driver call to request for MSI-X mode enabled on its
 * hardware device function. It returns a negative errno if an error occurs.
 * If it succeeds, it returns the actual number of interrupts allocated and
 * indicates the successful configuration of MSI-X capability structure
 * with new allocated MSI-X interrupts.
 **/
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
			  int minvec, int maxvec)
{
	return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL);
}
EXPORT_SYMBOL(pci_enable_msix_range);

/**
 * pci_alloc_irq_vectors_affinity - allocate multiple IRQs for a device
 * @dev:		PCI device to operate on
 * @min_vecs:		minimum number of vectors required (must be >= 1)
 * @max_vecs:		maximum (desired) number of vectors
 * @flags:		flags or quirks for the allocation
 * @affd:		optional description of the affinity requirements
 *
 * Allocate up to @max_vecs interrupt vectors for @dev, using MSI-X or MSI
 * vectors if available, and fall back to a single legacy vector
 * if neither is available. Return the number of vectors allocated,
 * (which might be smaller than @max_vecs) if successful, or a negative
 * error code on error. If less than @min_vecs interrupt vectors are
 * available for @dev the function will fail with -ENOSPC.
 *
 * To get the Linux IRQ number used for a vector that can be passed to
 * request_irq() use the pci_irq_vector() helper.
 */
int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
				   unsigned int max_vecs, unsigned int flags,
				   const struct irq_affinity *affd)
{
	static const struct irq_affinity msi_default_affd;
	int vecs = -ENOSPC;

	if (flags & PCI_IRQ_AFFINITY) {
		if (!affd)
			affd = &msi_default_affd;

		if (affd->pre_vectors + affd->post_vectors > min_vecs)
			return -EINVAL;

		/*
		 * If there aren't any vectors left after applying the pre/post
		 * vectors don't bother with assigning affinity.
		 */
		if (affd->pre_vectors + affd->post_vectors == min_vecs)
			affd = NULL;
	} else {
		if (WARN_ON(affd))
			affd = NULL;
	}

	/* Try MSI-X first, then MSI, then the legacy INTx line. */
	if (flags & PCI_IRQ_MSIX) {
		vecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
				affd);
		if (vecs > 0)
			return vecs;
	}

	if (flags & PCI_IRQ_MSI) {
		vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
		if (vecs > 0)
			return vecs;
	}

	/* use legacy irq if allowed */
	if (flags & PCI_IRQ_LEGACY) {
		if (min_vecs == 1 && dev->irq) {
			pci_intx(dev, 1);
			return 1;
		}
	}

	return vecs;
}
EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);

/**
 * pci_free_irq_vectors - free previously allocated IRQs for a device
 * @dev:		PCI device to operate on
 *
 * Undoes the allocations and enabling in pci_alloc_irq_vectors().
 */
void pci_free_irq_vectors(struct pci_dev *dev)
{
	pci_disable_msix(dev);
	pci_disable_msi(dev);
}
EXPORT_SYMBOL(pci_free_irq_vectors);

/**
 * pci_irq_vector - return Linux IRQ number of a device vector
 * @dev: PCI device to operate on
 * @nr: device-relative interrupt vector index (0-based).
 */
int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	if (dev->msix_enabled) {
		struct msi_desc *entry;
		int i = 0;

		/* MSI-X irqs need not be consecutive; walk the list. */
		for_each_pci_msi_entry(entry, dev) {
			if (i == nr)
				return entry->irq;
			i++;
		}
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (dev->msi_enabled) {
		struct msi_desc *entry = first_pci_msi_entry(dev);

		if (WARN_ON_ONCE(nr >= entry->nvec_used))
			return -EINVAL;
	} else {
		if (WARN_ON_ONCE(nr > 0))
			return -EINVAL;
	}

	/* MSI and legacy irqs are consecutive starting at dev->irq. */
	return dev->irq + nr;
}
EXPORT_SYMBOL(pci_irq_vector);

/**
 * pci_irq_get_affinity - return the affinity of a particular msi vector
 * @dev:	PCI device to operate on
 * @nr:		device-relative interrupt vector index (0-based).
 */
const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
{
	if (dev->msix_enabled) {
		struct msi_desc *entry;
		int i = 0;

		for_each_pci_msi_entry(entry, dev) {
			if (i == nr)
				return entry->affinity;
			i++;
		}
		WARN_ON_ONCE(1);
		return NULL;
	} else if (dev->msi_enabled) {
		struct msi_desc *entry = first_pci_msi_entry(dev);

		if (WARN_ON_ONCE(!entry || !entry->affinity ||
				 nr >= entry->nvec_used))
			return NULL;

		return &entry->affinity[nr];
	} else {
		/* No MSI/MSI-X: the legacy irq may land on any CPU. */
		return cpu_possible_mask;
	}
}
EXPORT_SYMBOL(pci_irq_get_affinity);

/**
 * pci_irq_get_node - return the numa node of a particular msi vector
 * @pdev:	PCI device to operate on
 * @vec:	device-relative interrupt vector index (0-based).
1305 */ 1306int pci_irq_get_node(struct pci_dev *pdev, int vec) 1307{ 1308 const struct cpumask *mask; 1309 1310 mask = pci_irq_get_affinity(pdev, vec); 1311 if (mask) 1312 return local_memory_node(cpu_to_node(cpumask_first(mask))); 1313 return dev_to_node(&pdev->dev); 1314} 1315EXPORT_SYMBOL(pci_irq_get_node); 1316 1317struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) 1318{ 1319 return to_pci_dev(desc->dev); 1320} 1321EXPORT_SYMBOL(msi_desc_to_pci_dev); 1322 1323void *msi_desc_to_pci_sysdata(struct msi_desc *desc) 1324{ 1325 struct pci_dev *dev = msi_desc_to_pci_dev(desc); 1326 1327 return dev->bus->sysdata; 1328} 1329EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata); 1330 1331#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN 1332/** 1333 * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space 1334 * @irq_data: Pointer to interrupt data of the MSI interrupt 1335 * @msg: Pointer to the message 1336 */ 1337void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) 1338{ 1339 struct msi_desc *desc = irq_data_get_msi_desc(irq_data); 1340 1341 /* 1342 * For MSI-X desc->irq is always equal to irq_data->irq. For 1343 * MSI only the first interrupt of MULTI MSI passes the test. 1344 */ 1345 if (desc->irq == irq_data->irq) 1346 __pci_write_msi_msg(desc, msg); 1347} 1348 1349/** 1350 * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source 1351 * @dev: Pointer to the PCI device 1352 * @desc: Pointer to the msi descriptor 1353 * 1354 * The ID number is only used within the irqdomain. 
1355 */ 1356irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, 1357 struct msi_desc *desc) 1358{ 1359 return (irq_hw_number_t)desc->msi_attrib.entry_nr | 1360 PCI_DEVID(dev->bus->number, dev->devfn) << 11 | 1361 (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; 1362} 1363 1364static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) 1365{ 1366 return !desc->msi_attrib.is_msix && desc->nvec_used > 1; 1367} 1368 1369/** 1370 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev 1371 * @domain: The interrupt domain to check 1372 * @info: The domain info for verification 1373 * @dev: The device to check 1374 * 1375 * Returns: 1376 * 0 if the functionality is supported 1377 * 1 if Multi MSI is requested, but the domain does not support it 1378 * -ENOTSUPP otherwise 1379 */ 1380int pci_msi_domain_check_cap(struct irq_domain *domain, 1381 struct msi_domain_info *info, struct device *dev) 1382{ 1383 struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev)); 1384 1385 /* Special handling to support __pci_enable_msi_range() */ 1386 if (pci_msi_desc_is_multi_msi(desc) && 1387 !(info->flags & MSI_FLAG_MULTI_PCI_MSI)) 1388 return 1; 1389 else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX)) 1390 return -ENOTSUPP; 1391 1392 return 0; 1393} 1394 1395static int pci_msi_domain_handle_error(struct irq_domain *domain, 1396 struct msi_desc *desc, int error) 1397{ 1398 /* Special handling to support __pci_enable_msi_range() */ 1399 if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC) 1400 return 1; 1401 1402 return error; 1403} 1404 1405#ifdef GENERIC_MSI_DOMAIN_OPS 1406static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, 1407 struct msi_desc *desc) 1408{ 1409 arg->desc = desc; 1410 arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc), 1411 desc); 1412} 1413#else 1414#define pci_msi_domain_set_desc NULL 1415#endif 1416 1417static struct msi_domain_ops pci_msi_domain_ops_default = { 1418 
.set_desc = pci_msi_domain_set_desc, 1419 .msi_check = pci_msi_domain_check_cap, 1420 .handle_error = pci_msi_domain_handle_error, 1421}; 1422 1423static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info) 1424{ 1425 struct msi_domain_ops *ops = info->ops; 1426 1427 if (ops == NULL) { 1428 info->ops = &pci_msi_domain_ops_default; 1429 } else { 1430 if (ops->set_desc == NULL) 1431 ops->set_desc = pci_msi_domain_set_desc; 1432 if (ops->msi_check == NULL) 1433 ops->msi_check = pci_msi_domain_check_cap; 1434 if (ops->handle_error == NULL) 1435 ops->handle_error = pci_msi_domain_handle_error; 1436 } 1437} 1438 1439static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) 1440{ 1441 struct irq_chip *chip = info->chip; 1442 1443 BUG_ON(!chip); 1444 if (!chip->irq_write_msi_msg) 1445 chip->irq_write_msi_msg = pci_msi_domain_write_msg; 1446 if (!chip->irq_mask) 1447 chip->irq_mask = pci_msi_mask_irq; 1448 if (!chip->irq_unmask) 1449 chip->irq_unmask = pci_msi_unmask_irq; 1450} 1451 1452/** 1453 * pci_msi_create_irq_domain - Create a MSI interrupt domain 1454 * @fwnode: Optional fwnode of the interrupt controller 1455 * @info: MSI domain info 1456 * @parent: Parent irq domain 1457 * 1458 * Updates the domain and chip ops and creates a MSI interrupt domain. 1459 * 1460 * Returns: 1461 * A domain pointer or NULL in case of failure. 
1462 */ 1463struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, 1464 struct msi_domain_info *info, 1465 struct irq_domain *parent) 1466{ 1467 struct irq_domain *domain; 1468 1469 if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) 1470 pci_msi_domain_update_dom_ops(info); 1471 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 1472 pci_msi_domain_update_chip_ops(info); 1473 1474 info->flags |= MSI_FLAG_ACTIVATE_EARLY; 1475 1476 domain = msi_create_irq_domain(fwnode, info, parent); 1477 if (!domain) 1478 return NULL; 1479 1480 domain->bus_token = DOMAIN_BUS_PCI_MSI; 1481 return domain; 1482} 1483EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); 1484 1485static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) 1486{ 1487 u32 *pa = data; 1488 1489 *pa = alias; 1490 return 0; 1491} 1492/** 1493 * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID) 1494 * @domain: The interrupt domain 1495 * @pdev: The PCI device. 1496 * 1497 * The RID for a device is formed from the alias, with a firmware 1498 * supplied mapping applied 1499 * 1500 * Returns: The RID. 1501 */ 1502u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) 1503{ 1504 struct device_node *of_node; 1505 u32 rid = 0; 1506 1507 pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); 1508 1509 of_node = irq_domain_get_of_node(domain); 1510 rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) : 1511 iort_msi_map_rid(&pdev->dev, rid); 1512 1513 return rid; 1514} 1515 1516/** 1517 * pci_msi_get_device_domain - Get the MSI domain for a given PCI device 1518 * @pdev: The PCI device 1519 * 1520 * Use the firmware data to find a device-specific MSI domain 1521 * (i.e. not one that is ste as a default). 1522 * 1523 * Returns: The coresponding MSI domain or NULL if none has been found. 
1524 */ 1525struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) 1526{ 1527 struct irq_domain *dom; 1528 u32 rid = 0; 1529 1530 pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); 1531 dom = of_msi_map_get_device_domain(&pdev->dev, rid); 1532 if (!dom) 1533 dom = iort_get_device_domain(&pdev->dev, rid); 1534 return dom; 1535} 1536#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */