Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

s390/pci: Migrate s390 IRQ logic to IRQ domain API

s390 is one of the last architectures using the legacy API for setup and
teardown of PCI MSI IRQs. Migrate the s390 IRQ allocation and teardown
to the MSI parent domain API. For details, see:

https://lore.kernel.org/lkml/20221111120501.026511281@linutronix.de

In detail, create an MSI parent domain for each PCI domain. When a PCI
device sets up MSI or MSI-X IRQs, the library creates a per-device IRQ
domain, which the device then uses for allocating and freeing its IRQs.

The per-device domain delegates this allocation and freeing to the
parent-domain. In the end, the corresponding callbacks of the parent
domain are responsible for allocating and freeing the IRQs.

The allocation is split into two parts:
- zpci_msi_prepare() is called once for each device and allocates the
required resources. On s390, each PCI function has its own airq
vector and a summary bit, which must be configured once per function.
This is done in prepare().
- zpci_msi_alloc() can be called multiple times for allocating one or
more MSI/MSI-X IRQs. This creates a mapping between the virtual IRQ
number in the kernel and the hardware IRQ number.

Freeing is split into two counterparts:
- zpci_msi_free() reverts the effects of zpci_msi_alloc() and
- zpci_msi_teardown() reverts the effects of zpci_msi_prepare(). It is
called once, after all IRQs have been freed, before the device is removed.

Since the parent domain in the end allocates the IRQs, the hwirq
encoding must be unambiguous for all IRQs of all devices. This is
achieved by encoding the hwirq using the devfn and the MSI index.

Reviewed-by: Niklas Schnelle <schnelle@linux.ibm.com>
Reviewed-by: Farhan Ali <alifm@linux.ibm.com>
Signed-off-by: Tobias Schumacher <ts@linux.ibm.com>
Reviewed-by: Gerd Bayer <gbayer@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

Authored by Tobias Schumacher and committed by Heiko Carstens
f770950a 455a6526

+277 -145
+1
arch/s390/Kconfig
··· 255 255 select HOTPLUG_SMT 256 256 select IOMMU_HELPER if PCI 257 257 select IOMMU_SUPPORT if PCI 258 + select IRQ_MSI_LIB if PCI 258 259 select KASAN_VMALLOC if KASAN 259 260 select LOCK_MM_AND_FIND_VMA 260 261 select MMU_GATHER_MERGE_VMAS
+5
arch/s390/include/asm/pci.h
··· 5 5 #include <linux/pci.h> 6 6 #include <linux/mutex.h> 7 7 #include <linux/iommu.h> 8 + #include <linux/irqdomain.h> 8 9 #include <linux/pci_hotplug.h> 9 10 #include <asm/pci_clp.h> 10 11 #include <asm/pci_debug.h> ··· 110 109 struct list_head resources; 111 110 struct list_head bus_next; 112 111 struct resource bus_resource; 112 + struct irq_domain *msi_parent_domain; 113 113 int topo; /* TID if topo_is_tid, PCHID otherwise */ 114 114 int domain_nr; 115 115 u8 multifunction : 1; ··· 312 310 /* IRQ */ 313 311 int __init zpci_irq_init(void); 314 312 void __init zpci_irq_exit(void); 313 + int zpci_set_irq(struct zpci_dev *zdev); 314 + int zpci_create_parent_msi_domain(struct zpci_bus *zbus); 315 + void zpci_remove_parent_msi_domain(struct zpci_bus *zbus); 315 316 316 317 /* FMB */ 317 318 int zpci_fmb_enable_device(struct zpci_dev *);
+6
arch/s390/pci/pci.c
··· 708 708 if (rc) 709 709 return rc; 710 710 711 + if (zdev->msi_nr_irqs > 0) { 712 + rc = zpci_set_irq(zdev); 713 + if (rc) 714 + return rc; 715 + } 716 + 711 717 rc = zpci_iommu_register_ioat(zdev, &status); 712 718 if (rc) 713 719 zpci_disable_device(zdev);
+14 -4
arch/s390/pci/pci_bus.c
··· 14 14 #include <linux/err.h> 15 15 #include <linux/delay.h> 16 16 #include <linux/seq_file.h> 17 + #include <linux/irqdomain.h> 17 18 #include <linux/jump_label.h> 18 19 #include <linux/pci.h> 19 20 #include <linux/printk.h> ··· 199 198 zbus->multifunction = zpci_bus_is_multifunction_root(fr); 200 199 zbus->max_bus_speed = fr->max_bus_speed; 201 200 201 + if (zpci_create_parent_msi_domain(zbus)) 202 + goto out_free_domain; 203 + 202 204 /* 203 205 * Note that the zbus->resources are taken over and zbus->resources 204 206 * is empty after a successful call 205 207 */ 206 208 bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources); 207 - if (!bus) { 208 - zpci_free_domain(zbus->domain_nr); 209 - return -EFAULT; 210 - } 209 + if (!bus) 210 + goto out_remove_msi_domain; 211 211 212 212 zbus->bus = bus; 213 + dev_set_msi_domain(&zbus->bus->dev, zbus->msi_parent_domain); 213 214 214 215 return 0; 216 + 217 + out_remove_msi_domain: 218 + zpci_remove_parent_msi_domain(zbus); 219 + out_free_domain: 220 + zpci_free_domain(zbus->domain_nr); 221 + return -ENOMEM; 215 222 } 216 223 217 224 static void zpci_bus_release(struct kref *kref) ··· 240 231 mutex_lock(&zbus_list_lock); 241 232 list_del(&zbus->bus_next); 242 233 mutex_unlock(&zbus_list_lock); 234 + zpci_remove_parent_msi_domain(zbus); 243 235 kfree(zbus); 244 236 } 245 237
+251 -141
arch/s390/pci/pci_irq.c
··· 6 6 #include <linux/kernel_stat.h> 7 7 #include <linux/pci.h> 8 8 #include <linux/msi.h> 9 + #include <linux/irqchip/irq-msi-lib.h> 9 10 #include <linux/smp.h> 10 11 11 12 #include <asm/isc.h> ··· 98 97 } 99 98 100 99 /* Register adapter interruptions */ 101 - static int zpci_set_irq(struct zpci_dev *zdev) 100 + int zpci_set_irq(struct zpci_dev *zdev) 102 101 { 103 102 int rc; 104 103 ··· 126 125 static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest, 127 126 bool force) 128 127 { 129 - struct msi_desc *entry = irq_data_get_msi_desc(data); 130 - struct msi_msg msg = entry->msg; 131 - int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest)); 132 - 133 - msg.address_lo &= 0xff0000ff; 134 - msg.address_lo |= (cpu_addr << 8); 135 - pci_write_msi_msg(data->irq, &msg); 136 - 128 + irq_data_update_affinity(data, dest); 137 129 return IRQ_SET_MASK_OK; 130 + } 131 + 132 + /* 133 + * Encode the hwirq number for the parent domain. The encoding must be unique 134 + * for each IRQ of each device in the parent domain, so it uses the devfn to 135 + * identify the device and the msi_index to identify the IRQ within that device. 
136 + */ 137 + static inline u32 zpci_encode_hwirq(u8 devfn, u16 msi_index) 138 + { 139 + return (devfn << 16) | msi_index; 140 + } 141 + 142 + static inline u16 zpci_decode_hwirq_msi_index(irq_hw_number_t hwirq) 143 + { 144 + return hwirq & 0xffff; 145 + } 146 + 147 + static void zpci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) 148 + { 149 + struct msi_desc *desc = irq_data_get_msi_desc(data); 150 + struct zpci_dev *zdev = to_zpci_dev(desc->dev); 151 + 152 + if (irq_delivery == DIRECTED) { 153 + int cpu = cpumask_first(irq_data_get_affinity_mask(data)); 154 + 155 + msg->address_lo = zdev->msi_addr & 0xff0000ff; 156 + msg->address_lo |= (smp_cpu_get_cpu_address(cpu) << 8); 157 + } else { 158 + msg->address_lo = zdev->msi_addr & 0xffffffff; 159 + } 160 + msg->address_hi = zdev->msi_addr >> 32; 161 + msg->data = zpci_decode_hwirq_msi_index(data->hwirq); 138 162 } 139 163 140 164 static struct irq_chip zpci_irq_chip = { 141 165 .name = "PCI-MSI", 142 - .irq_unmask = pci_msi_unmask_irq, 143 - .irq_mask = pci_msi_mask_irq, 166 + .irq_compose_msi_msg = zpci_compose_msi_msg, 144 167 }; 145 168 146 169 static void zpci_handle_cpu_local_irq(bool rescan) 147 170 { 148 171 struct airq_iv *dibv = zpci_ibv[smp_processor_id()]; 149 172 union zpci_sic_iib iib = {{0}}; 173 + struct irq_domain *msi_domain; 174 + irq_hw_number_t hwirq; 150 175 unsigned long bit; 151 176 int irqs_on = 0; 152 177 ··· 190 163 continue; 191 164 } 192 165 inc_irq_stat(IRQIO_MSI); 193 - generic_handle_irq(airq_iv_get_data(dibv, bit)); 166 + hwirq = airq_iv_get_data(dibv, bit); 167 + msi_domain = (struct irq_domain *)airq_iv_get_ptr(dibv, bit); 168 + generic_handle_domain_irq(msi_domain, hwirq); 194 169 } 195 170 } 196 171 ··· 257 228 struct tpi_info *tpi_info) 258 229 { 259 230 union zpci_sic_iib iib = {{0}}; 231 + struct irq_domain *msi_domain; 232 + irq_hw_number_t hwirq; 260 233 unsigned long si, ai; 261 234 struct airq_iv *aibv; 262 235 int irqs_on = 0; ··· 286 255 break; 287 256 
inc_irq_stat(IRQIO_MSI); 288 257 airq_iv_lock(aibv, ai); 289 - generic_handle_irq(airq_iv_get_data(aibv, ai)); 258 + hwirq = airq_iv_get_data(aibv, ai); 259 + msi_domain = (struct irq_domain *)airq_iv_get_ptr(aibv, ai); 260 + generic_handle_domain_irq(msi_domain, hwirq); 290 261 airq_iv_unlock(aibv, ai); 291 262 } 292 263 } ··· 310 277 zdev->aisb = *bit; 311 278 312 279 /* Create adapter interrupt vector */ 313 - zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL); 280 + zdev->aibv = airq_iv_create(msi_vecs, 281 + AIRQ_IV_PTR | AIRQ_IV_DATA | AIRQ_IV_BITLOCK, 282 + NULL); 314 283 if (!zdev->aibv) 315 284 return -ENOMEM; 316 285 ··· 322 287 *bit = 0; 323 288 } 324 289 return 0; 325 - } 326 - 327 - int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 328 - { 329 - unsigned int hwirq, msi_vecs, irqs_per_msi, i, cpu; 330 - struct zpci_dev *zdev = to_zpci(pdev); 331 - struct msi_desc *msi; 332 - struct msi_msg msg; 333 - unsigned long bit; 334 - int cpu_addr; 335 - int rc, irq; 336 - 337 - zdev->aisb = -1UL; 338 - zdev->msi_first_bit = -1U; 339 - 340 - msi_vecs = min_t(unsigned int, nvec, zdev->max_msi); 341 - if (msi_vecs < nvec) { 342 - pr_info("%s requested %d irqs, allocate system limit of %d", 343 - pci_name(pdev), nvec, zdev->max_msi); 344 - } 345 - 346 - rc = __alloc_airq(zdev, msi_vecs, &bit); 347 - if (rc < 0) 348 - return rc; 349 - 350 - /* 351 - * Request MSI interrupts: 352 - * When using MSI, nvec_used interrupt sources and their irq 353 - * descriptors are controlled through one msi descriptor. 354 - * Thus the outer loop over msi descriptors shall run only once, 355 - * while two inner loops iterate over the interrupt vectors. 356 - * When using MSI-X, each interrupt vector/irq descriptor 357 - * is bound to exactly one msi descriptor (nvec_used is one). 358 - * So the inner loops are executed once, while the outer iterates 359 - * over the MSI-X descriptors. 
360 - */ 361 - hwirq = bit; 362 - msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) { 363 - if (hwirq - bit >= msi_vecs) 364 - break; 365 - irqs_per_msi = min_t(unsigned int, msi_vecs, msi->nvec_used); 366 - irq = __irq_alloc_descs(-1, 0, irqs_per_msi, 0, THIS_MODULE, 367 - (irq_delivery == DIRECTED) ? 368 - msi->affinity : NULL); 369 - if (irq < 0) 370 - return -ENOMEM; 371 - 372 - for (i = 0; i < irqs_per_msi; i++) { 373 - rc = irq_set_msi_desc_off(irq, i, msi); 374 - if (rc) 375 - return rc; 376 - irq_set_chip_and_handler(irq + i, &zpci_irq_chip, 377 - handle_percpu_irq); 378 - } 379 - 380 - msg.data = hwirq - bit; 381 - if (irq_delivery == DIRECTED) { 382 - if (msi->affinity) 383 - cpu = cpumask_first(&msi->affinity->mask); 384 - else 385 - cpu = 0; 386 - cpu_addr = smp_cpu_get_cpu_address(cpu); 387 - 388 - msg.address_lo = zdev->msi_addr & 0xff0000ff; 389 - msg.address_lo |= (cpu_addr << 8); 390 - 391 - for_each_possible_cpu(cpu) { 392 - for (i = 0; i < irqs_per_msi; i++) 393 - airq_iv_set_data(zpci_ibv[cpu], 394 - hwirq + i, irq + i); 395 - } 396 - } else { 397 - msg.address_lo = zdev->msi_addr & 0xffffffff; 398 - for (i = 0; i < irqs_per_msi; i++) 399 - airq_iv_set_data(zdev->aibv, hwirq + i, irq + i); 400 - } 401 - msg.address_hi = zdev->msi_addr >> 32; 402 - pci_write_msi_msg(irq, &msg); 403 - hwirq += irqs_per_msi; 404 - } 405 - 406 - zdev->msi_first_bit = bit; 407 - zdev->msi_nr_irqs = hwirq - bit; 408 - 409 - rc = zpci_set_irq(zdev); 410 - if (rc) 411 - return rc; 412 - 413 - return (zdev->msi_nr_irqs == nvec) ? 
0 : zdev->msi_nr_irqs; 414 - } 415 - 416 - void arch_teardown_msi_irqs(struct pci_dev *pdev) 417 - { 418 - struct zpci_dev *zdev = to_zpci(pdev); 419 - struct msi_desc *msi; 420 - unsigned int i; 421 - int rc; 422 - 423 - /* Disable interrupts */ 424 - rc = zpci_clear_irq(zdev); 425 - if (rc) 426 - return; 427 - 428 - /* Release MSI interrupts */ 429 - msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) { 430 - for (i = 0; i < msi->nvec_used; i++) { 431 - irq_set_msi_desc(msi->irq + i, NULL); 432 - irq_free_desc(msi->irq + i); 433 - } 434 - msi->msg.address_lo = 0; 435 - msi->msg.address_hi = 0; 436 - msi->msg.data = 0; 437 - msi->irq = 0; 438 - } 439 - 440 - if (zdev->aisb != -1UL) { 441 - zpci_ibv[zdev->aisb] = NULL; 442 - airq_iv_free_bit(zpci_sbv, zdev->aisb); 443 - zdev->aisb = -1UL; 444 - } 445 - if (zdev->aibv) { 446 - airq_iv_release(zdev->aibv); 447 - zdev->aibv = NULL; 448 - } 449 - 450 - if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U) 451 - airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs); 452 290 } 453 291 454 292 bool arch_restore_msi_irqs(struct pci_dev *pdev) ··· 336 428 .handler = zpci_floating_irq_handler, 337 429 .isc = PCI_ISC, 338 430 }; 431 + 432 + static void zpci_msi_teardown_directed(struct zpci_dev *zdev) 433 + { 434 + airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->max_msi); 435 + zdev->msi_first_bit = -1U; 436 + zdev->msi_nr_irqs = 0; 437 + } 438 + 439 + static void zpci_msi_teardown_floating(struct zpci_dev *zdev) 440 + { 441 + airq_iv_release(zdev->aibv); 442 + zdev->aibv = NULL; 443 + airq_iv_free_bit(zpci_sbv, zdev->aisb); 444 + zdev->aisb = -1UL; 445 + zdev->msi_first_bit = -1U; 446 + zdev->msi_nr_irqs = 0; 447 + } 448 + 449 + static void zpci_msi_teardown(struct irq_domain *domain, msi_alloc_info_t *arg) 450 + { 451 + struct zpci_dev *zdev = to_zpci_dev(domain->dev); 452 + 453 + zpci_clear_irq(zdev); 454 + if (irq_delivery == DIRECTED) 455 + zpci_msi_teardown_directed(zdev); 456 + else 457 + 
zpci_msi_teardown_floating(zdev); 458 + } 459 + 460 + static int zpci_msi_prepare(struct irq_domain *domain, 461 + struct device *dev, int nvec, 462 + msi_alloc_info_t *info) 463 + { 464 + struct zpci_dev *zdev = to_zpci_dev(dev); 465 + struct pci_dev *pdev = to_pci_dev(dev); 466 + unsigned long bit; 467 + int msi_vecs, rc; 468 + 469 + msi_vecs = min_t(unsigned int, nvec, zdev->max_msi); 470 + if (msi_vecs < nvec) { 471 + pr_info("%s requested %d IRQs, allocate system limit of %d\n", 472 + pci_name(pdev), nvec, zdev->max_msi); 473 + } 474 + 475 + rc = __alloc_airq(zdev, msi_vecs, &bit); 476 + if (rc) { 477 + pr_err("Allocating adapter IRQs for %s failed\n", pci_name(pdev)); 478 + return rc; 479 + } 480 + 481 + zdev->msi_first_bit = bit; 482 + zdev->msi_nr_irqs = msi_vecs; 483 + rc = zpci_set_irq(zdev); 484 + if (rc) { 485 + pr_err("Registering adapter IRQs for %s failed\n", 486 + pci_name(pdev)); 487 + 488 + if (irq_delivery == DIRECTED) 489 + zpci_msi_teardown_directed(zdev); 490 + else 491 + zpci_msi_teardown_floating(zdev); 492 + return rc; 493 + } 494 + return 0; 495 + } 496 + 497 + static int zpci_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, 498 + unsigned int nr_irqs, void *args) 499 + { 500 + struct msi_desc *desc = ((msi_alloc_info_t *)args)->desc; 501 + struct zpci_dev *zdev = to_zpci_dev(desc->dev); 502 + struct zpci_bus *zbus = zdev->zbus; 503 + unsigned int cpu, hwirq; 504 + unsigned long bit; 505 + int i; 506 + 507 + bit = zdev->msi_first_bit + desc->msi_index; 508 + hwirq = zpci_encode_hwirq(zdev->devfn, desc->msi_index); 509 + 510 + if (desc->msi_index + nr_irqs > zdev->max_msi) 511 + return -EINVAL; 512 + 513 + for (i = 0; i < nr_irqs; i++) { 514 + irq_domain_set_info(domain, virq + i, hwirq + i, 515 + &zpci_irq_chip, zdev, 516 + handle_percpu_irq, NULL, NULL); 517 + 518 + if (irq_delivery == DIRECTED) { 519 + for_each_possible_cpu(cpu) { 520 + airq_iv_set_ptr(zpci_ibv[cpu], bit + i, 521 + (unsigned long)zbus->msi_parent_domain); 
522 + airq_iv_set_data(zpci_ibv[cpu], bit + i, hwirq + i); 523 + } 524 + } else { 525 + airq_iv_set_ptr(zdev->aibv, bit + i, 526 + (unsigned long)zbus->msi_parent_domain); 527 + airq_iv_set_data(zdev->aibv, bit + i, hwirq + i); 528 + } 529 + } 530 + 531 + return 0; 532 + } 533 + 534 + static void zpci_msi_clear_airq(struct irq_data *d, int i) 535 + { 536 + struct msi_desc *desc = irq_data_get_msi_desc(d); 537 + struct zpci_dev *zdev = to_zpci_dev(desc->dev); 538 + unsigned long bit; 539 + unsigned int cpu; 540 + u16 msi_index; 541 + 542 + msi_index = zpci_decode_hwirq_msi_index(d->hwirq); 543 + bit = zdev->msi_first_bit + msi_index; 544 + 545 + if (irq_delivery == DIRECTED) { 546 + for_each_possible_cpu(cpu) { 547 + airq_iv_set_ptr(zpci_ibv[cpu], bit + i, 0); 548 + airq_iv_set_data(zpci_ibv[cpu], bit + i, 0); 549 + } 550 + } else { 551 + airq_iv_set_ptr(zdev->aibv, bit + i, 0); 552 + airq_iv_set_data(zdev->aibv, bit + i, 0); 553 + } 554 + } 555 + 556 + static void zpci_msi_domain_free(struct irq_domain *domain, unsigned int virq, 557 + unsigned int nr_irqs) 558 + { 559 + struct irq_data *d; 560 + int i; 561 + 562 + for (i = 0; i < nr_irqs; i++) { 563 + d = irq_domain_get_irq_data(domain, virq + i); 564 + zpci_msi_clear_airq(d, i); 565 + irq_domain_reset_irq_data(d); 566 + } 567 + } 568 + 569 + static const struct irq_domain_ops zpci_msi_domain_ops = { 570 + .alloc = zpci_msi_domain_alloc, 571 + .free = zpci_msi_domain_free, 572 + }; 573 + 574 + static bool zpci_init_dev_msi_info(struct device *dev, struct irq_domain *domain, 575 + struct irq_domain *real_parent, 576 + struct msi_domain_info *info) 577 + { 578 + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) 579 + return false; 580 + 581 + info->ops->msi_prepare = zpci_msi_prepare; 582 + info->ops->msi_teardown = zpci_msi_teardown; 583 + 584 + return true; 585 + } 586 + 587 + static struct msi_parent_ops zpci_msi_parent_ops = { 588 + .supported_flags = MSI_GENERIC_FLAGS_MASK | 589 + 
MSI_FLAG_PCI_MSIX | 590 + MSI_FLAG_MULTI_PCI_MSI, 591 + .required_flags = MSI_FLAG_USE_DEF_DOM_OPS | 592 + MSI_FLAG_USE_DEF_CHIP_OPS, 593 + .init_dev_msi_info = zpci_init_dev_msi_info, 594 + }; 595 + 596 + int zpci_create_parent_msi_domain(struct zpci_bus *zbus) 597 + { 598 + char fwnode_name[18]; 599 + 600 + snprintf(fwnode_name, sizeof(fwnode_name), "ZPCI_MSI_DOM_%04x", zbus->domain_nr); 601 + struct irq_domain_info info = { 602 + .fwnode = irq_domain_alloc_named_fwnode(fwnode_name), 603 + .ops = &zpci_msi_domain_ops, 604 + }; 605 + 606 + if (!info.fwnode) { 607 + pr_err("Failed to allocate fwnode for MSI IRQ domain\n"); 608 + return -ENOMEM; 609 + } 610 + 611 + if (irq_delivery == FLOATING) 612 + zpci_msi_parent_ops.required_flags |= MSI_FLAG_NO_AFFINITY; 613 + 614 + zbus->msi_parent_domain = msi_create_parent_irq_domain(&info, &zpci_msi_parent_ops); 615 + if (!zbus->msi_parent_domain) { 616 + irq_domain_free_fwnode(info.fwnode); 617 + pr_err("Failed to create MSI IRQ domain\n"); 618 + return -ENOMEM; 619 + } 620 + 621 + return 0; 622 + } 623 + 624 + void zpci_remove_parent_msi_domain(struct zpci_bus *zbus) 625 + { 626 + struct fwnode_handle *fn; 627 + 628 + fn = zbus->msi_parent_domain->fwnode; 629 + irq_domain_remove(zbus->msi_parent_domain); 630 + irq_domain_free_fwnode(fn); 631 + } 339 632 340 633 static void __init cpu_enable_directed_irq(void *unused) 341 634 { ··· 574 465 * is only done on the first vector. 575 466 */ 576 467 zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE, 468 + AIRQ_IV_PTR | 577 469 AIRQ_IV_DATA | 578 470 AIRQ_IV_CACHELINE | 579 471 (!cpu ? AIRQ_IV_ALLOC : 0), NULL);