Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
"A set of fixes mostly for the ARM/GIC world:

- Fix the MSI affinity handling in the ls-scfg irq chip driver so it
updates and uses the effective affinity mask correctly

- Prevent binding LPIs to offline CPUs and respect the Cavium erratum
which requires that LPIs belonging to an offline NUMA node are
not bound to a CPU on a different NUMA node.

- Free only the number of allocated interrupts in the GIC-V2M driver
instead of trying to free log2(nrirqs).

- Prevent emitting SYNC and VSYNC targeting non-existent interrupt
collections in the GIC-V3 ITS driver

- Ensure that the GIC-V3 interrupt redistributor is correctly
reprogrammed on CPU hotplug

- Remove a stale unused helper function"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
irqdesc: Delete irq_desc_get_msi_desc()
irqchip/gic-v3-its: Fix reprogramming of redistributors on CPU hotplug
irqchip/gic-v3-its: Only emit VSYNC if targetting a valid collection
irqchip/gic-v3-its: Only emit SYNC if targetting a valid collection
irqchip/gic-v3-its: Don't bind LPI to unavailable NUMA node
irqchip/gic-v2m: Fix SPI release on error path
irqchip/ls-scfg-msi: Fix MSI affinity handling
genirq/debugfs: Add missing IRQCHIP_SUPPORTS_LEVEL_MSI debug

+60 -21
+1 -1
drivers/irqchip/irq-gic-v2m.c
···
 
 fail:
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
-       gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+       gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
        return err;
 }
 
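
For context on the hunk above: get_count_order(n) returns the power-of-two
order of n (roughly ceil(log2(n))), while gicv2m_unalloc_msi() expects the
plain number of interrupts, so the old error path released far fewer SPIs
than were allocated. A minimal user-space sketch of the arithmetic, using a
stand-in for the kernel helper (count_order below is not the kernel
function, just an illustration):

    #include <stdio.h>

    /* rough stand-in for the kernel's get_count_order() */
    static int count_order(unsigned int count)
    {
            int order = 0;

            while ((1u << order) < count)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned int nr_irqs = 16;

            /* 16 SPIs allocated, but the old error path freed only 4 */
            printf("allocated %u, old code freed %d\n",
                   nr_irqs, count_order(nr_irqs));
            return 0;
    }
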
+50 -12
drivers/irqchip/irq-gic-v3-its.c
···
        return its->collections + its_dev->event_map.col_map[event];
 }
 
+static struct its_collection *valid_col(struct its_collection *col)
+{
+       if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
+               return NULL;
+
+       return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+       if (valid_col(its->collections + vpe->col_idx))
+               return vpe;
+
+       return NULL;
+}
+
 /*
  * ITS command descriptors - parameters to be encoded in a command
  * block.
···
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_movi_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_discard_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_inv_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_int_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_clear_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_invall_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vinvall_cmd.vpe;
+       return valid_vpe(its, desc->its_vinvall_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapp_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapti_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapti_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovi_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovi_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
···
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovp_cmd.vpe);
 }
 
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
···
 
 static int its_alloc_collections(struct its_node *its)
 {
+       int i;
+
        its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
                                   GFP_KERNEL);
        if (!its->collections)
                return -ENOMEM;
+
+       for (i = 0; i < nr_cpu_ids; i++)
+               its->collections[i].target_address = ~0ULL;
 
        return 0;
 }
···
                cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
        /* Bind the LPI to the first possible CPU */
-       cpu = cpumask_first(cpu_mask);
+       cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+       if (cpu >= nr_cpu_ids) {
+               if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+                       return -EINVAL;
+
+               cpu = cpumask_first(cpu_online_mask);
+       }
+
        its_dev->event_map.col_map[event] = cpu;
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
···
        void __iomem *rbase = gic_data_rdist_rd_base();
        u64 timeout = USEC_PER_SEC;
        u64 val;
+
+       /*
+        * If coming via a CPU hotplug event, we don't need to disable
+        * LPIs before trying to re-enable them. They are already
+        * configured and all is well in the world. Detect this case
+        * by checking the allocation of the pending table for the
+        * current CPU.
+        */
+       if (gic_data_rdist()->pend_page)
+               return 0;
 
        if (!gic_rdists_supports_plpis()) {
                pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
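
The LPI-binding hunk above replaces cpumask_first() with cpumask_first_and()
so an LPI can never be bound to an offline CPU, and only falls back to a
CPU outside the device's NUMA node when the Cavium 23144 workaround is not
in effect. A hedged user-space analogue of that decision (plain bitmasks
stand in for the kernel's cpumask API; the masks and names below are made
up for illustration):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define NR_CPUS 8

    static int first_set(uint32_t mask)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    if (mask & (1u << cpu))
                            return cpu;
            return NR_CPUS;         /* analogue of "cpu >= nr_cpu_ids" */
    }

    static int pick_cpu(uint32_t node_mask, uint32_t online_mask,
                        bool cavium_23144)
    {
            int cpu = first_set(node_mask & online_mask);

            if (cpu >= NR_CPUS) {
                    if (cavium_23144)
                            return -1;      /* -EINVAL in the kernel */
                    cpu = first_set(online_mask);
            }
            return cpu;
    }

    int main(void)
    {
            /* node owns CPUs 4-7, but only CPUs 0-3 are online */
            printf("%d\n", pick_cpu(0xf0, 0x0f, false)); /* falls back to CPU 0 */
            printf("%d\n", pick_cpu(0xf0, 0x0f, true));  /* erratum: refuse, -1 */
            return 0;
    }
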
+7 -3
drivers/irqchip/irq-ls-scfg-msi.c
···
        msg->address_lo = lower_32_bits(msi_data->msiir_addr);
        msg->data = data->hwirq;
 
-       if (msi_affinity_flag)
-               msg->data |= cpumask_first(data->common->affinity);
+       if (msi_affinity_flag) {
+               const struct cpumask *mask;
+
+               mask = irq_data_get_effective_affinity_mask(data);
+               msg->data |= cpumask_first(mask);
+       }
 
        iommu_dma_map_msi_msg(data->irq, msg);
 }
···
                return -EINVAL;
        }
 
-       cpumask_copy(irq_data->common->affinity, mask);
+       irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK;
 }
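
The pattern fixed here is the generic effective-affinity one: the
.irq_set_affinity callback records the single CPU the MSI was actually
steered to, and the message-composition path reads it back via
irq_data_get_effective_affinity_mask() instead of poking the requested
affinity mask directly. A sketch of the callback shape (not the driver's
exact code; my_msi_set_affinity is a made-up name):

    #include <linux/irq.h>
    #include <linux/cpumask.h>

    static int my_msi_set_affinity(struct irq_data *irq_data,
                                   const struct cpumask *mask, bool force)
    {
            int cpu = cpumask_any_and(mask, cpu_online_mask);

            if (cpu >= nr_cpu_ids)
                    return -EINVAL;

            /* remember which single CPU the MSI was actually steered to */
            irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));

            return IRQ_SET_MASK_OK;
    }
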
+1
include/linux/irq.h
···
  * IRQCHIP_SKIP_SET_WAKE:      Skip chip.irq_set_wake(), for this irq chip
  * IRQCHIP_ONESHOT_SAFE:       One shot does not require mask/unmask
  * IRQCHIP_EOI_THREADED:       Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI  Chip can provide two doorbells for Level MSIs
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED         = (1 << 0),
-5
include/linux/irqdesc.h
···
        return desc->irq_common_data.handler_data;
 }
 
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
-       return desc->irq_common_data.msi_desc;
-}
-
 /*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt.
+1
kernel/irq/debugfs.c
···
        BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
        BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
        BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+       BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
 };
 
 static void
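
With the descriptor added above, the new flag appears in the irq chip
flags dump under /sys/kernel/debug/irq/irqs/<n>. For illustration only, a
sketch of how an irq_chip could advertise it (the chip name and the parent
callbacks are placeholders, not taken from this series):

    #include <linux/irq.h>

    static struct irq_chip example_level_msi_chip = {
            .name           = "example-MSI",
            .irq_mask       = irq_chip_mask_parent,
            .irq_unmask     = irq_chip_unmask_parent,
            .flags          = IRQCHIP_SUPPORTS_LEVEL_MSI,
    };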