Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
"A set of fixes mostly for the ARM/GIC world:

- Fix the MSI affinity handling in the ls-scfg irq chip driver so it
updates and uses the effective affinity mask correctly

- Prevent binding LPIs to offline CPUs and respect the Cavium erratum
which requires that LPIs which belong to an offline NUMA node are
not bound to a CPU on a different NUMA node.

- Free only the amount of allocated interrupts in the GIC-V2M driver
instead of trying to free log2(nrirqs).

 - Prevent emitting SYNC and VSYNC targeting non-existing interrupt
 collections in the GIC-V3 ITS driver

 - Ensure that the GIC-V3 interrupt redistributor is correctly
 reprogrammed on CPU hotplug

- Remove a stale unused helper function"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
irqdesc: Delete irq_desc_get_msi_desc()
irqchip/gic-v3-its: Fix reprogramming of redistributors on CPU hotplug
irqchip/gic-v3-its: Only emit VSYNC if targetting a valid collection
irqchip/gic-v3-its: Only emit SYNC if targetting a valid collection
irqchip/gic-v3-its: Don't bind LPI to unavailable NUMA node
irqchip/gic-v2m: Fix SPI release on error path
irqchip/ls-scfg-msi: Fix MSI affinity handling
genirq/debugfs: Add missing IRQCHIP_SUPPORTS_LEVEL_MSI debug

+60 -21
+1 -1
drivers/irqchip/irq-gic-v2m.c
··· 199 200 fail: 201 irq_domain_free_irqs_parent(domain, virq, nr_irqs); 202 - gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs)); 203 return err; 204 } 205
··· 199 200 fail: 201 irq_domain_free_irqs_parent(domain, virq, nr_irqs); 202 + gicv2m_unalloc_msi(v2m, hwirq, nr_irqs); 203 return err; 204 } 205
+50 -12
drivers/irqchip/irq-gic-v3-its.c
··· 182 return its->collections + its_dev->event_map.col_map[event]; 183 } 184 185 /* 186 * ITS command descriptors - parameters to be encoded in a command 187 * block. ··· 455 456 its_fixup_cmd(cmd); 457 458 - return col; 459 } 460 461 static struct its_collection *its_build_movi_cmd(struct its_node *its, ··· 474 475 its_fixup_cmd(cmd); 476 477 - return col; 478 } 479 480 static struct its_collection *its_build_discard_cmd(struct its_node *its, ··· 492 493 its_fixup_cmd(cmd); 494 495 - return col; 496 } 497 498 static struct its_collection *its_build_inv_cmd(struct its_node *its, ··· 510 511 its_fixup_cmd(cmd); 512 513 - return col; 514 } 515 516 static struct its_collection *its_build_int_cmd(struct its_node *its, ··· 528 529 its_fixup_cmd(cmd); 530 531 - return col; 532 } 533 534 static struct its_collection *its_build_clear_cmd(struct its_node *its, ··· 546 547 its_fixup_cmd(cmd); 548 549 - return col; 550 } 551 552 static struct its_collection *its_build_invall_cmd(struct its_node *its, ··· 570 571 its_fixup_cmd(cmd); 572 573 - return desc->its_vinvall_cmd.vpe; 574 } 575 576 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, ··· 592 593 its_fixup_cmd(cmd); 594 595 - return desc->its_vmapp_cmd.vpe; 596 } 597 598 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, ··· 615 616 its_fixup_cmd(cmd); 617 618 - return desc->its_vmapti_cmd.vpe; 619 } 620 621 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, ··· 638 639 its_fixup_cmd(cmd); 640 641 - return desc->its_vmovi_cmd.vpe; 642 } 643 644 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, ··· 656 657 its_fixup_cmd(cmd); 658 659 - return desc->its_vmovp_cmd.vpe; 660 } 661 662 static u64 its_cmd_ptr_to_offset(struct its_node *its, ··· 1840 1841 static int its_alloc_collections(struct its_node *its) 1842 { 1843 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), 1844 GFP_KERNEL); 1845 if (!its->collections) 1846 return -ENOMEM; 1847 1848 return 0; 
1849 } ··· 2331 cpu_mask = cpumask_of_node(its_dev->its->numa_node); 2332 2333 /* Bind the LPI to the first possible CPU */ 2334 - cpu = cpumask_first(cpu_mask); 2335 its_dev->event_map.col_map[event] = cpu; 2336 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 2337 ··· 3426 void __iomem *rbase = gic_data_rdist_rd_base(); 3427 u64 timeout = USEC_PER_SEC; 3428 u64 val; 3429 3430 if (!gic_rdists_supports_plpis()) { 3431 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
··· 182 return its->collections + its_dev->event_map.col_map[event]; 183 } 184 185 + static struct its_collection *valid_col(struct its_collection *col) 186 + { 187 + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) 188 + return NULL; 189 + 190 + return col; 191 + } 192 + 193 + static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) 194 + { 195 + if (valid_col(its->collections + vpe->col_idx)) 196 + return vpe; 197 + 198 + return NULL; 199 + } 200 + 201 /* 202 * ITS command descriptors - parameters to be encoded in a command 203 * block. ··· 439 440 its_fixup_cmd(cmd); 441 442 + return valid_col(col); 443 } 444 445 static struct its_collection *its_build_movi_cmd(struct its_node *its, ··· 458 459 its_fixup_cmd(cmd); 460 461 + return valid_col(col); 462 } 463 464 static struct its_collection *its_build_discard_cmd(struct its_node *its, ··· 476 477 its_fixup_cmd(cmd); 478 479 + return valid_col(col); 480 } 481 482 static struct its_collection *its_build_inv_cmd(struct its_node *its, ··· 494 495 its_fixup_cmd(cmd); 496 497 + return valid_col(col); 498 } 499 500 static struct its_collection *its_build_int_cmd(struct its_node *its, ··· 512 513 its_fixup_cmd(cmd); 514 515 + return valid_col(col); 516 } 517 518 static struct its_collection *its_build_clear_cmd(struct its_node *its, ··· 530 531 its_fixup_cmd(cmd); 532 533 + return valid_col(col); 534 } 535 536 static struct its_collection *its_build_invall_cmd(struct its_node *its, ··· 554 555 its_fixup_cmd(cmd); 556 557 + return valid_vpe(its, desc->its_vinvall_cmd.vpe); 558 } 559 560 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, ··· 576 577 its_fixup_cmd(cmd); 578 579 + return valid_vpe(its, desc->its_vmapp_cmd.vpe); 580 } 581 582 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, ··· 599 600 its_fixup_cmd(cmd); 601 602 + return valid_vpe(its, desc->its_vmapti_cmd.vpe); 603 } 604 605 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, ··· 622 
623 its_fixup_cmd(cmd); 624 625 + return valid_vpe(its, desc->its_vmovi_cmd.vpe); 626 } 627 628 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, ··· 640 641 its_fixup_cmd(cmd); 642 643 + return valid_vpe(its, desc->its_vmovp_cmd.vpe); 644 } 645 646 static u64 its_cmd_ptr_to_offset(struct its_node *its, ··· 1824 1825 static int its_alloc_collections(struct its_node *its) 1826 { 1827 + int i; 1828 + 1829 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), 1830 GFP_KERNEL); 1831 if (!its->collections) 1832 return -ENOMEM; 1833 + 1834 + for (i = 0; i < nr_cpu_ids; i++) 1835 + its->collections[i].target_address = ~0ULL; 1836 1837 return 0; 1838 } ··· 2310 cpu_mask = cpumask_of_node(its_dev->its->numa_node); 2311 2312 /* Bind the LPI to the first possible CPU */ 2313 + cpu = cpumask_first_and(cpu_mask, cpu_online_mask); 2314 + if (cpu >= nr_cpu_ids) { 2315 + if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) 2316 + return -EINVAL; 2317 + 2318 + cpu = cpumask_first(cpu_online_mask); 2319 + } 2320 + 2321 its_dev->event_map.col_map[event] = cpu; 2322 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 2323 ··· 3398 void __iomem *rbase = gic_data_rdist_rd_base(); 3399 u64 timeout = USEC_PER_SEC; 3400 u64 val; 3401 + 3402 + /* 3403 + * If coming via a CPU hotplug event, we don't need to disable 3404 + * LPIs before trying to re-enable them. They are already 3405 + * configured and all is well in the world. Detect this case 3406 + * by checking the allocation of the pending table for the 3407 + * current CPU. 3408 + */ 3409 + if (gic_data_rdist()->pend_page) 3410 + return 0; 3411 3412 if (!gic_rdists_supports_plpis()) { 3413 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
+7 -3
drivers/irqchip/irq-ls-scfg-msi.c
··· 93 msg->address_lo = lower_32_bits(msi_data->msiir_addr); 94 msg->data = data->hwirq; 95 96 - if (msi_affinity_flag) 97 - msg->data |= cpumask_first(data->common->affinity); 98 99 iommu_dma_map_msi_msg(data->irq, msg); 100 } ··· 125 return -EINVAL; 126 } 127 128 - cpumask_copy(irq_data->common->affinity, mask); 129 130 return IRQ_SET_MASK_OK; 131 }
··· 93 msg->address_lo = lower_32_bits(msi_data->msiir_addr); 94 msg->data = data->hwirq; 95 96 + if (msi_affinity_flag) { 97 + const struct cpumask *mask; 98 + 99 + mask = irq_data_get_effective_affinity_mask(data); 100 + msg->data |= cpumask_first(mask); 101 + } 102 103 iommu_dma_map_msi_msg(data->irq, msg); 104 } ··· 121 return -EINVAL; 122 } 123 124 + irq_data_update_effective_affinity(irq_data, cpumask_of(cpu)); 125 126 return IRQ_SET_MASK_OK; 127 }
+1
include/linux/irq.h
··· 503 * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip 504 * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask 505 * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode 506 */ 507 enum { 508 IRQCHIP_SET_TYPE_MASKED = (1 << 0),
··· 503 * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip 504 * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask 505 * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode 506 + * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs 507 */ 508 enum { 509 IRQCHIP_SET_TYPE_MASKED = (1 << 0),
-5
include/linux/irqdesc.h
··· 145 return desc->irq_common_data.handler_data; 146 } 147 148 - static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) 149 - { 150 - return desc->irq_common_data.msi_desc; 151 - } 152 - 153 /* 154 * Architectures call this to let the generic IRQ layer 155 * handle an interrupt.
··· 145 return desc->irq_common_data.handler_data; 146 } 147 148 /* 149 * Architectures call this to let the generic IRQ layer 150 * handle an interrupt.
+1
kernel/irq/debugfs.c
··· 55 BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE), 56 BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE), 57 BIT_MASK_DESCR(IRQCHIP_EOI_THREADED), 58 }; 59 60 static void
··· 55 BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE), 56 BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE), 57 BIT_MASK_DESCR(IRQCHIP_EOI_THREADED), 58 + BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI), 59 }; 60 61 static void