Merge tag 'irq-urgent-2024-06-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Ingo Molnar:

- Fix possible memory leak on riscv-intc irqchip driver load failures

- Fix boot crash in the sifive-plic irqchip driver caused by recently
changed boot initialization order

- Fix race condition in the gic-v3-its irqchip driver

* tag 'irq-urgent-2024-06-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
irqchip/gic-v3-its: Fix potential race condition in its_vlpi_prop_update()
irqchip/sifive-plic: Chain to parent IRQ after handlers are ready
irqchip/riscv-intc: Prevent memory leak when riscv_intc_init_common() fails

+36 -51
+12 -32
drivers/irqchip/irq-gic-v3-its.c
··· 1846 { 1847 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1848 u32 event = its_get_event_id(d); 1849 - int ret = 0; 1850 1851 if (!info->map) 1852 return -EINVAL; 1853 - 1854 - raw_spin_lock(&its_dev->event_map.vlpi_lock); 1855 1856 if (!its_dev->event_map.vm) { 1857 struct its_vlpi_map *maps; 1858 1859 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), 1860 GFP_ATOMIC); 1861 - if (!maps) { 1862 - ret = -ENOMEM; 1863 - goto out; 1864 - } 1865 1866 its_dev->event_map.vm = info->map->vm; 1867 its_dev->event_map.vlpi_maps = maps; 1868 } else if (its_dev->event_map.vm != info->map->vm) { 1869 - ret = -EINVAL; 1870 - goto out; 1871 } 1872 1873 /* Get our private copy of the mapping information */ ··· 1893 its_dev->event_map.nr_vlpis++; 1894 } 1895 1896 - out: 1897 - raw_spin_unlock(&its_dev->event_map.vlpi_lock); 1898 - return ret; 1899 } 1900 1901 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) 1902 { 1903 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1904 struct its_vlpi_map *map; 1905 - int ret = 0; 1906 - 1907 - raw_spin_lock(&its_dev->event_map.vlpi_lock); 1908 1909 map = get_vlpi_map(d); 1910 1911 - if (!its_dev->event_map.vm || !map) { 1912 - ret = -EINVAL; 1913 - goto out; 1914 - } 1915 1916 /* Copy our mapping information to the incoming request */ 1917 *info->map = *map; 1918 1919 - out: 1920 - raw_spin_unlock(&its_dev->event_map.vlpi_lock); 1921 - return ret; 1922 } 1923 1924 static int its_vlpi_unmap(struct irq_data *d) 1925 { 1926 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1927 u32 event = its_get_event_id(d); 1928 - int ret = 0; 1929 1930 - raw_spin_lock(&its_dev->event_map.vlpi_lock); 1931 - 1932 - if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { 1933 - ret = -EINVAL; 1934 - goto out; 1935 - } 1936 1937 /* Drop the virtual mapping */ 1938 its_send_discard(its_dev, event); ··· 1942 kfree(its_dev->event_map.vlpi_maps); 1943 } 1944 1945 - out: 1946 - 
raw_spin_unlock(&its_dev->event_map.vlpi_lock); 1947 - return ret; 1948 } 1949 1950 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) ··· 1969 /* Need a v4 ITS */ 1970 if (!is_v4(its_dev->its)) 1971 return -EINVAL; 1972 1973 /* Unmap request? */ 1974 if (!info)
··· 1846 { 1847 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1848 u32 event = its_get_event_id(d); 1849 1850 if (!info->map) 1851 return -EINVAL; 1852 1853 if (!its_dev->event_map.vm) { 1854 struct its_vlpi_map *maps; 1855 1856 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), 1857 GFP_ATOMIC); 1858 + if (!maps) 1859 + return -ENOMEM; 1860 1861 its_dev->event_map.vm = info->map->vm; 1862 its_dev->event_map.vlpi_maps = maps; 1863 } else if (its_dev->event_map.vm != info->map->vm) { 1864 + return -EINVAL; 1865 } 1866 1867 /* Get our private copy of the mapping information */ ··· 1899 its_dev->event_map.nr_vlpis++; 1900 } 1901 1902 + return 0; 1903 } 1904 1905 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) 1906 { 1907 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1908 struct its_vlpi_map *map; 1909 1910 map = get_vlpi_map(d); 1911 1912 + if (!its_dev->event_map.vm || !map) 1913 + return -EINVAL; 1914 1915 /* Copy our mapping information to the incoming request */ 1916 *info->map = *map; 1917 1918 + return 0; 1919 } 1920 1921 static int its_vlpi_unmap(struct irq_data *d) 1922 { 1923 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1924 u32 event = its_get_event_id(d); 1925 1926 + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) 1927 + return -EINVAL; 1928 1929 /* Drop the virtual mapping */ 1930 its_send_discard(its_dev, event); ··· 1962 kfree(its_dev->event_map.vlpi_maps); 1963 } 1964 1965 + return 0; 1966 } 1967 1968 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) ··· 1991 /* Need a v4 ITS */ 1992 if (!is_v4(its_dev->its)) 1993 return -EINVAL; 1994 + 1995 + guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock); 1996 1997 /* Unmap request? */ 1998 if (!info)
+7 -2
drivers/irqchip/irq-riscv-intc.c
··· 253 static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, 254 const unsigned long end) 255 { 256 - struct fwnode_handle *fn; 257 struct acpi_madt_rintc *rintc; 258 259 rintc = (struct acpi_madt_rintc *)header; 260 ··· 274 return -ENOMEM; 275 } 276 277 - return riscv_intc_init_common(fn, &riscv_intc_chip); 278 } 279 280 IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
··· 253 static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header, 254 const unsigned long end) 255 { 256 struct acpi_madt_rintc *rintc; 257 + struct fwnode_handle *fn; 258 + int rc; 259 260 rintc = (struct acpi_madt_rintc *)header; 261 ··· 273 return -ENOMEM; 274 } 275 276 + rc = riscv_intc_init_common(fn, &riscv_intc_chip); 277 + if (rc) 278 + irq_domain_free_fwnode(fn); 279 + 280 + return rc; 281 } 282 283 IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
+17 -17
drivers/irqchip/irq-sifive-plic.c
··· 85 struct plic_priv *priv; 86 }; 87 static int plic_parent_irq __ro_after_init; 88 - static bool plic_cpuhp_setup_done __ro_after_init; 89 static DEFINE_PER_CPU(struct plic_handler, plic_handlers); 90 91 static int plic_irq_set_type(struct irq_data *d, unsigned int type); ··· 487 unsigned long plic_quirks = 0; 488 struct plic_handler *handler; 489 u32 nr_irqs, parent_hwirq; 490 - struct irq_domain *domain; 491 struct plic_priv *priv; 492 irq_hw_number_t hwirq; 493 - bool cpuhp_setup; 494 495 if (is_of_node(dev->fwnode)) { 496 const struct of_device_id *id; ··· 547 continue; 548 } 549 550 - /* Find parent domain and register chained handler */ 551 - domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); 552 - if (!plic_parent_irq && domain) { 553 - plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); 554 - if (plic_parent_irq) 555 - irq_set_chained_handler(plic_parent_irq, plic_handle_irq); 556 - } 557 - 558 /* 559 * When running in M-mode we need to ignore the S-mode handler. 560 * Here we assume it always comes later, but that might be a ··· 587 goto fail_cleanup_contexts; 588 589 /* 590 - * We can have multiple PLIC instances so setup cpuhp state 591 * and register syscore operations only once after context 592 * handlers of all online CPUs are initialized. 593 */ 594 - if (!plic_cpuhp_setup_done) { 595 - cpuhp_setup = true; 596 for_each_online_cpu(cpu) { 597 handler = per_cpu_ptr(&plic_handlers, cpu); 598 if (!handler->present) { 599 - cpuhp_setup = false; 600 break; 601 } 602 } 603 - if (cpuhp_setup) { 604 cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, 605 "irqchip/sifive/plic:starting", 606 plic_starting_cpu, plic_dying_cpu); 607 register_syscore_ops(&plic_irq_syscore_ops); 608 - plic_cpuhp_setup_done = true; 609 } 610 } 611
··· 85 struct plic_priv *priv; 86 }; 87 static int plic_parent_irq __ro_after_init; 88 + static bool plic_global_setup_done __ro_after_init; 89 static DEFINE_PER_CPU(struct plic_handler, plic_handlers); 90 91 static int plic_irq_set_type(struct irq_data *d, unsigned int type); ··· 487 unsigned long plic_quirks = 0; 488 struct plic_handler *handler; 489 u32 nr_irqs, parent_hwirq; 490 struct plic_priv *priv; 491 irq_hw_number_t hwirq; 492 493 if (is_of_node(dev->fwnode)) { 494 const struct of_device_id *id; ··· 549 continue; 550 } 551 552 /* 553 * When running in M-mode we need to ignore the S-mode handler. 554 * Here we assume it always comes later, but that might be a ··· 597 goto fail_cleanup_contexts; 598 599 /* 600 + * We can have multiple PLIC instances so setup global state 601 * and register syscore operations only once after context 602 * handlers of all online CPUs are initialized. 603 */ 604 + if (!plic_global_setup_done) { 605 + struct irq_domain *domain; 606 + bool global_setup = true; 607 + 608 for_each_online_cpu(cpu) { 609 handler = per_cpu_ptr(&plic_handlers, cpu); 610 if (!handler->present) { 611 + global_setup = false; 612 break; 613 } 614 } 615 + 616 + if (global_setup) { 617 + /* Find parent domain and register chained handler */ 618 + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); 619 + if (domain) 620 + plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); 621 + if (plic_parent_irq) 622 + irq_set_chained_handler(plic_parent_irq, plic_handle_irq); 623 + 624 cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, 625 "irqchip/sifive/plic:starting", 626 plic_starting_cpu, plic_dying_cpu); 627 register_syscore_ops(&plic_irq_syscore_ops); 628 + plic_global_setup_done = true; 629 } 630 } 631