Merge tag 'irq_urgent_for_v6.10_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Borislav Petkov:

- Make sure multi-bridge machines get all eiointc interrupt controllers
initialized even if the number of CPUs has been limited by a cmdline
param

- Make sure interrupt lines on liointc hw are configured properly even
when interrupt routing changes

- Avoid use-after-free in the error path of the MSI init code

* tag 'irq_urgent_for_v6.10_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
PCI/MSI: Fix UAF in msi_capability_init
irqchip/loongson-liointc: Set different ISRs for different cores
irqchip/loongson-eiointc: Use early_cpu_to_node() instead of cpu_to_node()

+13 -6
+3 -2
drivers/irqchip/irq-loongson-eiointc.c
···
  15  #include <linux/irqchip/chained_irq.h>
  16  #include <linux/kernel.h>
  17  #include <linux/syscore_ops.h>
  18
  19  #define EIOINTC_REG_NODEMAP 0x14a0
  20  #define EIOINTC_REG_IPMAP 0x14c0
···
 340  	int node;
 341
 342  	if (cpu_has_flatmode)
 343 -		node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
 344  	else
 345  		node = eiointc_priv[nr_pics - 1]->node;
 346
···
 432  		goto out_free_handle;
 433
 434  	if (cpu_has_flatmode)
 435 -		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
 436  	else
 437  		node = acpi_eiointc->node;
 438  	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
···
  15  #include <linux/irqchip/chained_irq.h>
  16  #include <linux/kernel.h>
  17  #include <linux/syscore_ops.h>
  18 +#include <asm/numa.h>
  19
  20  #define EIOINTC_REG_NODEMAP 0x14a0
  21  #define EIOINTC_REG_IPMAP 0x14c0
···
 339  	int node;
 340
 341  	if (cpu_has_flatmode)
 342 +		node = early_cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
 343  	else
 344  		node = eiointc_priv[nr_pics - 1]->node;
 345
···
 431  		goto out_free_handle;
 432
 433  	if (cpu_has_flatmode)
 434 +		node = early_cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
 435  	else
 436  		node = acpi_eiointc->node;
 437  	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
+2 -2
drivers/irqchip/irq-loongson-liointc.c
···
  28
  29  #define LIOINTC_INTC_CHIP_START 0x20
  30
  31 -#define LIOINTC_REG_INTC_STATUS	(LIOINTC_INTC_CHIP_START + 0x20)
  32  #define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
  33  #define LIOINTC_REG_INTC_ENABLE	(LIOINTC_INTC_CHIP_START + 0x08)
  34  #define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
···
 217  		goto out_free_priv;
 218
 219  	for (i = 0; i < LIOINTC_NUM_CORES; i++)
 220 -		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS;
 221
 222  	for (i = 0; i < LIOINTC_NUM_PARENT; i++)
 223  		priv->handler[i].parent_int_map = parent_int_map[i];
···
  28
  29  #define LIOINTC_INTC_CHIP_START 0x20
  30
  31 +#define LIOINTC_REG_INTC_STATUS(core)	(LIOINTC_INTC_CHIP_START + 0x20 + (core) * 8)
  32  #define LIOINTC_REG_INTC_EN_STATUS	(LIOINTC_INTC_CHIP_START + 0x04)
  33  #define LIOINTC_REG_INTC_ENABLE	(LIOINTC_INTC_CHIP_START + 0x08)
  34  #define LIOINTC_REG_INTC_DISABLE	(LIOINTC_INTC_CHIP_START + 0x0c)
···
 217  		goto out_free_priv;
 218
 219  	for (i = 0; i < LIOINTC_NUM_CORES; i++)
 220 +		priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i);
 221
 222  	for (i = 0; i < LIOINTC_NUM_PARENT; i++)
 223  		priv->handler[i].parent_int_map = parent_int_map[i];
+8 -2
drivers/pci/msi/msi.c
···
 352  			       struct irq_affinity *affd)
 353  {
 354  	struct irq_affinity_desc *masks = NULL;
 355 -	struct msi_desc *entry;
 356  	int ret;
 357
 358  	/* Reject multi-MSI early on irq domain enabled architectures */
···
 377  	/* All MSIs are unmasked by default; mask them all */
 378  	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
 379  	pci_msi_mask(entry, msi_multi_mask(entry));
 380
 381  	/* Configure MSI capability structure */
 382  	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
···
 402  	goto unlock;
 403
 404  err:
 405 -	pci_msi_unmask(entry, msi_multi_mask(entry));
 406  	pci_free_msi_irqs(dev);
 407  fail:
 408  	dev->msi_enabled = 0;
···
 352  			       struct irq_affinity *affd)
 353  {
 354  	struct irq_affinity_desc *masks = NULL;
 355 +	struct msi_desc *entry, desc;
 356  	int ret;
 357
 358  	/* Reject multi-MSI early on irq domain enabled architectures */
···
 377  	/* All MSIs are unmasked by default; mask them all */
 378  	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
 379  	pci_msi_mask(entry, msi_multi_mask(entry));
 380 +	/*
 381 +	 * Copy the MSI descriptor for the error path because
 382 +	 * pci_msi_setup_msi_irqs() will free it for the hierarchical
 383 +	 * interrupt domain case.
 384 +	 */
 385 +	memcpy(&desc, entry, sizeof(desc));
 386
 387  	/* Configure MSI capability structure */
 388  	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
···
 396  	goto unlock;
 397
 398  err:
 399 +	pci_msi_unmask(&desc, msi_multi_mask(&desc));
 400  	pci_free_msi_irqs(dev);
 401  fail:
 402  	dev->msi_enabled = 0;