Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/xive: Untangle xive from child interrupt controller drivers

xive-specific data is stored in handler_data. This creates a mess, as xive
has to rely on child interrupt controller drivers to clean up this data, as
was done by 9a014f456881 ("powerpc/pseries/pci: Add a msi_free() handler to
clear XIVE data").

Instead, store xive-specific data in chip_data and untangle the child
drivers.

Signed-off-by: Nam Cao <namcao@linutronix.de>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/83968073022a4cc211dcbd0faccd20ec05e58c3e.1754903590.git.namcao@linutronix.de

Authored by Nam Cao and committed by Madhavan Srinivasan
(commit cc0cc23b, parent b034baff)

+33 -70
-1
arch/powerpc/include/asm/xive.h
··· 111 111 int xive_native_populate_irq_data(u32 hw_irq, 112 112 struct xive_irq_data *data); 113 113 void xive_cleanup_irq_data(struct xive_irq_data *xd); 114 - void xive_irq_free_data(unsigned int virq); 115 114 void xive_native_free_irq(u32 irq); 116 115 int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq); 117 116
+1 -20
arch/powerpc/platforms/powernv/pci-ioda.c
··· 37 37 #include <asm/firmware.h> 38 38 #include <asm/pnv-pci.h> 39 39 #include <asm/mmzone.h> 40 - #include <asm/xive.h> 41 40 42 41 #include "powernv.h" 43 42 #include "pci.h" ··· 1706 1707 return 0; 1707 1708 } 1708 1709 1709 - /* 1710 - * The msi_free() op is called before irq_domain_free_irqs_top() when 1711 - * the handler data is still available. Use that to clear the XIVE 1712 - * controller. 1713 - */ 1714 - static void pnv_msi_ops_msi_free(struct irq_domain *domain, 1715 - struct msi_domain_info *info, 1716 - unsigned int irq) 1717 - { 1718 - if (xive_enabled()) 1719 - xive_irq_free_data(irq); 1720 - } 1721 - 1722 - static struct msi_domain_ops pnv_pci_msi_domain_ops = { 1723 - .msi_free = pnv_msi_ops_msi_free, 1724 - }; 1725 - 1726 1710 static void pnv_msi_shutdown(struct irq_data *d) 1727 1711 { 1728 1712 d = d->parent_data; ··· 1736 1754 static struct msi_domain_info pnv_msi_domain_info = { 1737 1755 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | 1738 1756 MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), 1739 - .ops = &pnv_pci_msi_domain_ops, 1740 1757 .chip = &pnv_pci_msi_irq_chip, 1741 1758 }; 1742 1759 ··· 1851 1870 virq, d->hwirq, nr_irqs); 1852 1871 1853 1872 msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs); 1854 - /* XIVE domain is cleared through ->msi_free() */ 1873 + irq_domain_free_irqs_parent(domain, virq, nr_irqs); 1855 1874 } 1856 1875 1857 1876 static const struct irq_domain_ops pnv_irq_domain_ops = {
+1 -17
arch/powerpc/platforms/pseries/msi.c
··· 15 15 #include <asm/hw_irq.h> 16 16 #include <asm/ppc-pci.h> 17 17 #include <asm/machdep.h> 18 - #include <asm/xive.h> 19 18 20 19 #include "pseries.h" 21 20 ··· 436 437 } 437 438 438 439 /* 439 - * ->msi_free() is called before irq_domain_free_irqs_top() when the 440 - * handler data is still available. Use that to clear the XIVE 441 - * controller data. 442 - */ 443 - static void pseries_msi_ops_msi_free(struct irq_domain *domain, 444 - struct msi_domain_info *info, 445 - unsigned int irq) 446 - { 447 - if (xive_enabled()) 448 - xive_irq_free_data(irq); 449 - } 450 - 451 - /* 452 440 * RTAS can not disable one MSI at a time. It's all or nothing. Do it 453 441 * at the end after all IRQs have been freed. 454 442 */ ··· 449 463 450 464 static struct msi_domain_ops pseries_pci_msi_domain_ops = { 451 465 .msi_prepare = pseries_msi_ops_prepare, 452 - .msi_free = pseries_msi_ops_msi_free, 453 466 .msi_post_free = pseries_msi_post_free, 454 467 }; 455 468 ··· 589 604 struct pci_controller *phb = irq_data_get_irq_chip_data(d); 590 605 591 606 pr_debug("%s bridge %pOF %d #%d\n", __func__, phb->dn, virq, nr_irqs); 592 - 593 - /* XIVE domain data is cleared through ->msi_free() */ 607 + irq_domain_free_irqs_parent(domain, virq, nr_irqs); 594 608 } 595 609 596 610 static const struct irq_domain_ops pseries_irq_domain_ops = {
+31 -32
arch/powerpc/sysdev/xive/common.c
··· 317 317 if (d) { 318 318 char buffer[128]; 319 319 320 - xive_irq_data_dump(irq_data_get_irq_handler_data(d), 320 + xive_irq_data_dump(irq_data_get_irq_chip_data(d), 321 321 buffer, sizeof(buffer)); 322 322 xmon_printf("%s", buffer); 323 323 } ··· 437 437 /* irq_chip eoi callback, called with irq descriptor lock held */ 438 438 static void xive_irq_eoi(struct irq_data *d) 439 439 { 440 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 440 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 441 441 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 442 442 443 443 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", ··· 595 595 const struct cpumask *affinity) 596 596 { 597 597 static unsigned int fuzz; 598 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 598 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 599 599 cpumask_var_t mask; 600 600 int cpu = -1; 601 601 ··· 628 628 629 629 static unsigned int xive_irq_startup(struct irq_data *d) 630 630 { 631 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 631 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 632 632 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 633 633 int target, rc; 634 634 ··· 673 673 /* called with irq descriptor lock held */ 674 674 static void xive_irq_shutdown(struct irq_data *d) 675 675 { 676 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 676 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 677 677 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 678 678 679 679 pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d); ··· 698 698 699 699 static void xive_irq_unmask(struct irq_data *d) 700 700 { 701 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 701 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 702 702 703 703 pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd); 704 704 ··· 707 707 708 708 static void xive_irq_mask(struct irq_data *d) 709 
709 { 710 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 710 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 711 711 712 712 pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd); 713 713 ··· 718 718 const struct cpumask *cpumask, 719 719 bool force) 720 720 { 721 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 721 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 722 722 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 723 723 u32 target, old_target; 724 724 int rc = 0; ··· 776 776 777 777 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type) 778 778 { 779 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 779 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 780 780 781 781 /* 782 782 * We only support these. This has really no effect other than setting ··· 815 815 816 816 static int xive_irq_retrigger(struct irq_data *d) 817 817 { 818 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 818 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 819 819 820 820 /* This should be only for MSIs */ 821 821 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) ··· 837 837 */ 838 838 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) 839 839 { 840 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 840 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(d); 841 841 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 842 842 int rc; 843 843 u8 pq; ··· 951 951 static int xive_get_irqchip_state(struct irq_data *data, 952 952 enum irqchip_irq_state which, bool *state) 953 953 { 954 - struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); 954 + struct xive_irq_data *xd = irq_data_get_irq_chip_data(data); 955 955 u8 pq; 956 956 957 957 switch (which) { ··· 1011 1011 } 1012 1012 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data); 1013 1013 1014 - static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) 1014 + static struct 
xive_irq_data *xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) 1015 1015 { 1016 1016 struct xive_irq_data *xd; 1017 1017 int rc; 1018 1018 1019 1019 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); 1020 1020 if (!xd) 1021 - return -ENOMEM; 1021 + return ERR_PTR(-ENOMEM); 1022 1022 rc = xive_ops->populate_irq_data(hw, xd); 1023 1023 if (rc) { 1024 1024 kfree(xd); 1025 - return rc; 1025 + return ERR_PTR(rc); 1026 1026 } 1027 1027 xd->target = XIVE_INVALID_TARGET; 1028 - irq_set_handler_data(virq, xd); 1029 1028 1030 1029 /* 1031 1030 * Turn OFF by default the interrupt being mapped. A side ··· 1035 1036 */ 1036 1037 xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 1037 1038 1038 - return 0; 1039 + return xd; 1039 1040 } 1040 1041 1041 - void xive_irq_free_data(unsigned int virq) 1042 + static void xive_irq_free_data(unsigned int virq) 1042 1043 { 1043 - struct xive_irq_data *xd = irq_get_handler_data(virq); 1044 + struct xive_irq_data *xd = irq_get_chip_data(virq); 1044 1045 1045 1046 if (!xd) 1046 1047 return; 1047 - irq_set_handler_data(virq, NULL); 1048 + irq_set_chip_data(virq, NULL); 1048 1049 xive_cleanup_irq_data(xd); 1049 1050 kfree(xd); 1050 1051 } 1051 - EXPORT_SYMBOL_GPL(xive_irq_free_data); 1052 1052 1053 1053 #ifdef CONFIG_SMP 1054 1054 ··· 1284 1286 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, 1285 1287 irq_hw_number_t hw) 1286 1288 { 1287 - int rc; 1289 + struct xive_irq_data *xd; 1288 1290 1289 1291 /* 1290 1292 * Mark interrupts as edge sensitive by default so that resend ··· 1292 1294 */ 1293 1295 irq_clear_status_flags(virq, IRQ_LEVEL); 1294 1296 1295 - rc = xive_irq_alloc_data(virq, hw); 1296 - if (rc) 1297 - return rc; 1297 + xd = xive_irq_alloc_data(virq, hw); 1298 + if (IS_ERR(xd)) 1299 + return PTR_ERR(xd); 1298 1300 1299 1301 irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq); 1302 + irq_set_chip_data(virq, xd); 1300 1303 1301 1304 return 0; 1302 1305 } ··· 1365 1366 seq_printf(m, 
"%*sXIVE:\n", ind, ""); 1366 1367 ind++; 1367 1368 1368 - xd = irq_data_get_irq_handler_data(irqd); 1369 + xd = irq_data_get_irq_chip_data(irqd); 1369 1370 if (!xd) { 1370 1371 seq_printf(m, "%*snot assigned\n", ind, ""); 1371 1372 return; ··· 1402 1403 unsigned int nr_irqs, void *arg) 1403 1404 { 1404 1405 struct irq_fwspec *fwspec = arg; 1406 + struct xive_irq_data *xd; 1405 1407 irq_hw_number_t hwirq; 1406 1408 unsigned int type = IRQ_TYPE_NONE; 1407 1409 int i, rc; ··· 1423 1423 irq_clear_status_flags(virq, IRQ_LEVEL); 1424 1424 1425 1425 /* allocates and sets handler data */ 1426 - rc = xive_irq_alloc_data(virq + i, hwirq + i); 1427 - if (rc) 1428 - return rc; 1426 + xd = xive_irq_alloc_data(virq + i, hwirq + i); 1427 + if (IS_ERR(xd)) 1428 + return PTR_ERR(xd); 1429 1429 1430 - irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 1431 - &xive_irq_chip, domain->host_data); 1430 + irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, &xive_irq_chip, xd); 1432 1431 irq_set_handler(virq + i, handle_fasteoi_irq); 1433 1432 } 1434 1433 ··· 1763 1764 seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", 1764 1765 hw_irq, target, prio, lirq); 1765 1766 1766 - xive_irq_data_dump(irq_data_get_irq_handler_data(d), buffer, sizeof(buffer)); 1767 + xive_irq_data_dump(irq_data_get_irq_chip_data(d), buffer, sizeof(buffer)); 1767 1768 seq_puts(m, buffer); 1768 1769 seq_puts(m, "\n"); 1769 1770 }