Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
"This updated pull request does not contain the last few GIC related
patches which were reported to cause a regression. There is a fix
available, but I let it breed for a couple of days first.

The irq department provides:

- new infrastructure to support non PCI based MSI interrupts
- a couple of new irq chip drivers
- the usual pile of fixlets and updates to irq chip drivers
- preparatory changes for removal of the irq argument from interrupt
flow handlers
- preparatory changes to remove IRQF_VALID"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (129 commits)
irqchip/imx-gpcv2: IMX GPCv2 driver for wakeup sources
irqchip: Add bcm2836 interrupt controller for Raspberry Pi 2
irqchip: Add documentation for the bcm2836 interrupt controller
irqchip/bcm2835: Add support for being used as a second level controller
irqchip/bcm2835: Refactor handle_IRQ() calls out of MAKE_HWIRQ
PCI: xilinx: Fix typo in function name
irqchip/gic: Ensure gic_cpu_if_up/down() programs correct GIC instance
irqchip/gic: Only allow the primary GIC to set the CPU map
PCI/MSI: pci-xgene-msi: Consolidate chained IRQ handler install/remove
unicore32/irq: Prepare puv3_gpio_handler for irq argument removal
tile/pci_gx: Prepare trio_handle_level_irq for irq argument removal
m68k/irq: Prepare irq handlers for irq argument removal
C6X/megamode-pic: Prepare megamod_irq_cascade for irq argument removal
blackfin: Prepare irq handlers for irq argument removal
arc/irq: Prepare idu_cascade_isr for irq argument removal
sparc/irq: Use access helper irq_data_get_affinity_mask()
sparc/irq: Use helper irq_data_get_irq_handler_data()
parisc/irq: Use access helper irq_data_get_affinity_mask()
mn10300/irq: Use access helper irq_data_get_affinity_mask()
irqchip/i8259: Prepare i8259_irq_dispatch for irq argument removal
...

+2208 -849
+24 -1
Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2835-armctrl-ic.txt
··· 5 5 controller, or the HW block containing it, is referred to occasionally 6 6 as "armctrl" in the SoC documentation, hence naming of this binding. 7 7 8 + The BCM2836 contains the same interrupt controller with the same 9 + interrupts, but the per-CPU interrupt controller is the root, and an 10 + interrupt there indicates that the ARMCTRL has an interrupt to handle. 11 + 8 12 Required properties: 9 13 10 - - compatible : should be "brcm,bcm2835-armctrl-ic" 14 + - compatible : should be "brcm,bcm2835-armctrl-ic" or 15 + "brcm,bcm2836-armctrl-ic" 11 16 - reg : Specifies base physical address and size of the registers. 12 17 - interrupt-controller : Identifies the node as an interrupt controller 13 18 - #interrupt-cells : Specifies the number of cells needed to encode an ··· 24 19 25 20 The 2nd cell contains the interrupt number within the bank. Valid values 26 21 are 0..7 for bank 0, and 0..31 for bank 1. 22 + 23 + Additional required properties for brcm,bcm2836-armctrl-ic: 24 + - interrupt-parent : Specifies the parent interrupt controller when this 25 + controller is the second level. 26 + - interrupts : Specifies the interrupt on the parent for this interrupt 27 + controller to handle. 27 28 28 29 The interrupt sources are as follows: 29 30 ··· 113 102 114 103 Example: 115 104 105 + /* BCM2835, first level */ 116 106 intc: interrupt-controller { 117 107 compatible = "brcm,bcm2835-armctrl-ic"; 118 108 reg = <0x7e00b200 0x200>; 119 109 interrupt-controller; 120 110 #interrupt-cells = <2>; 111 + }; 112 + 113 + /* BCM2836, second level */ 114 + intc: interrupt-controller { 115 + compatible = "brcm,bcm2836-armctrl-ic"; 116 + reg = <0x7e00b200 0x200>; 117 + interrupt-controller; 118 + #interrupt-cells = <2>; 119 + 120 + interrupt-parent = <&local_intc>; 121 + interrupts = <8>; 121 122 };
+37
Documentation/devicetree/bindings/interrupt-controller/brcm,bcm2836-l1-intc.txt
··· 1 + BCM2836 per-CPU interrupt controller 2 + 3 + The BCM2836 has a per-cpu interrupt controller for the timer, PMU 4 + events, and SMP IPIs. One of the CPUs may receive interrupts for the 5 + peripheral (GPU) events, which chain to the BCM2835-style interrupt 6 + controller. 7 + 8 + Required properties: 9 + 10 + - compatible: Should be "brcm,bcm2836-l1-intc" 11 + - reg: Specifies base physical address and size of the 12 + registers 13 + - interrupt-controller: Identifies the node as an interrupt controller 14 + - #interrupt-cells: Specifies the number of cells needed to encode an 15 + interrupt source. The value shall be 1 16 + 17 + Please refer to interrupts.txt in this directory for details of the common 18 + Interrupt Controllers bindings used by client devices. 19 + 20 + The interrupt sources are as follows: 21 + 22 + 0: CNTPSIRQ 23 + 1: CNTPNSIRQ 24 + 2: CNTHPIRQ 25 + 3: CNTVIRQ 26 + 8: GPU_FAST 27 + 9: PMU_FAST 28 + 29 + Example: 30 + 31 + local_intc: local_intc { 32 + compatible = "brcm,bcm2836-l1-intc"; 33 + reg = <0x40000000 0x100>; 34 + interrupt-controller; 35 + #interrupt-cells = <1>; 36 + interrupt-parent = <&local_intc>; 37 + };
+1 -1
arch/alpha/kernel/irq.c
··· 59 59 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 60 60 last_cpu = cpu; 61 61 62 - cpumask_copy(data->affinity, cpumask_of(cpu)); 62 + cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu)); 63 63 chip->irq_set_affinity(data, cpumask_of(cpu), false); 64 64 return 0; 65 65 }
+2 -1
arch/arc/kernel/mcip.c
··· 252 252 253 253 static int idu_first_irq; 254 254 255 - static void idu_cascade_isr(unsigned int core_irq, struct irq_desc *desc) 255 + static void idu_cascade_isr(unsigned int __core_irq, struct irq_desc *desc) 256 256 { 257 257 struct irq_domain *domain = irq_desc_get_handler_data(desc); 258 + unsigned int core_irq = irq_desc_get_irq(desc); 258 259 unsigned int idu_irq; 259 260 260 261 idu_irq = core_irq - idu_first_irq;
-2
arch/arm/mach-shmobile/setup-r8a7779.c
··· 62 62 63 63 static void __init r8a7779_init_irq_dt(void) 64 64 { 65 - gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE); 66 - 67 65 irqchip_init(); 68 66 69 67 /* route all interrupts to ARM */
-1
arch/arm/mach-ux500/cpu.c
··· 56 56 struct device_node *np; 57 57 struct resource r; 58 58 59 - gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND); 60 59 irqchip_init(); 61 60 np = of_find_compatible_node(NULL, NULL, "stericsson,db8500-prcmu"); 62 61 of_address_to_resource(np, 0, &r);
+1 -1
arch/arm/mach-vexpress/tc2_pm.c
··· 80 80 * to the CPU by disabling the GIC CPU IF to prevent wfi 81 81 * from completing execution behind power controller back 82 82 */ 83 - gic_cpu_if_down(); 83 + gic_cpu_if_down(0); 84 84 } 85 85 86 86 static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
-1
arch/arm/mach-zynq/common.c
··· 186 186 187 187 static void __init zynq_irq_init(void) 188 188 { 189 - gic_set_irqchip_flags(IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND); 190 189 irqchip_init(); 191 190 } 192 191
+2 -2
arch/avr32/mach-at32ap/extint.c
··· 128 128 129 129 irqd_set_trigger_type(d, flow_type); 130 130 if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 131 - __irq_set_handler_locked(irq, handle_level_irq); 131 + irq_set_handler_locked(d, handle_level_irq); 132 132 else 133 - __irq_set_handler_locked(irq, handle_edge_irq); 133 + irq_set_handler_locked(d, handle_edge_irq); 134 134 135 135 return IRQ_SET_MASK_OK_NOCOPY; 136 136 }
+3 -3
arch/avr32/mach-at32ap/pio.c
··· 286 286 struct pio_device *pio = irq_desc_get_chip_data(desc); 287 287 unsigned gpio_irq; 288 288 289 - gpio_irq = (unsigned) irq_get_handler_data(irq); 289 + gpio_irq = (unsigned) irq_desc_get_handler_data(desc); 290 290 for (;;) { 291 291 u32 isr; 292 292 ··· 312 312 unsigned i; 313 313 314 314 irq_set_chip_data(irq, pio); 315 - irq_set_handler_data(irq, (void *)gpio_irq); 316 315 317 316 for (i = 0; i < 32; i++, gpio_irq++) { 318 317 irq_set_chip_data(gpio_irq, pio); ··· 319 320 handle_simple_irq); 320 321 } 321 322 322 - irq_set_chained_handler(irq, gpio_irq_handler); 323 + irq_set_chained_handler_and_data(irq, gpio_irq_handler, 324 + (void *)gpio_irq); 323 325 } 324 326 325 327 /*--------------------------------------------------------------------------*/
+3 -1
arch/blackfin/mach-bf537/ints-priority.c
··· 182 182 .irq_unmask = bf537_mac_rx_unmask_irq, 183 183 }; 184 184 185 - static void bf537_demux_mac_rx_irq(unsigned int int_irq, 185 + static void bf537_demux_mac_rx_irq(unsigned int __int_irq, 186 186 struct irq_desc *desc) 187 187 { 188 + unsigned int int_irq = irq_desc_get_irq(desc); 189 + 188 190 if (bfin_read_DMA1_IRQ_STATUS() & (DMA_DONE | DMA_ERR)) 189 191 bfin_handle_irq(IRQ_MAC_RX); 190 192 else
+8 -7
arch/blackfin/mach-common/ints-priority.c
··· 194 194 #ifdef CONFIG_SMP 195 195 static void bfin_internal_unmask_irq_chip(struct irq_data *d) 196 196 { 197 - bfin_internal_unmask_irq_affinity(d->irq, d->affinity); 197 + bfin_internal_unmask_irq_affinity(d->irq, 198 + irq_data_get_affinity_mask(d)); 198 199 } 199 200 200 201 static int bfin_internal_set_affinity(struct irq_data *d, ··· 686 685 } 687 686 #endif 688 687 689 - static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle) 688 + static inline void bfin_set_irq_handler(struct irq_data *d, irq_flow_handler_t handle) 690 689 { 691 690 #ifdef CONFIG_IPIPE 692 691 handle = handle_level_irq; 693 692 #endif 694 - __irq_set_handler_locked(irq, handle); 693 + irq_set_handler_locked(d, handle); 695 694 } 696 695 697 696 #ifdef CONFIG_GPIO_ADI ··· 803 802 } 804 803 805 804 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 806 - bfin_set_irq_handler(irq, handle_edge_irq); 805 + bfin_set_irq_handler(d, handle_edge_irq); 807 806 else 808 - bfin_set_irq_handler(irq, handle_level_irq); 807 + bfin_set_irq_handler(d, handle_level_irq); 809 808 810 809 return 0; 811 810 } ··· 825 824 } 826 825 } 827 826 828 - void bfin_demux_gpio_irq(unsigned int inta_irq, 829 - struct irq_desc *desc) 827 + void bfin_demux_gpio_irq(unsigned int __inta_irq, struct irq_desc *desc) 830 828 { 829 + unsigned int inta_irq = irq_desc_get_irq(desc); 831 830 unsigned int irq; 832 831 833 832 switch (inta_irq) {
+4 -3
arch/c6x/platforms/megamod-pic.c
··· 93 93 .irq_unmask = unmask_megamod, 94 94 }; 95 95 96 - static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc) 96 + static void megamod_irq_cascade(unsigned int __irq, struct irq_desc *desc) 97 97 { 98 98 struct megamod_cascade_data *cascade; 99 99 struct megamod_pic *pic; 100 + unsigned int irq; 100 101 u32 events; 101 102 int n, idx; 102 103 ··· 283 282 soc_writel(~0, &pic->regs->evtmask[i]); 284 283 soc_writel(~0, &pic->regs->evtclr[i]); 285 284 286 - irq_set_handler_data(irq, &cascade_data[i]); 287 - irq_set_chained_handler(irq, megamod_irq_cascade); 285 + irq_set_chained_handler_and_data(irq, megamod_irq_cascade, 286 + &cascade_data[i]); 288 287 } 289 288 290 289 /* Finally, set up the MUX registers */
+4 -4
arch/ia64/kernel/iosapic.c
··· 610 610 chip->name, irq_type->name); 611 611 chip = irq_type; 612 612 } 613 - __irq_set_chip_handler_name_locked(irq, chip, trigger == IOSAPIC_EDGE ? 614 - handle_edge_irq : handle_level_irq, 615 - NULL); 613 + irq_set_chip_handler_name_locked(irq_get_irq_data(irq), chip, 614 + trigger == IOSAPIC_EDGE ? handle_edge_irq : handle_level_irq, 615 + NULL); 616 616 return 0; 617 617 } 618 618 ··· 838 838 if (iosapic_intr_info[irq].count == 0) { 839 839 #ifdef CONFIG_SMP 840 840 /* Clear affinity */ 841 - cpumask_setall(irq_get_irq_data(irq)->affinity); 841 + cpumask_setall(irq_get_affinity_mask(irq)); 842 842 #endif 843 843 /* Clear the interrupt information */ 844 844 iosapic_intr_info[irq].dest = 0;
+3 -3
arch/ia64/kernel/irq.c
··· 67 67 void set_irq_affinity_info (unsigned int irq, int hwid, int redir) 68 68 { 69 69 if (irq < NR_IRQS) { 70 - cpumask_copy(irq_get_irq_data(irq)->affinity, 70 + cpumask_copy(irq_get_affinity_mask(irq), 71 71 cpumask_of(cpu_logical_id(hwid))); 72 72 irq_redir[irq] = (char) (redir & 0xff); 73 73 } ··· 119 119 if (irqd_is_per_cpu(data)) 120 120 continue; 121 121 122 - if (cpumask_any_and(data->affinity, cpu_online_mask) 123 - >= nr_cpu_ids) { 122 + if (cpumask_any_and(irq_data_get_affinity_mask(data), 123 + cpu_online_mask) >= nr_cpu_ids) { 124 124 /* 125 125 * Save it for phase 2 processing 126 126 */
+3 -3
arch/ia64/kernel/msi_ia64.c
··· 23 23 if (irq_prepare_move(irq, cpu)) 24 24 return -1; 25 25 26 - __get_cached_msi_msg(idata->msi_desc, &msg); 26 + __get_cached_msi_msg(irq_data_get_msi_desc(idata), &msg); 27 27 28 28 addr = msg.address_lo; 29 29 addr &= MSI_ADDR_DEST_ID_MASK; ··· 36 36 msg.data = data; 37 37 38 38 pci_write_msi_msg(irq, &msg); 39 - cpumask_copy(idata->affinity, cpumask_of(cpu)); 39 + cpumask_copy(irq_data_get_affinity_mask(idata), cpumask_of(cpu)); 40 40 41 41 return 0; 42 42 } ··· 148 148 msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu)); 149 149 150 150 dmar_msi_write(irq, &msg); 151 - cpumask_copy(data->affinity, mask); 151 + cpumask_copy(irq_data_get_affinity_mask(data), mask); 152 152 153 153 return 0; 154 154 }
+2 -2
arch/ia64/sn/kernel/msi_sn.c
··· 175 175 * Release XIO resources for the old MSI PCI address 176 176 */ 177 177 178 - __get_cached_msi_msg(data->msi_desc, &msg); 178 + __get_cached_msi_msg(irq_data_get_msi_desc(data), &msg); 179 179 sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; 180 180 pdev = sn_pdev->pdi_linux_pcidev; 181 181 provider = SN_PCIDEV_BUSPROVIDER(pdev); ··· 206 206 msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); 207 207 208 208 pci_write_msi_msg(irq, &msg); 209 - cpumask_copy(data->affinity, cpu_mask); 209 + cpumask_copy(irq_data_get_affinity_mask(data), cpu_mask); 210 210 211 211 return 0; 212 212 }
+3 -1
arch/m68k/coldfire/intc-5272.c
··· 143 143 * We need to be careful with the masking/acking due to the side effects 144 144 * of masking an interrupt. 145 145 */ 146 - static void intc_external_irq(unsigned int irq, struct irq_desc *desc) 146 + static void intc_external_irq(unsigned int __irq, struct irq_desc *desc) 147 147 { 148 + unsigned int irq = irq_desc_get_irq(desc); 149 + 148 150 irq_desc_get_chip(desc)->irq_ack(&desc->irq_data); 149 151 handle_simple_irq(irq, desc); 150 152 }
+4 -2
arch/m68k/mac/oss.c
··· 63 63 * Handle miscellaneous OSS interrupts. 64 64 */ 65 65 66 - static void oss_irq(unsigned int irq, struct irq_desc *desc) 66 + static void oss_irq(unsigned int __irq, struct irq_desc *desc) 67 67 { 68 68 int events = oss->irq_pending & 69 - (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM); 69 + (OSS_IP_IOPSCC | OSS_IP_SCSI | OSS_IP_IOPISM); 70 70 71 71 #ifdef DEBUG_IRQS 72 72 if ((console_loglevel == 10) && !(events & OSS_IP_SCSI)) { 73 + unsigned int irq = irq_desc_get_irq(desc); 74 + 73 75 printk("oss_irq: irq %u events = 0x%04X\n", irq, 74 76 (int) oss->irq_pending); 75 77 }
+2 -1
arch/m68k/mac/psc.c
··· 113 113 * PSC interrupt handler. It's a lot like the VIA interrupt handler. 114 114 */ 115 115 116 - static void psc_irq(unsigned int irq, struct irq_desc *desc) 116 + static void psc_irq(unsigned int __irq, struct irq_desc *desc) 117 117 { 118 118 unsigned int offset = (unsigned int)irq_desc_get_handler_data(desc); 119 + unsigned int irq = irq_desc_get_irq(desc); 119 120 int pIFR = pIFRbase + offset; 120 121 int pIER = pIERbase + offset; 121 122 int irq_num;
+1 -2
arch/microblaze/kernel/intc.c
··· 11 11 12 12 #include <linux/irqdomain.h> 13 13 #include <linux/irq.h> 14 + #include <linux/irqchip.h> 14 15 #include <linux/of_address.h> 15 16 #include <linux/io.h> 16 17 #include <linux/bug.h> 17 - 18 - #include "../../drivers/irqchip/irqchip.h" 19 18 20 19 static void __iomem *intc_baseaddr; 21 20
-4
arch/mips/Kconfig
··· 1071 1071 config SYS_SUPPORTS_HOTPLUG_CPU 1072 1072 bool 1073 1073 1074 - config I8259 1075 - bool 1076 - select IRQ_DOMAIN 1077 - 1078 1074 config MIPS_BONITO64 1079 1075 bool 1080 1076
-1
arch/mips/ath79/irq.c
··· 17 17 #include <linux/interrupt.h> 18 18 #include <linux/irqchip.h> 19 19 #include <linux/of_irq.h> 20 - #include "../../../drivers/irqchip/irqchip.h" 21 20 22 21 #include <asm/irq_cpu.h> 23 22 #include <asm/mipsregs.h>
+1 -1
arch/mips/bmips/irq.c
··· 34 34 irqchip_init(); 35 35 } 36 36 37 - OF_DECLARE_2(irqchip, mips_cpu_intc, "mti,cpu-interrupt-controller", 37 + IRQCHIP_DECLARE(mips_cpu_intc, "mti,cpu-interrupt-controller", 38 38 mips_cpu_irq_of_init);
-1
arch/mips/kernel/Makefile
··· 61 61 obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o 62 62 obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o 63 63 64 - obj-$(CONFIG_I8259) += i8259.o 65 64 obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o 66 65 obj-$(CONFIG_MIPS_MSC) += irq-msc01.o 67 66 obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
+4 -4
arch/mips/kernel/i8259.c drivers/irqchip/irq-i8259.c
··· 12 12 #include <linux/init.h> 13 13 #include <linux/ioport.h> 14 14 #include <linux/interrupt.h> 15 + #include <linux/irqchip.h> 15 16 #include <linux/irqdomain.h> 16 17 #include <linux/kernel.h> 17 18 #include <linux/of_irq.h> ··· 22 21 23 22 #include <asm/i8259.h> 24 23 #include <asm/io.h> 25 - 26 - #include "../../drivers/irqchip/irqchip.h" 27 24 28 25 /* 29 26 * This is the 'legacy' 8259A Programmable Interrupt Controller, ··· 352 353 __init_i8259_irqs(NULL); 353 354 } 354 355 355 - static void i8259_irq_dispatch(unsigned int irq, struct irq_desc *desc) 356 + static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc) 356 357 { 357 - struct irq_domain *domain = irq_get_handler_data(irq); 358 + struct irq_domain *domain = irq_desc_get_handler_data(desc); 358 359 int hwirq = i8259_irq(); 360 + unsigned int irq; 359 361 360 362 if (hwirq < 0) 361 363 return;
+1 -1
arch/mips/pci/msi-octeon.c
··· 200 200 if (type == PCI_CAP_ID_MSI && nvec > 1) 201 201 return 1; 202 202 203 - list_for_each_entry(entry, &dev->msi_list, list) { 203 + for_each_pci_msi_entry(entry, dev) { 204 204 ret = arch_setup_msi_irq(dev, entry); 205 205 if (ret < 0) 206 206 return ret;
+1 -1
arch/mn10300/kernel/cevt-mn10300.c
··· 116 116 { 117 117 struct irq_data *data; 118 118 data = irq_get_irq_data(cd->irq); 119 - cpumask_copy(data->affinity, cpumask_of(cpu)); 119 + cpumask_copy(irq_data_get_affinity_mask(data), cpumask_of(cpu)); 120 120 iact->flags |= IRQF_NOBALANCING; 121 121 } 122 122 #endif
+7 -6
arch/mn10300/kernel/irq.c
··· 87 87 tmp2 = GxICR(irq); 88 88 89 89 irq_affinity_online[irq] = 90 - cpumask_any_and(d->affinity, cpu_online_mask); 90 + cpumask_any_and(irq_data_get_affinity_mask(d), 91 + cpu_online_mask); 91 92 CROSS_GxICR(irq, irq_affinity_online[irq]) = 92 93 (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT; 93 94 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); ··· 125 124 } else { 126 125 tmp = GxICR(irq); 127 126 128 - irq_affinity_online[irq] = cpumask_any_and(d->affinity, 127 + irq_affinity_online[irq] = cpumask_any_and(irq_data_get_affinity_mask(d), 129 128 cpu_online_mask); 130 129 CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT; 131 130 tmp = CROSS_GxICR(irq, irq_affinity_online[irq]); ··· 317 316 self = smp_processor_id(); 318 317 for (irq = 0; irq < NR_IRQS; irq++) { 319 318 struct irq_data *data = irq_get_irq_data(irq); 319 + struct cpumask *mask = irq_data_get_affinity_mask(data); 320 320 321 321 if (irqd_is_per_cpu(data)) 322 322 continue; 323 323 324 - if (cpumask_test_cpu(self, data->affinity) && 324 + if (cpumask_test_cpu(self, mask) && 325 325 !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) { 326 326 int cpu_id; 327 327 cpu_id = cpumask_first(cpu_online_mask); 328 - cpumask_set_cpu(cpu_id, data->affinity); 328 + cpumask_set_cpu(cpu_id, mask); 329 329 } 330 330 /* We need to operate irq_affinity_online atomically. */ 331 331 arch_local_cli_save(flags); ··· 337 335 GxICR(irq) = x & GxICR_LEVEL; 338 336 tmp = GxICR(irq); 339 337 340 - new = cpumask_any_and(data->affinity, 341 - cpu_online_mask); 338 + new = cpumask_any_and(mask, cpu_online_mask); 342 339 irq_affinity_online[irq] = new; 343 340 344 341 CROSS_GxICR(irq, new) =
+6 -6
arch/parisc/kernel/irq.c
··· 131 131 if (cpu_dest < 0) 132 132 return -1; 133 133 134 - cpumask_copy(d->affinity, dest); 134 + cpumask_copy(irq_data_get_affinity_mask(d), dest); 135 135 136 136 return 0; 137 137 } ··· 339 339 { 340 340 #ifdef CONFIG_SMP 341 341 struct irq_data *d = irq_get_irq_data(irq); 342 - cpumask_copy(d->affinity, cpumask_of(cpu)); 342 + cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu)); 343 343 #endif 344 344 345 345 return per_cpu(cpu_data, cpu).txn_addr; ··· 508 508 unsigned long eirr_val; 509 509 int irq, cpu = smp_processor_id(); 510 510 #ifdef CONFIG_SMP 511 - struct irq_desc *desc; 511 + struct irq_data *irq_data; 512 512 cpumask_t dest; 513 513 #endif 514 514 ··· 522 522 irq = eirr_to_irq(eirr_val); 523 523 524 524 #ifdef CONFIG_SMP 525 - desc = irq_to_desc(irq); 526 - cpumask_copy(&dest, desc->irq_data.affinity); 527 - if (irqd_is_per_cpu(&desc->irq_data) && 525 + irq_data = irq_get_irq_data(irq); 526 + cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data)); 527 + if (irqd_is_per_cpu(irq_data) && 528 528 !cpumask_test_cpu(smp_processor_id(), &dest)) { 529 529 int cpu = cpumask_first(&dest); 530 530
+2 -1
arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
··· 123 123 } 124 124 125 125 static int 126 - cpld_pic_host_match(struct irq_domain *h, struct device_node *node) 126 + cpld_pic_host_match(struct irq_domain *h, struct device_node *node, 127 + enum irq_domain_bus_token bus_token) 127 128 { 128 129 return cpld_pic_node == node; 129 130 }
+3 -3
arch/powerpc/platforms/cell/axon_msi.c
··· 213 213 return -ENODEV; 214 214 } 215 215 216 - entry = list_first_entry(&dev->msi_list, struct msi_desc, list); 216 + entry = first_pci_msi_entry(dev); 217 217 218 218 for (; dn; dn = of_get_next_parent(dn)) { 219 219 if (entry->msi_attrib.is_64) { ··· 269 269 if (rc) 270 270 return rc; 271 271 272 - list_for_each_entry(entry, &dev->msi_list, list) { 272 + for_each_pci_msi_entry(entry, dev) { 273 273 virq = irq_create_direct_mapping(msic->irq_domain); 274 274 if (virq == NO_IRQ) { 275 275 dev_warn(&dev->dev, ··· 292 292 293 293 dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n"); 294 294 295 - list_for_each_entry(entry, &dev->msi_list, list) { 295 + for_each_pci_msi_entry(entry, dev) { 296 296 if (entry->irq == NO_IRQ) 297 297 continue; 298 298
+2 -1
arch/powerpc/platforms/cell/interrupt.c
··· 222 222 #endif /* CONFIG_SMP */ 223 223 224 224 225 - static int iic_host_match(struct irq_domain *h, struct device_node *node) 225 + static int iic_host_match(struct irq_domain *h, struct device_node *node, 226 + enum irq_domain_bus_token bus_token) 226 227 { 227 228 return of_device_is_compatible(node, 228 229 "IBM,CBEA-Internal-Interrupt-Controller");
+2 -1
arch/powerpc/platforms/embedded6xx/flipper-pic.c
··· 108 108 return 0; 109 109 } 110 110 111 - static int flipper_pic_match(struct irq_domain *h, struct device_node *np) 111 + static int flipper_pic_match(struct irq_domain *h, struct device_node *np, 112 + enum irq_domain_bus_token bus_token) 112 113 { 113 114 return 1; 114 115 }
+2 -2
arch/powerpc/platforms/pasemi/msi.c
··· 66 66 67 67 pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev); 68 68 69 - list_for_each_entry(entry, &pdev->msi_list, list) { 69 + for_each_pci_msi_entry(entry, pdev) { 70 70 if (entry->irq == NO_IRQ) 71 71 continue; 72 72 ··· 94 94 msg.address_hi = 0; 95 95 msg.address_lo = PASEMI_MSI_ADDR; 96 96 97 - list_for_each_entry(entry, &pdev->msi_list, list) { 97 + for_each_pci_msi_entry(entry, pdev) { 98 98 /* Allocate 16 interrupts for now, since that's the grouping for 99 99 * affinity. This can be changed later if it turns out 32 is too 100 100 * few MSIs for someone, but restrictions will apply to how the
+2 -1
arch/powerpc/platforms/powermac/pic.c
··· 268 268 .name = "cascade", 269 269 }; 270 270 271 - static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node) 271 + static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, 272 + enum irq_domain_bus_token bus_token) 272 273 { 273 274 /* We match all, we don't always have a node anyway */ 274 275 return 1;
+2 -1
arch/powerpc/platforms/powernv/opal-irqchip.c
··· 134 134 opal_handle_events(be64_to_cpu(last_outstanding_events)); 135 135 } 136 136 137 - static int opal_event_match(struct irq_domain *h, struct device_node *node) 137 + static int opal_event_match(struct irq_domain *h, struct device_node *node, 138 + enum irq_domain_bus_token bus_token) 138 139 { 139 140 return h->of_node == node; 140 141 }
+2 -2
arch/powerpc/platforms/powernv/pci.c
··· 61 61 if (pdev->no_64bit_msi && !phb->msi32_support) 62 62 return -ENODEV; 63 63 64 - list_for_each_entry(entry, &pdev->msi_list, list) { 64 + for_each_pci_msi_entry(entry, pdev) { 65 65 if (!entry->msi_attrib.is_64 && !phb->msi32_support) { 66 66 pr_warn("%s: Supports only 64-bit MSIs\n", 67 67 pci_name(pdev)); ··· 103 103 if (WARN_ON(!phb)) 104 104 return; 105 105 106 - list_for_each_entry(entry, &pdev->msi_list, list) { 106 + for_each_pci_msi_entry(entry, pdev) { 107 107 if (entry->irq == NO_IRQ) 108 108 continue; 109 109 irq_set_msi_desc(entry->irq, NULL);
+2 -1
arch/powerpc/platforms/ps3/interrupt.c
··· 678 678 return 0; 679 679 } 680 680 681 - static int ps3_host_match(struct irq_domain *h, struct device_node *np) 681 + static int ps3_host_match(struct irq_domain *h, struct device_node *np, 682 + enum irq_domain_bus_token bus_token) 682 683 { 683 684 /* Match all */ 684 685 return 1;
+3 -3
arch/powerpc/platforms/pseries/msi.c
··· 118 118 { 119 119 struct msi_desc *entry; 120 120 121 - list_for_each_entry(entry, &pdev->msi_list, list) { 121 + for_each_pci_msi_entry(entry, pdev) { 122 122 if (entry->irq == NO_IRQ) 123 123 continue; 124 124 ··· 350 350 * So we must reject such requests. */ 351 351 352 352 expected = 0; 353 - list_for_each_entry(entry, &pdev->msi_list, list) { 353 + for_each_pci_msi_entry(entry, pdev) { 354 354 if (entry->msi_attrib.entry_nr != expected) { 355 355 pr_debug("rtas_msi: bad MSI-X entries.\n"); 356 356 return -EINVAL; ··· 462 462 } 463 463 464 464 i = 0; 465 - list_for_each_entry(entry, &pdev->msi_list, list) { 465 + for_each_pci_msi_entry(entry, pdev) { 466 466 hwirq = rtas_query_irq_number(pdn, i++); 467 467 if (hwirq < 0) { 468 468 pr_debug("rtas_msi: error (%d) getting hwirq\n", rc);
+2 -1
arch/powerpc/sysdev/ehv_pic.c
··· 177 177 return irq_linear_revmap(global_ehv_pic->irqhost, irq); 178 178 } 179 179 180 - static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node) 180 + static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, 181 + enum irq_domain_bus_token bus_token) 181 182 { 182 183 /* Exact match, unless ehv_pic node is NULL */ 183 184 return h->of_node == NULL || h->of_node == node;
+2 -2
arch/powerpc/sysdev/fsl_msi.c
··· 129 129 struct msi_desc *entry; 130 130 struct fsl_msi *msi_data; 131 131 132 - list_for_each_entry(entry, &pdev->msi_list, list) { 132 + for_each_pci_msi_entry(entry, pdev) { 133 133 if (entry->irq == NO_IRQ) 134 134 continue; 135 135 msi_data = irq_get_chip_data(entry->irq); ··· 219 219 } 220 220 } 221 221 222 - list_for_each_entry(entry, &pdev->msi_list, list) { 222 + for_each_pci_msi_entry(entry, pdev) { 223 223 /* 224 224 * Loop over all the MSI devices until we find one that has an 225 225 * available interrupt.
+2 -1
arch/powerpc/sysdev/i8259.c
··· 162 162 .flags = IORESOURCE_BUSY, 163 163 }; 164 164 165 - static int i8259_host_match(struct irq_domain *h, struct device_node *node) 165 + static int i8259_host_match(struct irq_domain *h, struct device_node *node, 166 + enum irq_domain_bus_token bus_token) 166 167 { 167 168 return h->of_node == NULL || h->of_node == node; 168 169 }
+2 -1
arch/powerpc/sysdev/ipic.c
··· 671 671 .irq_set_type = ipic_set_irq_type, 672 672 }; 673 673 674 - static int ipic_host_match(struct irq_domain *h, struct device_node *node) 674 + static int ipic_host_match(struct irq_domain *h, struct device_node *node, 675 + enum irq_domain_bus_token bus_token) 675 676 { 676 677 /* Exact match, unless ipic node is NULL */ 677 678 return h->of_node == NULL || h->of_node == node;
+2 -1
arch/powerpc/sysdev/mpic.c
··· 1007 1007 #endif /* CONFIG_MPIC_U3_HT_IRQS */ 1008 1008 1009 1009 1010 - static int mpic_host_match(struct irq_domain *h, struct device_node *node) 1010 + static int mpic_host_match(struct irq_domain *h, struct device_node *node, 1011 + enum irq_domain_bus_token bus_token) 1011 1012 { 1012 1013 /* Exact match, unless mpic node is NULL */ 1013 1014 return h->of_node == NULL || h->of_node == node;
+2 -2
arch/powerpc/sysdev/mpic_u3msi.c
··· 108 108 { 109 109 struct msi_desc *entry; 110 110 111 - list_for_each_entry(entry, &pdev->msi_list, list) { 111 + for_each_pci_msi_entry(entry, pdev) { 112 112 if (entry->irq == NO_IRQ) 113 113 continue; 114 114 ··· 140 140 return -ENXIO; 141 141 } 142 142 143 - list_for_each_entry(entry, &pdev->msi_list, list) { 143 + for_each_pci_msi_entry(entry, pdev) { 144 144 hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1); 145 145 if (hwirq < 0) { 146 146 pr_debug("u3msi: failed allocating hwirq\n");
+2 -2
arch/powerpc/sysdev/ppc4xx_hsta_msi.c
··· 51 51 return -EINVAL; 52 52 } 53 53 54 - list_for_each_entry(entry, &dev->msi_list, list) { 54 + for_each_pci_msi_entry(entry, dev) { 55 55 irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1); 56 56 if (irq < 0) { 57 57 pr_debug("%s: Failed to allocate msi interrupt\n", ··· 109 109 struct msi_desc *entry; 110 110 int irq; 111 111 112 - list_for_each_entry(entry, &dev->msi_list, list) { 112 + for_each_pci_msi_entry(entry, dev) { 113 113 if (entry->irq == NO_IRQ) 114 114 continue; 115 115
+2 -2
arch/powerpc/sysdev/ppc4xx_msi.c
··· 93 93 if (!msi_data->msi_virqs) 94 94 return -ENOMEM; 95 95 96 - list_for_each_entry(entry, &dev->msi_list, list) { 96 + for_each_pci_msi_entry(entry, dev) { 97 97 int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); 98 98 if (int_no >= 0) 99 99 break; ··· 127 127 128 128 dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); 129 129 130 - list_for_each_entry(entry, &dev->msi_list, list) { 130 + for_each_pci_msi_entry(entry, dev) { 131 131 if (entry->irq == NO_IRQ) 132 132 continue; 133 133 irq_set_msi_desc(entry->irq, NULL);
+2 -1
arch/powerpc/sysdev/qe_lib/qe_ic.c
··· 244 244 .irq_mask_ack = qe_ic_mask_irq, 245 245 }; 246 246 247 - static int qe_ic_host_match(struct irq_domain *h, struct device_node *node) 247 + static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, 248 + enum irq_domain_bus_token bus_token) 248 249 { 249 250 /* Exact match, unless qe_ic node is NULL */ 250 251 return h->of_node == NULL || h->of_node == node;
+1 -1
arch/powerpc/sysdev/xics/ics-opal.c
··· 72 72 * card, using the MSI mask bits. Firmware doesn't appear to unmask 73 73 * at that level, so we do it here by hand. 74 74 */ 75 - if (d->msi_desc) 75 + if (irq_data_get_msi_desc(d)) 76 76 pci_msi_unmask_irq(d); 77 77 #endif 78 78
+1 -1
arch/powerpc/sysdev/xics/ics-rtas.c
··· 75 75 * card, using the MSI mask bits. Firmware doesn't appear to unmask 76 76 * at that level, so we do it here by hand. 77 77 */ 78 - if (d->msi_desc) 78 + if (irq_data_get_msi_desc(d)) 79 79 pci_msi_unmask_irq(d); 80 80 #endif 81 81 /* unmask it */
+2 -1
arch/powerpc/sysdev/xics/xics-common.c
··· 298 298 } 299 299 #endif /* CONFIG_SMP */ 300 300 301 - static int xics_host_match(struct irq_domain *h, struct device_node *node) 301 + static int xics_host_match(struct irq_domain *h, struct device_node *node, 302 + enum irq_domain_bus_token bus_token) 302 303 { 303 304 struct ics *ics; 304 305
+3 -3
arch/s390/pci/pci.c
··· 409 409 410 410 /* Request MSI interrupts */ 411 411 hwirq = 0; 412 - list_for_each_entry(msi, &pdev->msi_list, list) { 412 + for_each_pci_msi_entry(msi, pdev) { 413 413 rc = -EIO; 414 414 irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ 415 415 if (irq < 0) ··· 435 435 return (msi_vecs == nvec) ? 0 : msi_vecs; 436 436 437 437 out_msi: 438 - list_for_each_entry(msi, &pdev->msi_list, list) { 438 + for_each_pci_msi_entry(msi, pdev) { 439 439 if (hwirq-- == 0) 440 440 break; 441 441 irq_set_msi_desc(msi->irq, NULL); ··· 465 465 return; 466 466 467 467 /* Release MSI interrupts */ 468 - list_for_each_entry(msi, &pdev->msi_list, list) { 468 + for_each_pci_msi_entry(msi, pdev) { 469 469 if (msi->msi_attrib.is_msix) 470 470 __pci_msix_desc_mask_irq(msi, 1); 471 471 else
+1 -1
arch/sh/boards/mach-se/7343/irq.c
··· 31 31 32 32 static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc) 33 33 { 34 - struct irq_data *data = irq_get_irq_data(irq); 34 + struct irq_data *data = irq_desc_get_irq_data(desc); 35 35 struct irq_chip *chip = irq_data_get_irq_chip(data); 36 36 unsigned long mask; 37 37 int bit;
+1 -1
arch/sh/boards/mach-se/7722/irq.c
··· 30 30 31 31 static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc) 32 32 { 33 - struct irq_data *data = irq_get_irq_data(irq); 33 + struct irq_data *data = irq_desc_get_irq_data(desc); 34 34 struct irq_chip *chip = irq_data_get_irq_chip(data); 35 35 unsigned long mask; 36 36 int bit;
+2 -1
arch/sh/boards/mach-se/7724/irq.c
··· 92 92 .irq_unmask = enable_se7724_irq, 93 93 }; 94 94 95 - static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc) 95 + static void se7724_irq_demux(unsigned int __irq, struct irq_desc *desc) 96 96 { 97 + unsigned int irq = irq_desc_get_irq(desc); 97 98 struct fpga_irq set = get_fpga_irq(irq); 98 99 unsigned short intv = __raw_readw(set.sraddr); 99 100 unsigned int ext_irq = set.base;
+1 -1
arch/sh/boards/mach-x3proto/gpio.c
··· 62 62 63 63 static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 64 64 { 65 - struct irq_data *data = irq_get_irq_data(irq); 65 + struct irq_data *data = irq_desc_get_irq_data(desc); 66 66 struct irq_chip *chip = irq_data_get_irq_chip(data); 67 67 unsigned long mask; 68 68 int pin;
+5 -4
arch/sh/kernel/irq.c
··· 227 227 for_each_active_irq(irq) { 228 228 struct irq_data *data = irq_get_irq_data(irq); 229 229 230 - if (data->node == cpu) { 231 - unsigned int newcpu = cpumask_any_and(data->affinity, 230 + if (irq_data_get_node(data) == cpu) { 231 + struct cpumask *mask = irq_data_get_affinity_mask(data); 232 + unsigned int newcpu = cpumask_any_and(mask, 232 233 cpu_online_mask); 233 234 if (newcpu >= nr_cpu_ids) { 234 235 pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", 235 236 irq, cpu); 236 237 237 - cpumask_setall(data->affinity); 238 + cpumask_setall(mask); 238 239 } 239 - irq_set_affinity(irq, data->affinity); 240 + irq_set_affinity(irq, mask); 240 241 } 241 242 } 242 243 }
+16 -11
arch/sparc/kernel/irq_64.c
··· 210 210 211 211 static inline unsigned int irq_data_to_handle(struct irq_data *data) 212 212 { 213 - struct irq_handler_data *ihd = data->handler_data; 213 + struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data); 214 214 215 215 return ihd->dev_handle; 216 216 } 217 217 218 218 static inline unsigned int irq_data_to_ino(struct irq_data *data) 219 219 { 220 - struct irq_handler_data *ihd = data->handler_data; 220 + struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data); 221 221 222 222 return ihd->dev_ino; 223 223 } 224 224 225 225 static inline unsigned long irq_data_to_sysino(struct irq_data *data) 226 226 { 227 - struct irq_handler_data *ihd = data->handler_data; 227 + struct irq_handler_data *ihd = irq_data_get_irq_handler_data(data); 228 228 229 229 return ihd->sysino; 230 230 } ··· 370 370 371 371 static void sun4u_irq_enable(struct irq_data *data) 372 372 { 373 - struct irq_handler_data *handler_data = data->handler_data; 373 + struct irq_handler_data *handler_data; 374 374 375 + handler_data = irq_data_get_irq_handler_data(data); 375 376 if (likely(handler_data)) { 376 377 unsigned long cpuid, imap, val; 377 378 unsigned int tid; 378 379 379 - cpuid = irq_choose_cpu(data->irq, data->affinity); 380 + cpuid = irq_choose_cpu(data->irq, 381 + irq_data_get_affinity_mask(data)); 380 382 imap = handler_data->imap; 381 383 382 384 tid = sun4u_compute_tid(imap, cpuid); ··· 395 393 static int sun4u_set_affinity(struct irq_data *data, 396 394 const struct cpumask *mask, bool force) 397 395 { 398 - struct irq_handler_data *handler_data = data->handler_data; 396 + struct irq_handler_data *handler_data; 399 397 398 + handler_data = irq_data_get_irq_handler_data(data); 400 399 if (likely(handler_data)) { 401 400 unsigned long cpuid, imap, val; 402 401 unsigned int tid; ··· 441 438 442 439 static void sun4u_irq_eoi(struct irq_data *data) 443 440 { 444 - struct irq_handler_data *handler_data = data->handler_data; 441 + struct irq_handler_data 
*handler_data; 445 442 443 + handler_data = irq_data_get_irq_handler_data(data); 446 444 if (likely(handler_data)) 447 445 upa_writeq(ICLR_IDLE, handler_data->iclr); 448 446 } 449 447 450 448 static void sun4v_irq_enable(struct irq_data *data) 451 449 { 452 - unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity); 450 + unsigned long cpuid = irq_choose_cpu(data->irq, 451 + irq_data_get_affinity_mask(data)); 453 452 unsigned int ino = irq_data_to_sysino(data); 454 453 int err; 455 454 ··· 513 508 unsigned long cpuid; 514 509 int err; 515 510 516 - cpuid = irq_choose_cpu(data->irq, data->affinity); 511 + cpuid = irq_choose_cpu(data->irq, irq_data_get_affinity_mask(data)); 517 512 518 513 err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); 519 514 if (err != HV_EOK) ··· 886 881 if (desc->action && !irqd_is_per_cpu(data)) { 887 882 if (data->chip->irq_set_affinity) 888 883 data->chip->irq_set_affinity(data, 889 - data->affinity, 890 - false); 884 + irq_data_get_affinity_mask(data), 885 + false); 891 886 } 892 887 raw_spin_unlock_irqrestore(&desc->lock, flags); 893 888 }
+3 -3
arch/sparc/kernel/leon_kernel.c
··· 126 126 int oldcpu, newcpu; 127 127 128 128 mask = (unsigned long)data->chip_data; 129 - oldcpu = irq_choose_cpu(data->affinity); 129 + oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); 130 130 newcpu = irq_choose_cpu(dest); 131 131 132 132 if (oldcpu == newcpu) ··· 149 149 int cpu; 150 150 151 151 mask = (unsigned long)data->chip_data; 152 - cpu = irq_choose_cpu(data->affinity); 152 + cpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); 153 153 spin_lock_irqsave(&leon_irq_lock, flags); 154 154 oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu)); 155 155 LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask)); ··· 162 162 int cpu; 163 163 164 164 mask = (unsigned long)data->chip_data; 165 - cpu = irq_choose_cpu(data->affinity); 165 + cpu = irq_choose_cpu(irq_data_get_affinity_mask(data)); 166 166 spin_lock_irqsave(&leon_irq_lock, flags); 167 167 oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu)); 168 168 LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
+1 -1
arch/sparc/kernel/pci.c
··· 914 914 void arch_teardown_msi_irq(unsigned int irq) 915 915 { 916 916 struct msi_desc *entry = irq_get_msi_desc(irq); 917 - struct pci_dev *pdev = entry->dev; 917 + struct pci_dev *pdev = msi_desc_to_pci_dev(entry); 918 918 struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; 919 919 920 920 if (pbm->teardown_msi_irq)
+2 -2
arch/sparc/kernel/sun4d_irq.c
··· 188 188 189 189 static void sun4d_mask_irq(struct irq_data *data) 190 190 { 191 - struct sun4d_handler_data *handler_data = data->handler_data; 191 + struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data); 192 192 unsigned int real_irq; 193 193 #ifdef CONFIG_SMP 194 194 int cpuid = handler_data->cpuid; ··· 206 206 207 207 static void sun4d_unmask_irq(struct irq_data *data) 208 208 { 209 - struct sun4d_handler_data *handler_data = data->handler_data; 209 + struct sun4d_handler_data *handler_data = irq_data_get_irq_handler_data(data); 210 210 unsigned int real_irq; 211 211 #ifdef CONFIG_SMP 212 212 int cpuid = handler_data->cpuid;
+4 -2
arch/sparc/kernel/sun4m_irq.c
··· 188 188 189 189 static void sun4m_mask_irq(struct irq_data *data) 190 190 { 191 - struct sun4m_handler_data *handler_data = data->handler_data; 191 + struct sun4m_handler_data *handler_data; 192 192 int cpu = smp_processor_id(); 193 193 194 + handler_data = irq_data_get_irq_handler_data(data); 194 195 if (handler_data->mask) { 195 196 unsigned long flags; 196 197 ··· 207 206 208 207 static void sun4m_unmask_irq(struct irq_data *data) 209 208 { 210 - struct sun4m_handler_data *handler_data = data->handler_data; 209 + struct sun4m_handler_data *handler_data; 211 210 int cpu = smp_processor_id(); 212 211 212 + handler_data = irq_data_get_irq_handler_data(data); 213 213 if (handler_data->mask) { 214 214 unsigned long flags; 215 215
+3 -2
arch/tile/kernel/pci_gx.c
··· 304 304 * to Linux which just calls handle_level_irq() after clearing the 305 305 * MAC INTx Assert status bit associated with this interrupt. 306 306 */ 307 - static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc) 307 + static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc) 308 308 { 309 309 struct pci_controller *controller = irq_desc_get_handler_data(desc); 310 310 gxio_trio_context_t *trio_context = controller->trio; 311 311 uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc); 312 + unsigned int irq = irq_desc_get_irq(desc); 312 313 int mac = controller->mac; 313 314 unsigned int reg_offset; 314 315 uint64_t level_mask; ··· 1443 1442 /* MSI support starts here. */ 1444 1443 static unsigned int tilegx_msi_startup(struct irq_data *d) 1445 1444 { 1446 - if (d->msi_desc) 1445 + if (irq_data_get_msi_desc(d)) 1447 1446 pci_msi_unmask_irq(d); 1448 1447 1449 1448 return 0;
+2 -3
arch/unicore32/kernel/irq.c
··· 112 112 * irq_controller_lock held, and IRQs disabled. Decode the IRQ 113 113 * and call the handler. 114 114 */ 115 - static void 116 - puv3_gpio_handler(unsigned int irq, struct irq_desc *desc) 115 + static void puv3_gpio_handler(unsigned int __irq, struct irq_desc *desc) 117 116 { 118 - unsigned int mask; 117 + unsigned int mask, irq; 119 118 120 119 mask = readl(GPIO_GEDR); 121 120 do {
+4 -4
arch/x86/pci/xen.c
··· 179 179 if (ret) 180 180 goto error; 181 181 i = 0; 182 - list_for_each_entry(msidesc, &dev->msi_list, list) { 182 + for_each_pci_msi_entry(msidesc, dev) { 183 183 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 184 184 (type == PCI_CAP_ID_MSI) ? nvec : 1, 185 185 (type == PCI_CAP_ID_MSIX) ? ··· 230 230 if (type == PCI_CAP_ID_MSI && nvec > 1) 231 231 return 1; 232 232 233 - list_for_each_entry(msidesc, &dev->msi_list, list) { 233 + for_each_pci_msi_entry(msidesc, dev) { 234 234 __pci_read_msi_msg(msidesc, &msg); 235 235 pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | 236 236 ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); ··· 274 274 int ret = 0; 275 275 struct msi_desc *msidesc; 276 276 277 - list_for_each_entry(msidesc, &dev->msi_list, list) { 277 + for_each_pci_msi_entry(msidesc, dev) { 278 278 struct physdev_map_pirq map_irq; 279 279 domid_t domid; 280 280 ··· 386 386 { 387 387 struct msi_desc *msidesc; 388 388 389 - msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); 389 + msidesc = first_pci_msi_entry(dev); 390 390 if (msidesc->msi_attrib.is_msix) 391 391 xen_pci_frontend_disable_msix(dev); 392 392 else
+6 -4
arch/xtensa/kernel/irq.c
··· 177 177 178 178 for_each_active_irq(i) { 179 179 struct irq_data *data = irq_get_irq_data(i); 180 + struct cpumask *mask; 180 181 unsigned int newcpu; 181 182 182 183 if (irqd_is_per_cpu(data)) 183 184 continue; 184 185 185 - if (!cpumask_test_cpu(cpu, data->affinity)) 186 + mask = irq_data_get_affinity_mask(data); 187 + if (!cpumask_test_cpu(cpu, mask)) 186 188 continue; 187 189 188 - newcpu = cpumask_any_and(data->affinity, cpu_online_mask); 190 + newcpu = cpumask_any_and(mask, cpu_online_mask); 189 191 190 192 if (newcpu >= nr_cpu_ids) { 191 193 pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n", 192 194 i, cpu); 193 195 194 - cpumask_setall(data->affinity); 196 + cpumask_setall(mask); 195 197 } 196 - irq_set_affinity(i, data->affinity); 198 + irq_set_affinity(i, mask); 197 199 } 198 200 } 199 201 #endif /* CONFIG_HOTPLUG_CPU */
+1
drivers/base/Makefile
··· 22 22 obj-$(CONFIG_SOC_BUS) += soc.o 23 23 obj-$(CONFIG_PINCTRL) += pinctrl.o 24 24 obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o 25 + obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o 25 26 26 27 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG 27 28
+3
drivers/base/core.c
··· 708 708 INIT_LIST_HEAD(&dev->devres_head); 709 709 device_pm_init(dev); 710 710 set_dev_node(dev, -1); 711 + #ifdef CONFIG_GENERIC_MSI_IRQ 712 + INIT_LIST_HEAD(&dev->msi_list); 713 + #endif 711 714 } 712 715 EXPORT_SYMBOL_GPL(device_initialize); 713 716
+282
drivers/base/platform-msi.c
··· 1 + /* 2 + * MSI framework for platform devices 3 + * 4 + * Copyright (C) 2015 ARM Limited, All Rights Reserved. 5 + * Author: Marc Zyngier <marc.zyngier@arm.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 + */ 19 + 20 + #include <linux/device.h> 21 + #include <linux/idr.h> 22 + #include <linux/irq.h> 23 + #include <linux/irqdomain.h> 24 + #include <linux/msi.h> 25 + #include <linux/slab.h> 26 + 27 + #define DEV_ID_SHIFT 24 28 + 29 + /* 30 + * Internal data structure containing a (made up, but unique) devid 31 + * and the callback to write the MSI message. 32 + */ 33 + struct platform_msi_priv_data { 34 + irq_write_msi_msg_t write_msg; 35 + int devid; 36 + }; 37 + 38 + /* The devid allocator */ 39 + static DEFINE_IDA(platform_msi_devid_ida); 40 + 41 + #ifdef GENERIC_MSI_DOMAIN_OPS 42 + /* 43 + * Convert an msi_desc to a globaly unique identifier (per-device 44 + * devid + msi_desc position in the msi_list). 
45 + */ 46 + static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) 47 + { 48 + u32 devid; 49 + 50 + devid = desc->platform.msi_priv_data->devid; 51 + 52 + return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index; 53 + } 54 + 55 + static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) 56 + { 57 + arg->desc = desc; 58 + arg->hwirq = platform_msi_calc_hwirq(desc); 59 + } 60 + 61 + static int platform_msi_init(struct irq_domain *domain, 62 + struct msi_domain_info *info, 63 + unsigned int virq, irq_hw_number_t hwirq, 64 + msi_alloc_info_t *arg) 65 + { 66 + struct irq_data *data; 67 + 68 + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, 69 + info->chip, info->chip_data); 70 + 71 + /* 72 + * Save the MSI descriptor in handler_data so that the 73 + * irq_write_msi_msg callback can retrieve it (and the 74 + * associated device). 75 + */ 76 + data = irq_domain_get_irq_data(domain, virq); 77 + data->handler_data = arg->desc; 78 + 79 + return 0; 80 + } 81 + #else 82 + #define platform_msi_set_desc NULL 83 + #define platform_msi_init NULL 84 + #endif 85 + 86 + static void platform_msi_update_dom_ops(struct msi_domain_info *info) 87 + { 88 + struct msi_domain_ops *ops = info->ops; 89 + 90 + BUG_ON(!ops); 91 + 92 + if (ops->msi_init == NULL) 93 + ops->msi_init = platform_msi_init; 94 + if (ops->set_desc == NULL) 95 + ops->set_desc = platform_msi_set_desc; 96 + } 97 + 98 + static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg) 99 + { 100 + struct msi_desc *desc = irq_data_get_irq_handler_data(data); 101 + struct platform_msi_priv_data *priv_data; 102 + 103 + priv_data = desc->platform.msi_priv_data; 104 + 105 + priv_data->write_msg(desc, msg); 106 + } 107 + 108 + static void platform_msi_update_chip_ops(struct msi_domain_info *info) 109 + { 110 + struct irq_chip *chip = info->chip; 111 + 112 + BUG_ON(!chip); 113 + if (!chip->irq_mask) 114 + chip->irq_mask = irq_chip_mask_parent; 115 + if 
(!chip->irq_unmask) 116 + chip->irq_unmask = irq_chip_unmask_parent; 117 + if (!chip->irq_eoi) 118 + chip->irq_eoi = irq_chip_eoi_parent; 119 + if (!chip->irq_set_affinity) 120 + chip->irq_set_affinity = msi_domain_set_affinity; 121 + if (!chip->irq_write_msi_msg) 122 + chip->irq_write_msi_msg = platform_msi_write_msg; 123 + } 124 + 125 + static void platform_msi_free_descs(struct device *dev) 126 + { 127 + struct msi_desc *desc, *tmp; 128 + 129 + list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { 130 + list_del(&desc->list); 131 + free_msi_entry(desc); 132 + } 133 + } 134 + 135 + static int platform_msi_alloc_descs(struct device *dev, int nvec, 136 + struct platform_msi_priv_data *data) 137 + 138 + { 139 + int i; 140 + 141 + for (i = 0; i < nvec; i++) { 142 + struct msi_desc *desc; 143 + 144 + desc = alloc_msi_entry(dev); 145 + if (!desc) 146 + break; 147 + 148 + desc->platform.msi_priv_data = data; 149 + desc->platform.msi_index = i; 150 + desc->nvec_used = 1; 151 + 152 + list_add_tail(&desc->list, dev_to_msi_list(dev)); 153 + } 154 + 155 + if (i != nvec) { 156 + /* Clean up the mess */ 157 + platform_msi_free_descs(dev); 158 + 159 + return -ENOMEM; 160 + } 161 + 162 + return 0; 163 + } 164 + 165 + /** 166 + * platform_msi_create_irq_domain - Create a platform MSI interrupt domain 167 + * @np: Optional device-tree node of the interrupt controller 168 + * @info: MSI domain info 169 + * @parent: Parent irq domain 170 + * 171 + * Updates the domain and chip ops and creates a platform MSI 172 + * interrupt domain. 173 + * 174 + * Returns: 175 + * A domain pointer or NULL in case of failure. 
176 + */ 177 + struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, 178 + struct msi_domain_info *info, 179 + struct irq_domain *parent) 180 + { 181 + struct irq_domain *domain; 182 + 183 + if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) 184 + platform_msi_update_dom_ops(info); 185 + if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 186 + platform_msi_update_chip_ops(info); 187 + 188 + domain = msi_create_irq_domain(np, info, parent); 189 + if (domain) 190 + domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; 191 + 192 + return domain; 193 + } 194 + 195 + /** 196 + * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev 197 + * @dev: The device for which to allocate interrupts 198 + * @nvec: The number of interrupts to allocate 199 + * @write_msi_msg: Callback to write an interrupt message for @dev 200 + * 201 + * Returns: 202 + * Zero for success, or an error code in case of failure 203 + */ 204 + int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, 205 + irq_write_msi_msg_t write_msi_msg) 206 + { 207 + struct platform_msi_priv_data *priv_data; 208 + int err; 209 + 210 + /* 211 + * Limit the number of interrupts to 256 per device. Should we 212 + * need to bump this up, DEV_ID_SHIFT should be adjusted 213 + * accordingly (which would impact the max number of MSI 214 + * capable devices). 215 + */ 216 + if (!dev->msi_domain || !write_msi_msg || !nvec || 217 + nvec > (1 << (32 - DEV_ID_SHIFT))) 218 + return -EINVAL; 219 + 220 + if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) { 221 + dev_err(dev, "Incompatible msi_domain, giving up\n"); 222 + return -EINVAL; 223 + } 224 + 225 + /* Already had a helping of MSI? Greed... 
*/ 226 + if (!list_empty(dev_to_msi_list(dev))) 227 + return -EBUSY; 228 + 229 + priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL); 230 + if (!priv_data) 231 + return -ENOMEM; 232 + 233 + priv_data->devid = ida_simple_get(&platform_msi_devid_ida, 234 + 0, 1 << DEV_ID_SHIFT, GFP_KERNEL); 235 + if (priv_data->devid < 0) { 236 + err = priv_data->devid; 237 + goto out_free_data; 238 + } 239 + 240 + priv_data->write_msg = write_msi_msg; 241 + 242 + err = platform_msi_alloc_descs(dev, nvec, priv_data); 243 + if (err) 244 + goto out_free_id; 245 + 246 + err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec); 247 + if (err) 248 + goto out_free_desc; 249 + 250 + return 0; 251 + 252 + out_free_desc: 253 + platform_msi_free_descs(dev); 254 + out_free_id: 255 + ida_simple_remove(&platform_msi_devid_ida, priv_data->devid); 256 + out_free_data: 257 + kfree(priv_data); 258 + 259 + return err; 260 + } 261 + 262 + /** 263 + * platform_msi_domain_free_irqs - Free MSI interrupts for @dev 264 + * @dev: The device for which to free interrupts 265 + */ 266 + void platform_msi_domain_free_irqs(struct device *dev) 267 + { 268 + struct msi_desc *desc; 269 + 270 + desc = first_msi_entry(dev); 271 + if (desc) { 272 + struct platform_msi_priv_data *data; 273 + 274 + data = desc->platform.msi_priv_data; 275 + 276 + ida_simple_remove(&platform_msi_devid_ida, data->devid); 277 + kfree(data); 278 + } 279 + 280 + msi_domain_free_irqs(dev->msi_domain, dev); 281 + platform_msi_free_descs(dev); 282 + }
+10
drivers/irqchip/Kconfig
··· 61 61 select MULTI_IRQ_HANDLER 62 62 select SPARSE_IRQ 63 63 64 + config I8259 65 + bool 66 + select IRQ_DOMAIN 67 + 64 68 config BCM7038_L1_IRQ 65 69 bool 66 70 select GENERIC_IRQ_CHIP ··· 181 177 config RENESAS_H8S_INTC 182 178 bool 183 179 select IRQ_DOMAIN 180 + 181 + config IMX_GPCV2 182 + bool 183 + select IRQ_DOMAIN 184 + help 185 + Enables the wakeup IRQs for IMX platforms with GPCv2 block
+4 -1
drivers/irqchip/Makefile
··· 1 1 obj-$(CONFIG_IRQCHIP) += irqchip.o 2 2 3 3 obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o 4 + obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o 4 5 obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o 5 6 obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o 6 7 obj-$(CONFIG_ARCH_MMP) += irq-mmp.o ··· 23 22 obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o 24 23 obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o 25 24 obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o 26 - obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o 25 + obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o 27 26 obj-$(CONFIG_ARM_NVIC) += irq-nvic.o 28 27 obj-$(CONFIG_ARM_VIC) += irq-vic.o 29 28 obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o 30 29 obj-$(CONFIG_ATMEL_AIC5_IRQ) += irq-atmel-aic-common.o irq-atmel-aic5.o 30 + obj-$(CONFIG_I8259) += irq-i8259.o 31 31 obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o 32 32 obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o 33 33 obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o ··· 54 52 obj-$(CONFIG_RENESAS_H8S_INTC) += irq-renesas-h8s.o 55 53 obj-$(CONFIG_ARCH_SA1100) += irq-sa11x0.o 56 54 obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o 55 + obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
+10 -10
drivers/irqchip/exynos-combiner.c
··· 15 15 #include <linux/slab.h> 16 16 #include <linux/syscore_ops.h> 17 17 #include <linux/irqdomain.h> 18 + #include <linux/irqchip.h> 18 19 #include <linux/irqchip/chained_irq.h> 19 20 #include <linux/interrupt.h> 20 21 #include <linux/of_address.h> 21 22 #include <linux/of_irq.h> 22 - 23 - #include "irqchip.h" 24 23 25 24 #define COMBINER_ENABLE_SET 0x0 26 25 #define COMBINER_ENABLE_CLEAR 0x4 ··· 65 66 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); 66 67 } 67 68 68 - static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) 69 + static void combiner_handle_cascade_irq(unsigned int __irq, 70 + struct irq_desc *desc) 69 71 { 70 - struct combiner_chip_data *chip_data = irq_get_handler_data(irq); 71 - struct irq_chip *chip = irq_get_chip(irq); 72 + struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc); 73 + struct irq_chip *chip = irq_desc_get_chip(desc); 74 + unsigned int irq = irq_desc_get_irq(desc); 72 75 unsigned int cascade_irq, combiner_irq; 73 76 unsigned long status; 74 77 ··· 123 122 static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data, 124 123 unsigned int irq) 125 124 { 126 - if (irq_set_handler_data(irq, combiner_data) != 0) 127 - BUG(); 128 - irq_set_chained_handler(irq, combiner_handle_cascade_irq); 125 + irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq, 126 + combiner_data); 129 127 } 130 128 131 129 static void __init combiner_init_one(struct combiner_chip_data *combiner_data, ··· 185 185 186 186 combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL); 187 187 if (!combiner_data) { 188 - pr_warning("%s: could not allocate combiner data\n", __func__); 188 + pr_warn("%s: could not allocate combiner data\n", __func__); 189 189 return; 190 190 } 191 191 192 192 combiner_irq_domain = irq_domain_add_linear(np, nr_irq, 193 193 &combiner_irq_domain_ops, combiner_data); 194 194 if (WARN_ON(!combiner_irq_domain)) { 195 - pr_warning("%s: irq 
domain init failed\n", __func__); 195 + pr_warn("%s: irq domain init failed\n", __func__); 196 196 return; 197 197 } 198 198
+2 -3
drivers/irqchip/irq-armada-370-xp.c
··· 18 18 #include <linux/init.h> 19 19 #include <linux/irq.h> 20 20 #include <linux/interrupt.h> 21 + #include <linux/irqchip.h> 21 22 #include <linux/irqchip/chained_irq.h> 22 23 #include <linux/cpu.h> 23 24 #include <linux/io.h> ··· 33 32 #include <asm/exception.h> 34 33 #include <asm/smp_plat.h> 35 34 #include <asm/mach/irq.h> 36 - 37 - #include "irqchip.h" 38 35 39 36 /* Interrupt Controller Registers Map */ 40 37 #define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48) ··· 450 451 static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq, 451 452 struct irq_desc *desc) 452 453 { 453 - struct irq_chip *chip = irq_get_chip(irq); 454 + struct irq_chip *chip = irq_desc_get_chip(desc); 454 455 unsigned long irqmap, irqn, irqsrc, cpuid; 455 456 unsigned int cascade_irq; 456 457
+2 -2
drivers/irqchip/irq-atmel-aic.c
··· 19 19 #include <linux/bitmap.h> 20 20 #include <linux/types.h> 21 21 #include <linux/irq.h> 22 + #include <linux/irqchip.h> 22 23 #include <linux/of.h> 23 24 #include <linux/of_address.h> 24 25 #include <linux/of_irq.h> ··· 32 31 #include <asm/mach/irq.h> 33 32 34 33 #include "irq-atmel-aic-common.h" 35 - #include "irqchip.h" 36 34 37 35 /* Number of irq lines managed by AIC */ 38 36 #define NR_AIC_IRQS 32 ··· 225 225 aic_common_rtt_irq_fixup(root); 226 226 } 227 227 228 - static const struct of_device_id __initdata aic_irq_fixups[] = { 228 + static const struct of_device_id aic_irq_fixups[] __initconst = { 229 229 { .compatible = "atmel,at91rm9200", .data = at91rm9200_aic_irq_fixup }, 230 230 { .compatible = "atmel,at91sam9g45", .data = at91sam9g45_aic_irq_fixup }, 231 231 { .compatible = "atmel,at91sam9n12", .data = at91rm9200_aic_irq_fixup },
+2 -2
drivers/irqchip/irq-atmel-aic5.c
··· 19 19 #include <linux/bitmap.h> 20 20 #include <linux/types.h> 21 21 #include <linux/irq.h> 22 + #include <linux/irqchip.h> 22 23 #include <linux/of.h> 23 24 #include <linux/of_address.h> 24 25 #include <linux/of_irq.h> ··· 32 31 #include <asm/mach/irq.h> 33 32 34 33 #include "irq-atmel-aic-common.h" 35 - #include "irqchip.h" 36 34 37 35 /* Number of irq lines managed by AIC */ 38 36 #define NR_AIC5_IRQS 128 ··· 290 290 aic_common_rtc_irq_fixup(root); 291 291 } 292 292 293 - static const struct of_device_id __initdata aic5_irq_fixups[] = { 293 + static const struct of_device_id aic5_irq_fixups[] __initconst = { 294 294 { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup }, 295 295 { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup }, 296 296 { /* sentinel */ },
+73 -36
drivers/irqchip/irq-bcm2835.c
··· 48 48 #include <linux/slab.h> 49 49 #include <linux/of_address.h> 50 50 #include <linux/of_irq.h> 51 + #include <linux/irqchip.h> 51 52 #include <linux/irqdomain.h> 52 53 53 54 #include <asm/exception.h> 54 55 #include <asm/mach/irq.h> 55 - 56 - #include "irqchip.h" 57 56 58 57 /* Put the bank and irq (32 bits) into the hwirq */ 59 58 #define MAKE_HWIRQ(b, n) ((b << 5) | (n)) ··· 75 76 #define NR_BANKS 3 76 77 #define IRQS_PER_BANK 32 77 78 78 - static int reg_pending[] __initconst = { 0x00, 0x04, 0x08 }; 79 - static int reg_enable[] __initconst = { 0x18, 0x10, 0x14 }; 80 - static int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 }; 81 - static int bank_irqs[] __initconst = { 8, 32, 32 }; 79 + static const int reg_pending[] __initconst = { 0x00, 0x04, 0x08 }; 80 + static const int reg_enable[] __initconst = { 0x18, 0x10, 0x14 }; 81 + static const int reg_disable[] __initconst = { 0x24, 0x1c, 0x20 }; 82 + static const int bank_irqs[] __initconst = { 8, 32, 32 }; 82 83 83 84 static const int shortcuts[] = { 84 85 7, 9, 10, 18, 19, /* Bank 1 */ ··· 96 97 static struct armctrl_ic intc __read_mostly; 97 98 static void __exception_irq_entry bcm2835_handle_irq( 98 99 struct pt_regs *regs); 100 + static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc); 99 101 100 102 static void armctrl_mask_irq(struct irq_data *d) 101 103 { ··· 140 140 }; 141 141 142 142 static int __init armctrl_of_init(struct device_node *node, 143 - struct device_node *parent) 143 + struct device_node *parent, 144 + bool is_2836) 144 145 { 145 146 void __iomem *base; 146 147 int irq, b, i; ··· 170 169 } 171 170 } 172 171 173 - set_handle_irq(bcm2835_handle_irq); 172 + if (is_2836) { 173 + int parent_irq = irq_of_parse_and_map(node, 0); 174 + 175 + if (!parent_irq) { 176 + panic("%s: unable to get parent interrupt.\n", 177 + node->full_name); 178 + } 179 + irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq); 180 + } else { 181 + 
set_handle_irq(bcm2835_handle_irq); 182 + } 183 + 174 184 return 0; 175 185 } 186 + 187 + static int __init bcm2835_armctrl_of_init(struct device_node *node, 188 + struct device_node *parent) 189 + { 190 + return armctrl_of_init(node, parent, false); 191 + } 192 + 193 + static int __init bcm2836_armctrl_of_init(struct device_node *node, 194 + struct device_node *parent) 195 + { 196 + return armctrl_of_init(node, parent, true); 197 + } 198 + 176 199 177 200 /* 178 201 * Handle each interrupt across the entire interrupt controller. This reads the ··· 204 179 * handle_IRQ may briefly re-enable interrupts for soft IRQ handling. 205 180 */ 206 181 207 - static void armctrl_handle_bank(int bank, struct pt_regs *regs) 182 + static u32 armctrl_translate_bank(int bank) 208 183 { 209 - u32 stat, irq; 184 + u32 stat = readl_relaxed(intc.pending[bank]); 210 185 211 - while ((stat = readl_relaxed(intc.pending[bank]))) { 212 - irq = MAKE_HWIRQ(bank, ffs(stat) - 1); 213 - handle_IRQ(irq_linear_revmap(intc.domain, irq), regs); 214 - } 186 + return MAKE_HWIRQ(bank, ffs(stat) - 1); 215 187 } 216 188 217 - static void armctrl_handle_shortcut(int bank, struct pt_regs *regs, 218 - u32 stat) 189 + static u32 armctrl_translate_shortcut(int bank, u32 stat) 219 190 { 220 - u32 irq = MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]); 221 - handle_IRQ(irq_linear_revmap(intc.domain, irq), regs); 191 + return MAKE_HWIRQ(bank, shortcuts[ffs(stat >> SHORTCUT_SHIFT) - 1]); 192 + } 193 + 194 + static u32 get_next_armctrl_hwirq(void) 195 + { 196 + u32 stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK; 197 + 198 + if (stat == 0) 199 + return ~0; 200 + else if (stat & BANK0_HWIRQ_MASK) 201 + return MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1); 202 + else if (stat & SHORTCUT1_MASK) 203 + return armctrl_translate_shortcut(1, stat & SHORTCUT1_MASK); 204 + else if (stat & SHORTCUT2_MASK) 205 + return armctrl_translate_shortcut(2, stat & SHORTCUT2_MASK); 206 + else if (stat & 
BANK1_HWIRQ) 207 + return armctrl_translate_bank(1); 208 + else if (stat & BANK2_HWIRQ) 209 + return armctrl_translate_bank(2); 210 + else 211 + BUG(); 222 212 } 223 213 224 214 static void __exception_irq_entry bcm2835_handle_irq( 225 215 struct pt_regs *regs) 226 216 { 227 - u32 stat, irq; 217 + u32 hwirq; 228 218 229 - while ((stat = readl_relaxed(intc.pending[0]) & BANK0_VALID_MASK)) { 230 - if (stat & BANK0_HWIRQ_MASK) { 231 - irq = MAKE_HWIRQ(0, ffs(stat & BANK0_HWIRQ_MASK) - 1); 232 - handle_IRQ(irq_linear_revmap(intc.domain, irq), regs); 233 - } else if (stat & SHORTCUT1_MASK) { 234 - armctrl_handle_shortcut(1, regs, stat & SHORTCUT1_MASK); 235 - } else if (stat & SHORTCUT2_MASK) { 236 - armctrl_handle_shortcut(2, regs, stat & SHORTCUT2_MASK); 237 - } else if (stat & BANK1_HWIRQ) { 238 - armctrl_handle_bank(1, regs); 239 - } else if (stat & BANK2_HWIRQ) { 240 - armctrl_handle_bank(2, regs); 241 - } else { 242 - BUG(); 243 - } 244 - } 219 + while ((hwirq = get_next_armctrl_hwirq()) != ~0) 220 + handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); 245 221 } 246 222 247 - IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", armctrl_of_init); 223 + static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc) 224 + { 225 + u32 hwirq; 226 + 227 + while ((hwirq = get_next_armctrl_hwirq()) != ~0) 228 + generic_handle_irq(irq_linear_revmap(intc.domain, hwirq)); 229 + } 230 + 231 + IRQCHIP_DECLARE(bcm2835_armctrl_ic, "brcm,bcm2835-armctrl-ic", 232 + bcm2835_armctrl_of_init); 233 + IRQCHIP_DECLARE(bcm2836_armctrl_ic, "brcm,bcm2836-armctrl-ic", 234 + bcm2836_armctrl_of_init);
+275
drivers/irqchip/irq-bcm2836.c
··· 1 + /* 2 + * Root interrupt controller for the BCM2836 (Raspberry Pi 2). 3 + * 4 + * Copyright 2015 Broadcom 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + */ 16 + 17 + #include <linux/cpu.h> 18 + #include <linux/of_address.h> 19 + #include <linux/of_irq.h> 20 + #include <linux/irqchip.h> 21 + #include <linux/irqdomain.h> 22 + #include <asm/exception.h> 23 + 24 + /* 25 + * The low 2 bits identify the CPU that the GPU IRQ goes to, and the 26 + * next 2 bits identify the CPU that the GPU FIQ goes to. 27 + */ 28 + #define LOCAL_GPU_ROUTING 0x00c 29 + /* When setting bits 0-3, enables PMU interrupts on that CPU. */ 30 + #define LOCAL_PM_ROUTING_SET 0x010 31 + /* When setting bits 0-3, disables PMU interrupts on that CPU. */ 32 + #define LOCAL_PM_ROUTING_CLR 0x014 33 + /* 34 + * The low 4 bits of this are the CPU's timer IRQ enables, and the 35 + * next 4 bits are the CPU's timer FIQ enables (which override the IRQ 36 + * bits). 37 + */ 38 + #define LOCAL_TIMER_INT_CONTROL0 0x040 39 + /* 40 + * The low 4 bits of this are the CPU's per-mailbox IRQ enables, and 41 + * the next 4 bits are the CPU's per-mailbox FIQ enables (which 42 + * override the IRQ bits). 43 + */ 44 + #define LOCAL_MAILBOX_INT_CONTROL0 0x050 45 + /* 46 + * The CPU's interrupt status register. Bits are defined by the the 47 + * LOCAL_IRQ_* bits below. 48 + */ 49 + #define LOCAL_IRQ_PENDING0 0x060 50 + /* Same status bits as above, but for FIQ. 
*/ 51 + #define LOCAL_FIQ_PENDING0 0x070 52 + /* 53 + * Mailbox0 write-to-set bits. There are 16 mailboxes, 4 per CPU, and 54 + * these bits are organized by mailbox number and then CPU number. We 55 + * use mailbox 0 for IPIs. The mailbox's interrupt is raised while 56 + * any bit is set. 57 + */ 58 + #define LOCAL_MAILBOX0_SET0 0x080 59 + /* Mailbox0 write-to-clear bits. */ 60 + #define LOCAL_MAILBOX0_CLR0 0x0c0 61 + 62 + #define LOCAL_IRQ_CNTPSIRQ 0 63 + #define LOCAL_IRQ_CNTPNSIRQ 1 64 + #define LOCAL_IRQ_CNTHPIRQ 2 65 + #define LOCAL_IRQ_CNTVIRQ 3 66 + #define LOCAL_IRQ_MAILBOX0 4 67 + #define LOCAL_IRQ_MAILBOX1 5 68 + #define LOCAL_IRQ_MAILBOX2 6 69 + #define LOCAL_IRQ_MAILBOX3 7 70 + #define LOCAL_IRQ_GPU_FAST 8 71 + #define LOCAL_IRQ_PMU_FAST 9 72 + #define LAST_IRQ LOCAL_IRQ_PMU_FAST 73 + 74 + struct bcm2836_arm_irqchip_intc { 75 + struct irq_domain *domain; 76 + void __iomem *base; 77 + }; 78 + 79 + static struct bcm2836_arm_irqchip_intc intc __read_mostly; 80 + 81 + static void bcm2836_arm_irqchip_mask_per_cpu_irq(unsigned int reg_offset, 82 + unsigned int bit, 83 + int cpu) 84 + { 85 + void __iomem *reg = intc.base + reg_offset + 4 * cpu; 86 + 87 + writel(readl(reg) & ~BIT(bit), reg); 88 + } 89 + 90 + static void bcm2836_arm_irqchip_unmask_per_cpu_irq(unsigned int reg_offset, 91 + unsigned int bit, 92 + int cpu) 93 + { 94 + void __iomem *reg = intc.base + reg_offset + 4 * cpu; 95 + 96 + writel(readl(reg) | BIT(bit), reg); 97 + } 98 + 99 + static void bcm2836_arm_irqchip_mask_timer_irq(struct irq_data *d) 100 + { 101 + bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0, 102 + d->hwirq - LOCAL_IRQ_CNTPSIRQ, 103 + smp_processor_id()); 104 + } 105 + 106 + static void bcm2836_arm_irqchip_unmask_timer_irq(struct irq_data *d) 107 + { 108 + bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_TIMER_INT_CONTROL0, 109 + d->hwirq - LOCAL_IRQ_CNTPSIRQ, 110 + smp_processor_id()); 111 + } 112 + 113 + static struct irq_chip bcm2836_arm_irqchip_timer = { 114 + .name 
= "bcm2836-timer", 115 + .irq_mask = bcm2836_arm_irqchip_mask_timer_irq, 116 + .irq_unmask = bcm2836_arm_irqchip_unmask_timer_irq, 117 + }; 118 + 119 + static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d) 120 + { 121 + writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_CLR); 122 + } 123 + 124 + static void bcm2836_arm_irqchip_unmask_pmu_irq(struct irq_data *d) 125 + { 126 + writel(1 << smp_processor_id(), intc.base + LOCAL_PM_ROUTING_SET); 127 + } 128 + 129 + static struct irq_chip bcm2836_arm_irqchip_pmu = { 130 + .name = "bcm2836-pmu", 131 + .irq_mask = bcm2836_arm_irqchip_mask_pmu_irq, 132 + .irq_unmask = bcm2836_arm_irqchip_unmask_pmu_irq, 133 + }; 134 + 135 + static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d) 136 + { 137 + } 138 + 139 + static void bcm2836_arm_irqchip_unmask_gpu_irq(struct irq_data *d) 140 + { 141 + } 142 + 143 + static struct irq_chip bcm2836_arm_irqchip_gpu = { 144 + .name = "bcm2836-gpu", 145 + .irq_mask = bcm2836_arm_irqchip_mask_gpu_irq, 146 + .irq_unmask = bcm2836_arm_irqchip_unmask_gpu_irq, 147 + }; 148 + 149 + static void bcm2836_arm_irqchip_register_irq(int hwirq, struct irq_chip *chip) 150 + { 151 + int irq = irq_create_mapping(intc.domain, hwirq); 152 + 153 + irq_set_percpu_devid(irq); 154 + irq_set_chip_and_handler(irq, chip, handle_percpu_devid_irq); 155 + irq_set_status_flags(irq, IRQ_NOAUTOEN); 156 + } 157 + 158 + static void 159 + __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs) 160 + { 161 + int cpu = smp_processor_id(); 162 + u32 stat; 163 + 164 + stat = readl_relaxed(intc.base + LOCAL_IRQ_PENDING0 + 4 * cpu); 165 + if (stat & 0x10) { 166 + #ifdef CONFIG_SMP 167 + void __iomem *mailbox0 = (intc.base + 168 + LOCAL_MAILBOX0_CLR0 + 16 * cpu); 169 + u32 mbox_val = readl(mailbox0); 170 + u32 ipi = ffs(mbox_val) - 1; 171 + 172 + writel(1 << ipi, mailbox0); 173 + handle_IPI(ipi, regs); 174 + #endif 175 + } else { 176 + u32 hwirq = ffs(stat) - 1; 177 + 178 + 
handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); 179 + } 180 + } 181 + 182 + #ifdef CONFIG_SMP 183 + static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask, 184 + unsigned int ipi) 185 + { 186 + int cpu; 187 + void __iomem *mailbox0_base = intc.base + LOCAL_MAILBOX0_SET0; 188 + 189 + /* 190 + * Ensure that stores to normal memory are visible to the 191 + * other CPUs before issuing the IPI. 192 + */ 193 + dsb(); 194 + 195 + for_each_cpu(cpu, mask) { 196 + writel(1 << ipi, mailbox0_base + 16 * cpu); 197 + } 198 + } 199 + 200 + /* Unmasks the IPI on the CPU when it's online. */ 201 + static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb, 202 + unsigned long action, void *hcpu) 203 + { 204 + unsigned int cpu = (unsigned long)hcpu; 205 + unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0; 206 + unsigned int mailbox = 0; 207 + 208 + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) 209 + bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu); 210 + else if (action == CPU_DYING) 211 + bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu); 212 + 213 + return NOTIFY_OK; 214 + } 215 + 216 + static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = { 217 + .notifier_call = bcm2836_arm_irqchip_cpu_notify, 218 + .priority = 100, 219 + }; 220 + #endif 221 + 222 + static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = { 223 + .xlate = irq_domain_xlate_onecell 224 + }; 225 + 226 + static void 227 + bcm2836_arm_irqchip_smp_init(void) 228 + { 229 + #ifdef CONFIG_SMP 230 + /* Unmask IPIs to the boot CPU. 
*/ 231 + bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier, 232 + CPU_STARTING, 233 + (void *)smp_processor_id()); 234 + register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier); 235 + 236 + set_smp_cross_call(bcm2836_arm_irqchip_send_ipi); 237 + #endif 238 + } 239 + 240 + static int __init bcm2836_arm_irqchip_l1_intc_of_init(struct device_node *node, 241 + struct device_node *parent) 242 + { 243 + intc.base = of_iomap(node, 0); 244 + if (!intc.base) { 245 + panic("%s: unable to map local interrupt registers\n", 246 + node->full_name); 247 + } 248 + 249 + intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1, 250 + &bcm2836_arm_irqchip_intc_ops, 251 + NULL); 252 + if (!intc.domain) 253 + panic("%s: unable to create IRQ domain\n", node->full_name); 254 + 255 + bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ, 256 + &bcm2836_arm_irqchip_timer); 257 + bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPNSIRQ, 258 + &bcm2836_arm_irqchip_timer); 259 + bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTHPIRQ, 260 + &bcm2836_arm_irqchip_timer); 261 + bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTVIRQ, 262 + &bcm2836_arm_irqchip_timer); 263 + bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_GPU_FAST, 264 + &bcm2836_arm_irqchip_gpu); 265 + bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_PMU_FAST, 266 + &bcm2836_arm_irqchip_pmu); 267 + 268 + bcm2836_arm_irqchip_smp_init(); 269 + 270 + set_handle_irq(bcm2836_arm_irqchip_handle_irq); 271 + return 0; 272 + } 273 + 274 + IRQCHIP_DECLARE(bcm2836_arm_irqchip_l1_intc, "brcm,bcm2836-l1-intc", 275 + bcm2836_arm_irqchip_l1_intc_of_init);
+3 -4
drivers/irqchip/irq-bcm7038-l1.c
··· 29 29 #include <linux/slab.h> 30 30 #include <linux/smp.h> 31 31 #include <linux/types.h> 32 + #include <linux/irqchip.h> 32 33 #include <linux/irqchip/chained_irq.h> 33 - 34 - #include "irqchip.h" 35 34 36 35 #define IRQS_PER_WORD 32 37 36 #define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4) ··· 256 257 pr_err("failed to map parent interrupt %d\n", parent_irq); 257 258 return -EINVAL; 258 259 } 259 - irq_set_handler_data(parent_irq, intc); 260 - irq_set_chained_handler(parent_irq, bcm7038_l1_irq_handle); 260 + irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle, 261 + intc); 261 262 262 263 return 0; 263 264 }
+53 -23
drivers/irqchip/irq-bcm7120-l2.c
··· 26 26 #include <linux/irqdomain.h> 27 27 #include <linux/reboot.h> 28 28 #include <linux/bitops.h> 29 + #include <linux/irqchip.h> 29 30 #include <linux/irqchip/chained_irq.h> 30 - 31 - #include "irqchip.h" 32 31 33 32 /* Register offset in the L2 interrupt controller */ 34 33 #define IRQEN 0x00 ··· 36 37 #define MAX_WORDS 4 37 38 #define MAX_MAPPINGS (MAX_WORDS * 2) 38 39 #define IRQS_PER_WORD 32 40 + 41 + struct bcm7120_l1_intc_data { 42 + struct bcm7120_l2_intc_data *b; 43 + u32 irq_map_mask[MAX_WORDS]; 44 + }; 39 45 40 46 struct bcm7120_l2_intc_data { 41 47 unsigned int n_words; ··· 51 47 struct irq_domain *domain; 52 48 bool can_wake; 53 49 u32 irq_fwd_mask[MAX_WORDS]; 54 - u32 irq_map_mask[MAX_WORDS]; 50 + struct bcm7120_l1_intc_data *l1_data; 55 51 int num_parent_irqs; 56 52 const __be32 *map_mask_prop; 57 53 }; 58 54 59 55 static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) 60 56 { 61 - struct bcm7120_l2_intc_data *b = irq_desc_get_handler_data(desc); 57 + struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc); 58 + struct bcm7120_l2_intc_data *b = data->b; 62 59 struct irq_chip *chip = irq_desc_get_chip(desc); 63 60 unsigned int idx; 64 61 ··· 74 69 75 70 irq_gc_lock(gc); 76 71 pending = irq_reg_readl(gc, b->stat_offset[idx]) & 77 - gc->mask_cache; 72 + gc->mask_cache & 73 + data->irq_map_mask[idx]; 78 74 irq_gc_unlock(gc); 79 75 80 76 for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { ··· 87 81 chained_irq_exit(chip, desc); 88 82 } 89 83 90 - static void bcm7120_l2_intc_suspend(struct irq_data *d) 84 + static void bcm7120_l2_intc_suspend(struct irq_chip_generic *gc) 91 85 { 92 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 93 - struct irq_chip_type *ct = irq_data_get_chip_type(d); 94 86 struct bcm7120_l2_intc_data *b = gc->private; 87 + struct irq_chip_type *ct = gc->chip_types; 95 88 96 89 irq_gc_lock(gc); 97 90 if (b->can_wake) ··· 99 94 irq_gc_unlock(gc); 100 95 } 101 96 102 - static void 
bcm7120_l2_intc_resume(struct irq_data *d) 97 + static void bcm7120_l2_intc_resume(struct irq_chip_generic *gc) 103 98 { 104 - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 105 - struct irq_chip_type *ct = irq_data_get_chip_type(d); 99 + struct irq_chip_type *ct = gc->chip_types; 106 100 107 101 /* Restore the saved mask */ 108 102 irq_gc_lock(gc); ··· 111 107 112 108 static int bcm7120_l2_intc_init_one(struct device_node *dn, 113 109 struct bcm7120_l2_intc_data *data, 114 - int irq) 110 + int irq, u32 *valid_mask) 115 111 { 112 + struct bcm7120_l1_intc_data *l1_data = &data->l1_data[irq]; 116 113 int parent_irq; 117 114 unsigned int idx; 118 115 ··· 125 120 126 121 /* For multiple parent IRQs with multiple words, this looks like: 127 122 * <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...> 123 + * 124 + * We need to associate a given parent interrupt with its corresponding 125 + * map_mask in order to mask the status register with it because we 126 + * have the same handler being called for multiple parent interrupts. 127 + * 128 + * This is typically something needed on BCM7xxx (STB chips). 
128 129 */ 129 130 for (idx = 0; idx < data->n_words; idx++) { 130 131 if (data->map_mask_prop) { 131 - data->irq_map_mask[idx] |= 132 + l1_data->irq_map_mask[idx] |= 132 133 be32_to_cpup(data->map_mask_prop + 133 134 irq * data->n_words + idx); 134 135 } else { 135 - data->irq_map_mask[idx] = 0xffffffff; 136 + l1_data->irq_map_mask[idx] = 0xffffffff; 136 137 } 138 + valid_mask[idx] |= l1_data->irq_map_mask[idx]; 137 139 } 138 140 139 - irq_set_handler_data(parent_irq, data); 140 - irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle); 141 + l1_data->b = data; 141 142 143 + irq_set_chained_handler_and_data(parent_irq, 144 + bcm7120_l2_intc_irq_handle, l1_data); 142 145 return 0; 143 146 } 144 147 ··· 227 214 struct irq_chip_type *ct; 228 215 int ret = 0; 229 216 unsigned int idx, irq, flags; 217 + u32 valid_mask[MAX_WORDS] = { }; 230 218 231 219 data = kzalloc(sizeof(*data), GFP_KERNEL); 232 220 if (!data) ··· 240 226 goto out_unmap; 241 227 } 242 228 229 + data->l1_data = kcalloc(data->num_parent_irqs, sizeof(*data->l1_data), 230 + GFP_KERNEL); 231 + if (!data->l1_data) { 232 + ret = -ENOMEM; 233 + goto out_free_l1_data; 234 + } 235 + 243 236 ret = iomap_regs_fn(dn, data); 244 237 if (ret < 0) 245 - goto out_unmap; 238 + goto out_free_l1_data; 246 239 247 240 for (idx = 0; idx < data->n_words; idx++) { 248 241 __raw_writel(data->irq_fwd_mask[idx], ··· 258 237 } 259 238 260 239 for (irq = 0; irq < data->num_parent_irqs; irq++) { 261 - ret = bcm7120_l2_intc_init_one(dn, data, irq); 240 + ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask); 262 241 if (ret) 263 - goto out_unmap; 242 + goto out_free_l1_data; 264 243 } 265 244 266 245 data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words, 267 246 &irq_generic_chip_ops, NULL); 268 247 if (!data->domain) { 269 248 ret = -ENOMEM; 270 - goto out_unmap; 249 + goto out_free_l1_data; 271 250 } 272 251 273 252 /* MIPS chips strapped for BE will automagically configure the ··· 291 270 irq = idx 
* IRQS_PER_WORD; 292 271 gc = irq_get_domain_generic_chip(data->domain, irq); 293 272 294 - gc->unused = 0xffffffff & ~data->irq_map_mask[idx]; 273 + gc->unused = 0xffffffff & ~valid_mask[idx]; 295 274 gc->private = data; 296 275 ct = gc->chip_types; 297 276 ··· 301 280 ct->chip.irq_mask = irq_gc_mask_clr_bit; 302 281 ct->chip.irq_unmask = irq_gc_mask_set_bit; 303 282 ct->chip.irq_ack = irq_gc_noop; 304 - ct->chip.irq_suspend = bcm7120_l2_intc_suspend; 305 - ct->chip.irq_resume = bcm7120_l2_intc_resume; 283 + gc->suspend = bcm7120_l2_intc_suspend; 284 + gc->resume = bcm7120_l2_intc_resume; 285 + 286 + /* 287 + * Initialize mask-cache, in case we need it for 288 + * saving/restoring fwd mask even w/o any child interrupts 289 + * installed 290 + */ 291 + gc->mask_cache = irq_reg_readl(gc, ct->regs.mask); 306 292 307 293 if (data->can_wake) { 308 294 /* This IRQ chip can wake the system, set all ··· 328 300 329 301 out_free_domain: 330 302 irq_domain_remove(data->domain); 303 + out_free_l1_data: 304 + kfree(data->l1_data); 331 305 out_unmap: 332 306 for (idx = 0; idx < MAX_MAPPINGS; idx++) { 333 307 if (data->map_base[idx])
+5 -5
drivers/irqchip/irq-brcmstb-l2.c
··· 32 32 #include <linux/irqchip.h> 33 33 #include <linux/irqchip/chained_irq.h> 34 34 35 - #include "irqchip.h" 36 - 37 35 /* Register offsets in the L2 interrupt controller */ 38 36 #define CPU_STATUS 0x00 39 37 #define CPU_SET 0x04 ··· 49 51 u32 saved_mask; /* for suspend/resume */ 50 52 }; 51 53 52 - static void brcmstb_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) 54 + static void brcmstb_l2_intc_irq_handle(unsigned int __irq, 55 + struct irq_desc *desc) 53 56 { 54 57 struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); 55 58 struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); 56 59 struct irq_chip *chip = irq_desc_get_chip(desc); 60 + unsigned int irq = irq_desc_get_irq(desc); 57 61 u32 status; 58 62 59 63 chained_irq_enter(chip, desc); ··· 172 172 } 173 173 174 174 /* Set the IRQ chaining logic */ 175 - irq_set_handler_data(data->parent_irq, data); 176 - irq_set_chained_handler(data->parent_irq, brcmstb_l2_intc_irq_handle); 175 + irq_set_chained_handler_and_data(data->parent_irq, 176 + brcmstb_l2_intc_irq_handle, data); 177 177 178 178 gc = irq_get_domain_generic_chip(data->domain, 0); 179 179 gc->reg_base = data->base;
+1 -2
drivers/irqchip/irq-clps711x.c
··· 11 11 12 12 #include <linux/io.h> 13 13 #include <linux/irq.h> 14 + #include <linux/irqchip.h> 14 15 #include <linux/irqdomain.h> 15 16 #include <linux/of_address.h> 16 17 #include <linux/of_irq.h> ··· 19 18 20 19 #include <asm/exception.h> 21 20 #include <asm/mach/irq.h> 22 - 23 - #include "irqchip.h" 24 21 25 22 #define CLPS711X_INTSR1 (0x0240) 26 23 #define CLPS711X_INTMR1 (0x0280)
+1 -2
drivers/irqchip/irq-crossbar.c
··· 11 11 */ 12 12 #include <linux/err.h> 13 13 #include <linux/io.h> 14 + #include <linux/irqchip.h> 14 15 #include <linux/irqdomain.h> 15 16 #include <linux/of_address.h> 16 17 #include <linux/of_irq.h> 17 18 #include <linux/slab.h> 18 - 19 - #include "irqchip.h" 20 19 21 20 #define IRQ_FREE -1 22 21 #define IRQ_RESERVED -2
+1 -2
drivers/irqchip/irq-digicolor.c
··· 12 12 13 13 #include <linux/io.h> 14 14 #include <linux/irq.h> 15 + #include <linux/irqchip.h> 15 16 #include <linux/of.h> 16 17 #include <linux/of_address.h> 17 18 #include <linux/of_irq.h> ··· 20 19 #include <linux/regmap.h> 21 20 22 21 #include <asm/exception.h> 23 - 24 - #include "irqchip.h" 25 22 26 23 #define UC_IRQ_CONTROL 0x04 27 24
+23 -33
drivers/irqchip/irq-dw-apb-ictl.c
··· 13 13 14 14 #include <linux/io.h> 15 15 #include <linux/irq.h> 16 + #include <linux/irqchip.h> 16 17 #include <linux/irqchip/chained_irq.h> 17 18 #include <linux/of_address.h> 18 19 #include <linux/of_irq.h> 19 - 20 - #include "irqchip.h" 21 20 22 21 #define APB_INT_ENABLE_L 0x00 23 22 #define APB_INT_ENABLE_H 0x04 ··· 24 25 #define APB_INT_MASK_H 0x0c 25 26 #define APB_INT_FINALSTATUS_L 0x30 26 27 #define APB_INT_FINALSTATUS_H 0x34 28 + #define APB_INT_BASE_OFFSET 0x04 27 29 28 30 static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc) 29 31 { 30 - struct irq_chip *chip = irq_get_chip(irq); 31 - struct irq_chip_generic *gc = irq_get_handler_data(irq); 32 - struct irq_domain *d = gc->private; 33 - u32 stat; 32 + struct irq_domain *d = irq_desc_get_handler_data(desc); 33 + struct irq_chip *chip = irq_desc_get_chip(desc); 34 34 int n; 35 35 36 36 chained_irq_enter(chip, desc); 37 37 38 - for (n = 0; n < gc->num_ct; n++) { 39 - stat = readl_relaxed(gc->reg_base + 40 - APB_INT_FINALSTATUS_L + 4 * n); 38 + for (n = 0; n < d->revmap_size; n += 32) { 39 + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n); 40 + u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L); 41 + 41 42 while (stat) { 42 43 u32 hwirq = ffs(stat) - 1; 43 - generic_handle_irq(irq_find_mapping(d, 44 - gc->irq_base + hwirq + 32 * n)); 44 + u32 virq = irq_find_mapping(d, gc->irq_base + hwirq); 45 + 46 + generic_handle_irq(virq); 45 47 stat &= ~(1 << hwirq); 46 48 } 47 49 } ··· 73 73 struct irq_domain *domain; 74 74 struct irq_chip_generic *gc; 75 75 void __iomem *iobase; 76 - int ret, nrirqs, irq; 76 + int ret, nrirqs, irq, i; 77 77 u32 reg; 78 78 79 79 /* Map the parent interrupt for the chained handler */ ··· 128 128 goto err_unmap; 129 129 } 130 130 131 - ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 
2 : 1, 132 - np->name, handle_level_irq, clr, 0, 133 - IRQ_GC_MASK_CACHE_PER_TYPE | 131 + ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name, 132 + handle_level_irq, clr, 0, 134 133 IRQ_GC_INIT_MASK_CACHE); 135 134 if (ret) { 136 135 pr_err("%s: unable to alloc irq domain gc\n", np->full_name); 137 136 goto err_unmap; 138 137 } 139 138 140 - gc = irq_get_domain_generic_chip(domain, 0); 141 - gc->private = domain; 142 - gc->reg_base = iobase; 143 - 144 - gc->chip_types[0].regs.mask = APB_INT_MASK_L; 145 - gc->chip_types[0].regs.enable = APB_INT_ENABLE_L; 146 - gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; 147 - gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; 148 - gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume; 149 - 150 - if (nrirqs > 32) { 151 - gc->chip_types[1].regs.mask = APB_INT_MASK_H; 152 - gc->chip_types[1].regs.enable = APB_INT_ENABLE_H; 153 - gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit; 154 - gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit; 155 - gc->chip_types[1].chip.irq_resume = dw_apb_ictl_resume; 139 + for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) { 140 + gc = irq_get_domain_generic_chip(domain, i * 32); 141 + gc->reg_base = iobase + i * APB_INT_BASE_OFFSET; 142 + gc->chip_types[0].regs.mask = APB_INT_MASK_L; 143 + gc->chip_types[0].regs.enable = APB_INT_ENABLE_L; 144 + gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; 145 + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; 146 + gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume; 156 147 } 157 148 158 - irq_set_handler_data(irq, gc); 159 - irq_set_chained_handler(irq, dw_apb_ictl_handler); 149 + irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain); 160 150 161 151 return 0; 162 152
+31 -21
drivers/irqchip/irq-gic-v2m.c
··· 45 45 46 46 struct v2m_data { 47 47 spinlock_t msi_cnt_lock; 48 - struct msi_controller mchip; 49 48 struct resource res; /* GICv2m resource */ 50 49 void __iomem *base; /* GICv2m virt address */ 51 50 u32 spi_start; /* The SPI number that MSIs start */ 52 51 u32 nr_spis; /* The number of SPIs for MSIs */ 53 52 unsigned long *bm; /* MSI vector bitmap */ 54 - struct irq_domain *domain; 55 53 }; 56 54 57 55 static void gicv2m_mask_msi_irq(struct irq_data *d) ··· 211 213 return true; 212 214 } 213 215 216 + static struct irq_chip gicv2m_pmsi_irq_chip = { 217 + .name = "pMSI", 218 + }; 219 + 220 + static struct msi_domain_ops gicv2m_pmsi_ops = { 221 + }; 222 + 223 + static struct msi_domain_info gicv2m_pmsi_domain_info = { 224 + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), 225 + .ops = &gicv2m_pmsi_ops, 226 + .chip = &gicv2m_pmsi_irq_chip, 227 + }; 228 + 214 229 static int __init gicv2m_init_one(struct device_node *node, 215 230 struct irq_domain *parent) 216 231 { 217 232 int ret; 218 233 struct v2m_data *v2m; 234 + struct irq_domain *inner_domain, *pci_domain, *plat_domain; 219 235 220 236 v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); 221 237 if (!v2m) { ··· 273 261 goto err_iounmap; 274 262 } 275 263 276 - v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m); 277 - if (!v2m->domain) { 264 + inner_domain = irq_domain_add_tree(node, &gicv2m_domain_ops, v2m); 265 + if (!inner_domain) { 278 266 pr_err("Failed to create GICv2m domain\n"); 279 267 ret = -ENOMEM; 280 268 goto err_free_bm; 281 269 } 282 270 283 - v2m->domain->parent = parent; 284 - v2m->mchip.of_node = node; 285 - v2m->mchip.domain = pci_msi_create_irq_domain(node, 286 - &gicv2m_msi_domain_info, 287 - v2m->domain); 288 - if (!v2m->mchip.domain) { 289 - pr_err("Failed to create MSI domain\n"); 271 + inner_domain->bus_token = DOMAIN_BUS_NEXUS; 272 + inner_domain->parent = parent; 273 + pci_domain = pci_msi_create_irq_domain(node, &gicv2m_msi_domain_info, 274 + 
inner_domain); 275 + plat_domain = platform_msi_create_irq_domain(node, 276 + &gicv2m_pmsi_domain_info, 277 + inner_domain); 278 + if (!pci_domain || !plat_domain) { 279 + pr_err("Failed to create MSI domains\n"); 290 280 ret = -ENOMEM; 291 281 goto err_free_domains; 292 282 } 293 283 294 284 spin_lock_init(&v2m->msi_cnt_lock); 295 - 296 - ret = of_pci_msi_chip_add(&v2m->mchip); 297 - if (ret) { 298 - pr_err("Failed to add msi_chip.\n"); 299 - goto err_free_domains; 300 - } 301 285 302 286 pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, 303 287 (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, ··· 302 294 return 0; 303 295 304 296 err_free_domains: 305 - if (v2m->mchip.domain) 306 - irq_domain_remove(v2m->mchip.domain); 307 - if (v2m->domain) 308 - irq_domain_remove(v2m->domain); 297 + if (plat_domain) 298 + irq_domain_remove(plat_domain); 299 + if (pci_domain) 300 + irq_domain_remove(pci_domain); 301 + if (inner_domain) 302 + irq_domain_remove(inner_domain); 309 303 err_free_bm: 310 304 kfree(v2m->bm); 311 305 err_iounmap:
+140
drivers/irqchip/irq-gic-v3-its-pci-msi.c
··· 1 + /* 2 + * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved. 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #include <linux/msi.h> 19 + #include <linux/of.h> 20 + #include <linux/of_irq.h> 21 + #include <linux/of_pci.h> 22 + 23 + static void its_mask_msi_irq(struct irq_data *d) 24 + { 25 + pci_msi_mask_irq(d); 26 + irq_chip_mask_parent(d); 27 + } 28 + 29 + static void its_unmask_msi_irq(struct irq_data *d) 30 + { 31 + pci_msi_unmask_irq(d); 32 + irq_chip_unmask_parent(d); 33 + } 34 + 35 + static struct irq_chip its_msi_irq_chip = { 36 + .name = "ITS-MSI", 37 + .irq_unmask = its_unmask_msi_irq, 38 + .irq_mask = its_mask_msi_irq, 39 + .irq_eoi = irq_chip_eoi_parent, 40 + .irq_write_msi_msg = pci_msi_domain_write_msg, 41 + }; 42 + 43 + struct its_pci_alias { 44 + struct pci_dev *pdev; 45 + u32 dev_id; 46 + u32 count; 47 + }; 48 + 49 + static int its_pci_msi_vec_count(struct pci_dev *pdev) 50 + { 51 + int msi, msix; 52 + 53 + msi = max(pci_msi_vec_count(pdev), 0); 54 + msix = max(pci_msix_vec_count(pdev), 0); 55 + 56 + return max(msi, msix); 57 + } 58 + 59 + static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) 60 + { 61 + struct its_pci_alias *dev_alias = data; 62 + 63 + dev_alias->dev_id = alias; 64 + if (pdev != dev_alias->pdev) 65 + dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); 66 + 67 
+ return 0; 68 + } 69 + 70 + static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, 71 + int nvec, msi_alloc_info_t *info) 72 + { 73 + struct pci_dev *pdev; 74 + struct its_pci_alias dev_alias; 75 + struct msi_domain_info *msi_info; 76 + 77 + if (!dev_is_pci(dev)) 78 + return -EINVAL; 79 + 80 + msi_info = msi_get_domain_info(domain->parent); 81 + 82 + pdev = to_pci_dev(dev); 83 + dev_alias.pdev = pdev; 84 + dev_alias.count = nvec; 85 + 86 + pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); 87 + 88 + /* ITS specific DeviceID, as the core ITS ignores dev. */ 89 + info->scratchpad[0].ul = dev_alias.dev_id; 90 + 91 + return msi_info->ops->msi_prepare(domain->parent, 92 + dev, dev_alias.count, info); 93 + } 94 + 95 + static struct msi_domain_ops its_pci_msi_ops = { 96 + .msi_prepare = its_pci_msi_prepare, 97 + }; 98 + 99 + static struct msi_domain_info its_pci_msi_domain_info = { 100 + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | 101 + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), 102 + .ops = &its_pci_msi_ops, 103 + .chip = &its_msi_irq_chip, 104 + }; 105 + 106 + static struct of_device_id its_device_id[] = { 107 + { .compatible = "arm,gic-v3-its", }, 108 + {}, 109 + }; 110 + 111 + static int __init its_pci_msi_init(void) 112 + { 113 + struct device_node *np; 114 + struct irq_domain *parent; 115 + 116 + for (np = of_find_matching_node(NULL, its_device_id); np; 117 + np = of_find_matching_node(np, its_device_id)) { 118 + if (!of_property_read_bool(np, "msi-controller")) 119 + continue; 120 + 121 + parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS); 122 + if (!parent || !msi_get_domain_info(parent)) { 123 + pr_err("%s: unable to locate ITS domain\n", 124 + np->full_name); 125 + continue; 126 + } 127 + 128 + if (!pci_msi_create_irq_domain(np, &its_pci_msi_domain_info, 129 + parent)) { 130 + pr_err("%s: unable to create PCI domain\n", 131 + np->full_name); 132 + continue; 133 + } 134 + 135 + pr_info("PCI/MSI: %s 
domain created\n", np->full_name); 136 + } 137 + 138 + return 0; 139 + } 140 + early_initcall(its_pci_msi_init);
+93
drivers/irqchip/irq-gic-v3-its-platform-msi.c
··· 1 + /* 2 + * Copyright (C) 2013-2015 ARM Limited, All Rights Reserved. 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #include <linux/device.h> 19 + #include <linux/msi.h> 20 + #include <linux/of.h> 21 + #include <linux/of_irq.h> 22 + 23 + static struct irq_chip its_pmsi_irq_chip = { 24 + .name = "ITS-pMSI", 25 + }; 26 + 27 + static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, 28 + int nvec, msi_alloc_info_t *info) 29 + { 30 + struct msi_domain_info *msi_info; 31 + u32 dev_id; 32 + int ret; 33 + 34 + msi_info = msi_get_domain_info(domain->parent); 35 + 36 + /* Suck the DeviceID out of the msi-parent property */ 37 + ret = of_property_read_u32_index(dev->of_node, "msi-parent", 38 + 1, &dev_id); 39 + if (ret) 40 + return ret; 41 + 42 + /* ITS specific DeviceID, as the core ITS ignores dev. 
*/ 43 + info->scratchpad[0].ul = dev_id; 44 + 45 + return msi_info->ops->msi_prepare(domain->parent, 46 + dev, nvec, info); 47 + } 48 + 49 + static struct msi_domain_ops its_pmsi_ops = { 50 + .msi_prepare = its_pmsi_prepare, 51 + }; 52 + 53 + static struct msi_domain_info its_pmsi_domain_info = { 54 + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), 55 + .ops = &its_pmsi_ops, 56 + .chip = &its_pmsi_irq_chip, 57 + }; 58 + 59 + static struct of_device_id its_device_id[] = { 60 + { .compatible = "arm,gic-v3-its", }, 61 + {}, 62 + }; 63 + 64 + static int __init its_pmsi_init(void) 65 + { 66 + struct device_node *np; 67 + struct irq_domain *parent; 68 + 69 + for (np = of_find_matching_node(NULL, its_device_id); np; 70 + np = of_find_matching_node(np, its_device_id)) { 71 + if (!of_property_read_bool(np, "msi-controller")) 72 + continue; 73 + 74 + parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS); 75 + if (!parent || !msi_get_domain_info(parent)) { 76 + pr_err("%s: unable to locate ITS domain\n", 77 + np->full_name); 78 + continue; 79 + } 80 + 81 + if (!platform_msi_create_irq_domain(np, &its_pmsi_domain_info, 82 + parent)) { 83 + pr_err("%s: unable to create platform domain\n", 84 + np->full_name); 85 + continue; 86 + } 87 + 88 + pr_info("Platform MSI: %s domain created\n", np->full_name); 89 + } 90 + 91 + return 0; 92 + } 93 + early_initcall(its_pmsi_init);
+41 -103
drivers/irqchip/irq-gic-v3-its.c
··· 30 30 #include <linux/percpu.h> 31 31 #include <linux/slab.h> 32 32 33 + #include <linux/irqchip.h> 33 34 #include <linux/irqchip/arm-gic-v3.h> 34 35 35 36 #include <asm/cacheflush.h> 36 37 #include <asm/cputype.h> 37 38 #include <asm/exception.h> 38 - 39 - #include "irqchip.h" 40 39 41 40 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) 42 41 ··· 53 54 54 55 /* 55 56 * The ITS structure - contains most of the infrastructure, with the 56 - * msi_controller, the command queue, the collections, and the list of 57 - * devices writing to it. 57 + * top-level MSI domain, the command queue, the collections, and the 58 + * list of devices writing to it. 58 59 */ 59 60 struct its_node { 60 61 raw_spinlock_t lock; 61 62 struct list_head entry; 62 - struct msi_controller msi_chip; 63 - struct irq_domain *domain; 64 63 void __iomem *base; 65 64 unsigned long phys_base; 66 65 struct its_cmd_block *cmd_base; ··· 640 643 .irq_compose_msi_msg = its_irq_compose_msi_msg, 641 644 }; 642 645 643 - static void its_mask_msi_irq(struct irq_data *d) 644 - { 645 - pci_msi_mask_irq(d); 646 - irq_chip_mask_parent(d); 647 - } 648 - 649 - static void its_unmask_msi_irq(struct irq_data *d) 650 - { 651 - pci_msi_unmask_irq(d); 652 - irq_chip_unmask_parent(d); 653 - } 654 - 655 - static struct irq_chip its_msi_irq_chip = { 656 - .name = "ITS-MSI", 657 - .irq_unmask = its_unmask_msi_irq, 658 - .irq_mask = its_mask_msi_irq, 659 - .irq_eoi = irq_chip_eoi_parent, 660 - .irq_write_msi_msg = pci_msi_domain_write_msg, 661 - }; 662 - 663 646 /* 664 647 * How we allocate LPIs: 665 648 * ··· 808 831 } 809 832 } 810 833 811 - static int its_alloc_tables(struct its_node *its) 834 + static int its_alloc_tables(const char *node_name, struct its_node *its) 812 835 { 813 836 int err; 814 837 int i; ··· 851 874 if (order >= MAX_ORDER) { 852 875 order = MAX_ORDER - 1; 853 876 pr_warn("%s: Device Table too large, reduce its page order to %u\n", 854 - its->msi_chip.of_node->full_name, order); 877 + node_name, 
order); 855 878 } 856 879 } 857 880 ··· 921 944 922 945 if (val != tmp) { 923 946 pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n", 924 - its->msi_chip.of_node->full_name, i, 947 + node_name, i, 925 948 (unsigned long) val, (unsigned long) tmp); 926 949 err = -ENXIO; 927 950 goto out_free; ··· 1186 1209 return 0; 1187 1210 } 1188 1211 1189 - struct its_pci_alias { 1190 - struct pci_dev *pdev; 1191 - u32 dev_id; 1192 - u32 count; 1193 - }; 1194 - 1195 - static int its_pci_msi_vec_count(struct pci_dev *pdev) 1196 - { 1197 - int msi, msix; 1198 - 1199 - msi = max(pci_msi_vec_count(pdev), 0); 1200 - msix = max(pci_msix_vec_count(pdev), 0); 1201 - 1202 - return max(msi, msix); 1203 - } 1204 - 1205 - static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) 1206 - { 1207 - struct its_pci_alias *dev_alias = data; 1208 - 1209 - dev_alias->dev_id = alias; 1210 - if (pdev != dev_alias->pdev) 1211 - dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); 1212 - 1213 - return 0; 1214 - } 1215 - 1216 1212 static int its_msi_prepare(struct irq_domain *domain, struct device *dev, 1217 1213 int nvec, msi_alloc_info_t *info) 1218 1214 { 1219 - struct pci_dev *pdev; 1220 1215 struct its_node *its; 1221 1216 struct its_device *its_dev; 1222 - struct its_pci_alias dev_alias; 1217 + struct msi_domain_info *msi_info; 1218 + u32 dev_id; 1223 1219 1224 - if (!dev_is_pci(dev)) 1225 - return -EINVAL; 1220 + /* 1221 + * We ignore "dev" entierely, and rely on the dev_id that has 1222 + * been passed via the scratchpad. This limits this domain's 1223 + * usefulness to upper layers that definitely know that they 1224 + * are built on top of the ITS. 
1225 + */ 1226 + dev_id = info->scratchpad[0].ul; 1226 1227 1227 - pdev = to_pci_dev(dev); 1228 - dev_alias.pdev = pdev; 1229 - dev_alias.count = nvec; 1228 + msi_info = msi_get_domain_info(domain); 1229 + its = msi_info->data; 1230 1230 1231 - pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); 1232 - its = domain->parent->host_data; 1233 - 1234 - its_dev = its_find_device(its, dev_alias.dev_id); 1231 + its_dev = its_find_device(its, dev_id); 1235 1232 if (its_dev) { 1236 1233 /* 1237 1234 * We already have seen this ID, probably through 1238 1235 * another alias (PCI bridge of some sort). No need to 1239 1236 * create the device. 1240 1237 */ 1241 - dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id); 1238 + pr_debug("Reusing ITT for devID %x\n", dev_id); 1242 1239 goto out; 1243 1240 } 1244 1241 1245 - its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count); 1242 + its_dev = its_create_device(its, dev_id, nvec); 1246 1243 if (!its_dev) 1247 1244 return -ENOMEM; 1248 1245 1249 - dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", 1250 - dev_alias.count, ilog2(dev_alias.count)); 1246 + pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); 1251 1247 out: 1252 1248 info->scratchpad[0].ptr = its_dev; 1253 - info->scratchpad[1].ptr = dev; 1254 1249 return 0; 1255 1250 } 1256 1251 1257 - static struct msi_domain_ops its_pci_msi_ops = { 1252 + static struct msi_domain_ops its_msi_domain_ops = { 1258 1253 .msi_prepare = its_msi_prepare, 1259 - }; 1260 - 1261 - static struct msi_domain_info its_pci_msi_domain_info = { 1262 - .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | 1263 - MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), 1264 - .ops = &its_pci_msi_ops, 1265 - .chip = &its_msi_irq_chip, 1266 1254 }; 1267 1255 1268 1256 static int its_irq_gic_domain_alloc(struct irq_domain *domain, ··· 1265 1323 1266 1324 irq_domain_set_hwirq_and_chip(domain, virq + i, 1267 1325 hwirq, &its_irq_chip, its_dev); 1268 - 
dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", 1269 - (int)(hwirq - its_dev->event_map.lpi_base), 1270 - (int)hwirq, virq + i); 1326 + pr_debug("ID:%d pID:%d vID:%d\n", 1327 + (int)(hwirq - its_dev->event_map.lpi_base), 1328 + (int) hwirq, virq + i); 1271 1329 } 1272 1330 1273 1331 return 0; ··· 1368 1426 struct resource res; 1369 1427 struct its_node *its; 1370 1428 void __iomem *its_base; 1429 + struct irq_domain *inner_domain; 1371 1430 u32 val; 1372 1431 u64 baser, tmp; 1373 1432 int err; ··· 1412 1469 INIT_LIST_HEAD(&its->its_device_list); 1413 1470 its->base = its_base; 1414 1471 its->phys_base = res.start; 1415 - its->msi_chip.of_node = node; 1416 1472 its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; 1417 1473 1418 1474 its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); ··· 1421 1479 } 1422 1480 its->cmd_write = its->cmd_base; 1423 1481 1424 - err = its_alloc_tables(its); 1482 + err = its_alloc_tables(node->full_name, its); 1425 1483 if (err) 1426 1484 goto out_free_cmd; 1427 1485 ··· 1457 1515 writeq_relaxed(0, its->base + GITS_CWRITER); 1458 1516 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); 1459 1517 1460 - if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { 1461 - its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); 1462 - if (!its->domain) { 1518 + if (of_property_read_bool(node, "msi-controller")) { 1519 + struct msi_domain_info *info; 1520 + 1521 + info = kzalloc(sizeof(*info), GFP_KERNEL); 1522 + if (!info) { 1463 1523 err = -ENOMEM; 1464 1524 goto out_free_tables; 1465 1525 } 1466 1526 1467 - its->domain->parent = parent; 1468 - 1469 - its->msi_chip.domain = pci_msi_create_irq_domain(node, 1470 - &its_pci_msi_domain_info, 1471 - its->domain); 1472 - if (!its->msi_chip.domain) { 1527 + inner_domain = irq_domain_add_tree(node, &its_domain_ops, its); 1528 + if (!inner_domain) { 1473 1529 err = -ENOMEM; 1474 - goto out_free_domains; 1530 + kfree(info); 1531 + goto 
out_free_tables; 1475 1532 } 1476 1533 1477 - err = of_pci_msi_chip_add(&its->msi_chip); 1478 - if (err) 1479 - goto out_free_domains; 1534 + inner_domain->parent = parent; 1535 + inner_domain->bus_token = DOMAIN_BUS_NEXUS; 1536 + info->ops = &its_msi_domain_ops; 1537 + info->data = its; 1538 + inner_domain->host_data = info; 1480 1539 } 1481 1540 1482 1541 spin_lock(&its_lock); ··· 1486 1543 1487 1544 return 0; 1488 1545 1489 - out_free_domains: 1490 - if (its->msi_chip.domain) 1491 - irq_domain_remove(its->msi_chip.domain); 1492 - if (its->domain) 1493 - irq_domain_remove(its->domain); 1494 1546 out_free_tables: 1495 1547 its_free_tables(its); 1496 1548 out_free_cmd:
+1 -1
drivers/irqchip/irq-gic-v3.c
··· 25 25 #include <linux/percpu.h> 26 26 #include <linux/slab.h> 27 27 28 + #include <linux/irqchip.h> 28 29 #include <linux/irqchip/arm-gic-v3.h> 29 30 30 31 #include <asm/cputype.h> ··· 33 32 #include <asm/smp_plat.h> 34 33 35 34 #include "irq-gic-common.h" 36 - #include "irqchip.h" 37 35 38 36 struct redist_region { 39 37 void __iomem *redist_base;
+45 -36
drivers/irqchip/irq-gic.c
··· 38 38 #include <linux/interrupt.h> 39 39 #include <linux/percpu.h> 40 40 #include <linux/slab.h> 41 + #include <linux/irqchip.h> 41 42 #include <linux/irqchip/chained_irq.h> 42 43 #include <linux/irqchip/arm-gic.h> 43 44 #include <linux/irqchip/arm-gic-acpi.h> ··· 49 48 #include <asm/smp_plat.h> 50 49 51 50 #include "irq-gic-common.h" 52 - #include "irqchip.h" 53 51 54 52 union gic_base { 55 53 void __iomem *common_base; ··· 288 288 289 289 static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc) 290 290 { 291 - struct gic_chip_data *chip_data = irq_get_handler_data(irq); 292 - struct irq_chip *chip = irq_get_chip(irq); 291 + struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); 292 + struct irq_chip *chip = irq_desc_get_chip(desc); 293 293 unsigned int cascade_irq, gic_irq; 294 294 unsigned long status; 295 295 ··· 324 324 #endif 325 325 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 326 326 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 327 - .flags = IRQCHIP_SET_TYPE_MASKED, 327 + .flags = IRQCHIP_SET_TYPE_MASKED | 328 + IRQCHIP_SKIP_SET_WAKE | 329 + IRQCHIP_MASK_ON_SUSPEND, 328 330 }; 329 331 330 332 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) 331 333 { 332 334 if (gic_nr >= MAX_GIC_NR) 333 335 BUG(); 334 - if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0) 335 - BUG(); 336 - irq_set_chained_handler(irq, gic_handle_cascade_irq); 336 + irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, 337 + &gic_data[gic_nr]); 337 338 } 338 339 339 340 static u8 gic_get_cpumask(struct gic_chip_data *gic) ··· 356 355 return mask; 357 356 } 358 357 359 - static void gic_cpu_if_up(void) 358 + static void gic_cpu_if_up(struct gic_chip_data *gic) 360 359 { 361 - void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]); 360 + void __iomem *cpu_base = gic_data_cpu_base(gic); 362 361 u32 bypass = 0; 363 362 364 363 /* ··· 402 401 int i; 403 402 404 403 /* 405 - * Get what the GIC says our CPU mask is. 
404 + * Setting up the CPU map is only relevant for the primary GIC 405 + * because any nested/secondary GICs do not directly interface 406 + * with the CPU(s). 406 407 */ 407 - BUG_ON(cpu >= NR_GIC_CPU_IF); 408 - cpu_mask = gic_get_cpumask(gic); 409 - gic_cpu_map[cpu] = cpu_mask; 408 + if (gic == &gic_data[0]) { 409 + /* 410 + * Get what the GIC says our CPU mask is. 411 + */ 412 + BUG_ON(cpu >= NR_GIC_CPU_IF); 413 + cpu_mask = gic_get_cpumask(gic); 414 + gic_cpu_map[cpu] = cpu_mask; 410 415 411 - /* 412 - * Clear our mask from the other map entries in case they're 413 - * still undefined. 414 - */ 415 - for (i = 0; i < NR_GIC_CPU_IF; i++) 416 - if (i != cpu) 417 - gic_cpu_map[i] &= ~cpu_mask; 416 + /* 417 + * Clear our mask from the other map entries in case they're 418 + * still undefined. 419 + */ 420 + for (i = 0; i < NR_GIC_CPU_IF; i++) 421 + if (i != cpu) 422 + gic_cpu_map[i] &= ~cpu_mask; 423 + } 418 424 419 425 gic_cpu_config(dist_base, NULL); 420 426 421 427 writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); 422 - gic_cpu_if_up(); 428 + gic_cpu_if_up(gic); 423 429 } 424 430 425 - void gic_cpu_if_down(void) 431 + int gic_cpu_if_down(unsigned int gic_nr) 426 432 { 427 - void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]); 433 + void __iomem *cpu_base; 428 434 u32 val = 0; 429 435 436 + if (gic_nr >= MAX_GIC_NR) 437 + return -EINVAL; 438 + 439 + cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); 430 440 val = readl(cpu_base + GIC_CPU_CTRL); 431 441 val &= ~GICC_ENABLE; 432 442 writel_relaxed(val, cpu_base + GIC_CPU_CTRL); 443 + 444 + return 0; 433 445 } 434 446 435 447 #ifdef CONFIG_CPU_PM ··· 578 564 dist_base + GIC_DIST_PRI + i * 4); 579 565 580 566 writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK); 581 - gic_cpu_if_up(); 567 + gic_cpu_if_up(&gic_data[gic_nr]); 582 568 } 583 569 584 570 static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) ··· 894 880 .xlate = gic_irq_domain_xlate, 895 881 }; 896 
882 897 - void gic_set_irqchip_flags(unsigned long flags) 898 - { 899 - gic_chip.flags |= flags; 900 - } 901 - 902 883 void __init gic_init_bases(unsigned int gic_nr, int irq_start, 903 884 void __iomem *dist_base, void __iomem *cpu_base, 904 885 u32 percpu_offset, struct device_node *node) ··· 937 928 gic->cpu_base.common_base = cpu_base; 938 929 gic_set_base_accessor(gic, gic_get_common_base); 939 930 } 940 - 941 - /* 942 - * Initialize the CPU interface map to all CPUs. 943 - * It will be refined as each CPU probes its ID. 944 - */ 945 - for (i = 0; i < NR_GIC_CPU_IF; i++) 946 - gic_cpu_map[i] = 0xff; 947 931 948 932 /* 949 933 * Find out how many interrupts are supported. ··· 983 981 return; 984 982 985 983 if (gic_nr == 0) { 984 + /* 985 + * Initialize the CPU interface map to all CPUs. 986 + * It will be refined as each CPU probes its ID. 987 + * This is only necessary for the primary GIC. 988 + */ 989 + for (i = 0; i < NR_GIC_CPU_IF; i++) 990 + gic_cpu_map[i] = 0xff; 986 991 #ifdef CONFIG_SMP 987 992 set_smp_cross_call(gic_raise_softirq); 988 993 register_cpu_notifier(&gic_cpu_notifier);
+4 -2
drivers/irqchip/irq-hip04.c
··· 41 41 #include <linux/irqdomain.h> 42 42 #include <linux/interrupt.h> 43 43 #include <linux/slab.h> 44 + #include <linux/irqchip.h> 44 45 #include <linux/irqchip/arm-gic.h> 45 46 46 47 #include <asm/irq.h> ··· 49 48 #include <asm/smp_plat.h> 50 49 51 50 #include "irq-gic-common.h" 52 - #include "irqchip.h" 53 51 54 52 #define HIP04_MAX_IRQS 510 55 53 ··· 202 202 #ifdef CONFIG_SMP 203 203 .irq_set_affinity = hip04_irq_set_affinity, 204 204 #endif 205 - .flags = IRQCHIP_SET_TYPE_MASKED, 205 + .flags = IRQCHIP_SET_TYPE_MASKED | 206 + IRQCHIP_SKIP_SET_WAKE | 207 + IRQCHIP_MASK_ON_SUSPEND, 206 208 }; 207 209 208 210 static u16 hip04_get_cpumask(struct hip04_irq_data *intc)
+6 -5
drivers/irqchip/irq-imgpdc.c
··· 218 218 return 0; 219 219 } 220 220 221 - static void pdc_intc_perip_isr(unsigned int irq, struct irq_desc *desc) 221 + static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc) 222 222 { 223 + unsigned int irq = irq_desc_get_irq(desc); 223 224 struct pdc_intc_priv *priv; 224 225 unsigned int i, irq_no; 225 226 ··· 452 451 /* Setup chained handlers for the peripheral IRQs */ 453 452 for (i = 0; i < priv->nr_perips; ++i) { 454 453 irq = priv->perip_irqs[i]; 455 - irq_set_handler_data(irq, priv); 456 - irq_set_chained_handler(irq, pdc_intc_perip_isr); 454 + irq_set_chained_handler_and_data(irq, pdc_intc_perip_isr, 455 + priv); 457 456 } 458 457 459 458 /* Setup chained handler for the syswake IRQ */ 460 - irq_set_handler_data(priv->syswake_irq, priv); 461 - irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr); 459 + irq_set_chained_handler_and_data(priv->syswake_irq, 460 + pdc_intc_syswake_isr, priv); 462 461 463 462 dev_info(&pdev->dev, 464 463 "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
+278
drivers/irqchip/irq-imx-gpcv2.c
··· 1 + /* 2 + * Copyright (C) 2015 Freescale Semiconductor, Inc. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #include <linux/of_address.h> 10 + #include <linux/of_irq.h> 11 + #include <linux/slab.h> 12 + #include <linux/irqchip.h> 13 + #include <linux/syscore_ops.h> 14 + 15 + #define IMR_NUM 4 16 + #define GPC_MAX_IRQS (IMR_NUM * 32) 17 + 18 + #define GPC_IMR1_CORE0 0x30 19 + #define GPC_IMR1_CORE1 0x40 20 + 21 + struct gpcv2_irqchip_data { 22 + struct raw_spinlock rlock; 23 + void __iomem *gpc_base; 24 + u32 wakeup_sources[IMR_NUM]; 25 + u32 saved_irq_mask[IMR_NUM]; 26 + u32 cpu2wakeup; 27 + }; 28 + 29 + static struct gpcv2_irqchip_data *imx_gpcv2_instance; 30 + 31 + /* 32 + * Interface for the low level wakeup code. 33 + */ 34 + u32 imx_gpcv2_get_wakeup_source(u32 **sources) 35 + { 36 + if (!imx_gpcv2_instance) 37 + return 0; 38 + 39 + if (sources) 40 + *sources = imx_gpcv2_instance->wakeup_sources; 41 + 42 + return IMR_NUM; 43 + } 44 + 45 + static int gpcv2_wakeup_source_save(void) 46 + { 47 + struct gpcv2_irqchip_data *cd; 48 + void __iomem *reg; 49 + int i; 50 + 51 + cd = imx_gpcv2_instance; 52 + if (!cd) 53 + return 0; 54 + 55 + for (i = 0; i < IMR_NUM; i++) { 56 + reg = cd->gpc_base + cd->cpu2wakeup + i * 4; 57 + cd->saved_irq_mask[i] = readl_relaxed(reg); 58 + writel_relaxed(cd->wakeup_sources[i], reg); 59 + } 60 + 61 + return 0; 62 + } 63 + 64 + static void gpcv2_wakeup_source_restore(void) 65 + { 66 + struct gpcv2_irqchip_data *cd; 67 + void __iomem *reg; 68 + int i; 69 + 70 + cd = imx_gpcv2_instance; 71 + if (!cd) 72 + return; 73 + 74 + for (i = 0; i < IMR_NUM; i++) { 75 + reg = cd->gpc_base + cd->cpu2wakeup + i * 4; 76 + writel_relaxed(cd->saved_irq_mask[i], reg); 77 + } 78 + } 79 + 80 + static struct syscore_ops imx_gpcv2_syscore_ops = { 81 + .suspend = gpcv2_wakeup_source_save, 82 
+ .resume = gpcv2_wakeup_source_restore, 83 + }; 84 + 85 + static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on) 86 + { 87 + struct gpcv2_irqchip_data *cd = d->chip_data; 88 + unsigned int idx = d->hwirq / 32; 89 + unsigned long flags; 90 + void __iomem *reg; 91 + u32 mask, val; 92 + 93 + raw_spin_lock_irqsave(&cd->rlock, flags); 94 + reg = cd->gpc_base + cd->cpu2wakeup + idx * 4; 95 + mask = 1 << d->hwirq % 32; 96 + val = cd->wakeup_sources[idx]; 97 + 98 + cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask); 99 + raw_spin_unlock_irqrestore(&cd->rlock, flags); 100 + 101 + /* 102 + * Do *not* call into the parent, as the GIC doesn't have any 103 + * wake-up facility... 104 + */ 105 + 106 + return 0; 107 + } 108 + 109 + static void imx_gpcv2_irq_unmask(struct irq_data *d) 110 + { 111 + struct gpcv2_irqchip_data *cd = d->chip_data; 112 + void __iomem *reg; 113 + u32 val; 114 + 115 + raw_spin_lock(&cd->rlock); 116 + reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4; 117 + val = readl_relaxed(reg); 118 + val &= ~(1 << d->hwirq % 32); 119 + writel_relaxed(val, reg); 120 + raw_spin_unlock(&cd->rlock); 121 + 122 + irq_chip_unmask_parent(d); 123 + } 124 + 125 + static void imx_gpcv2_irq_mask(struct irq_data *d) 126 + { 127 + struct gpcv2_irqchip_data *cd = d->chip_data; 128 + void __iomem *reg; 129 + u32 val; 130 + 131 + raw_spin_lock(&cd->rlock); 132 + reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4; 133 + val = readl_relaxed(reg); 134 + val |= 1 << (d->hwirq % 32); 135 + writel_relaxed(val, reg); 136 + raw_spin_unlock(&cd->rlock); 137 + 138 + irq_chip_mask_parent(d); 139 + } 140 + 141 + static struct irq_chip gpcv2_irqchip_data_chip = { 142 + .name = "GPCv2", 143 + .irq_eoi = irq_chip_eoi_parent, 144 + .irq_mask = imx_gpcv2_irq_mask, 145 + .irq_unmask = imx_gpcv2_irq_unmask, 146 + .irq_set_wake = imx_gpcv2_irq_set_wake, 147 + .irq_retrigger = irq_chip_retrigger_hierarchy, 148 + #ifdef CONFIG_SMP 149 + .irq_set_affinity = 
irq_chip_set_affinity_parent, 150 + #endif 151 + }; 152 + 153 + static int imx_gpcv2_domain_xlate(struct irq_domain *domain, 154 + struct device_node *controller, 155 + const u32 *intspec, 156 + unsigned int intsize, 157 + unsigned long *out_hwirq, 158 + unsigned int *out_type) 159 + { 160 + /* Shouldn't happen, really... */ 161 + if (domain->of_node != controller) 162 + return -EINVAL; 163 + 164 + /* Not GIC compliant */ 165 + if (intsize != 3) 166 + return -EINVAL; 167 + 168 + /* No PPI should point to this domain */ 169 + if (intspec[0] != 0) 170 + return -EINVAL; 171 + 172 + *out_hwirq = intspec[1]; 173 + *out_type = intspec[2]; 174 + return 0; 175 + } 176 + 177 + static int imx_gpcv2_domain_alloc(struct irq_domain *domain, 178 + unsigned int irq, unsigned int nr_irqs, 179 + void *data) 180 + { 181 + struct of_phandle_args *args = data; 182 + struct of_phandle_args parent_args; 183 + irq_hw_number_t hwirq; 184 + int i; 185 + 186 + /* Not GIC compliant */ 187 + if (args->args_count != 3) 188 + return -EINVAL; 189 + 190 + /* No PPI should point to this domain */ 191 + if (args->args[0] != 0) 192 + return -EINVAL; 193 + 194 + /* Can't deal with this */ 195 + hwirq = args->args[1]; 196 + if (hwirq >= GPC_MAX_IRQS) 197 + return -EINVAL; 198 + 199 + for (i = 0; i < nr_irqs; i++) { 200 + irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, 201 + &gpcv2_irqchip_data_chip, domain->host_data); 202 + } 203 + 204 + parent_args = *args; 205 + parent_args.np = domain->parent->of_node; 206 + return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args); 207 + } 208 + 209 + static struct irq_domain_ops gpcv2_irqchip_data_domain_ops = { 210 + .xlate = imx_gpcv2_domain_xlate, 211 + .alloc = imx_gpcv2_domain_alloc, 212 + .free = irq_domain_free_irqs_common, 213 + }; 214 + 215 + static int __init imx_gpcv2_irqchip_init(struct device_node *node, 216 + struct device_node *parent) 217 + { 218 + struct irq_domain *parent_domain, *domain; 219 + struct gpcv2_irqchip_data 
*cd; 220 + int i; 221 + 222 + if (!parent) { 223 + pr_err("%s: no parent, giving up\n", node->full_name); 224 + return -ENODEV; 225 + } 226 + 227 + parent_domain = irq_find_host(parent); 228 + if (!parent_domain) { 229 + pr_err("%s: unable to get parent domain\n", node->full_name); 230 + return -ENXIO; 231 + } 232 + 233 + cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL); 234 + if (!cd) { 235 + pr_err("kzalloc failed!\n"); 236 + return -ENOMEM; 237 + } 238 + 239 + cd->gpc_base = of_iomap(node, 0); 240 + if (!cd->gpc_base) { 241 + pr_err("fsl-gpcv2: unable to map gpc registers\n"); 242 + kfree(cd); 243 + return -ENOMEM; 244 + } 245 + 246 + domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, 247 + node, &gpcv2_irqchip_data_domain_ops, cd); 248 + if (!domain) { 249 + iounmap(cd->gpc_base); 250 + kfree(cd); 251 + return -ENOMEM; 252 + } 253 + irq_set_default_host(domain); 254 + 255 + /* Initially mask all interrupts */ 256 + for (i = 0; i < IMR_NUM; i++) { 257 + writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4); 258 + writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4); 259 + cd->wakeup_sources[i] = ~0; 260 + } 261 + 262 + /* Let CORE0 as the default CPU to wake up by GPC */ 263 + cd->cpu2wakeup = GPC_IMR1_CORE0; 264 + 265 + /* 266 + * Due to hardware design failure, need to make sure GPR 267 + * interrupt(#32) is unmasked during RUN mode to avoid entering 268 + * DSM by mistake. 269 + */ 270 + writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup); 271 + 272 + imx_gpcv2_instance = cd; 273 + register_syscore_ops(&imx_gpcv2_syscore_ops); 274 + 275 + return 0; 276 + } 277 + 278 + IRQCHIP_DECLARE(imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
+1 -2
drivers/irqchip/irq-ingenic.c
··· 18 18 #include <linux/types.h> 19 19 #include <linux/interrupt.h> 20 20 #include <linux/ioport.h> 21 + #include <linux/irqchip.h> 21 22 #include <linux/irqchip/ingenic.h> 22 23 #include <linux/of_address.h> 23 24 #include <linux/of_irq.h> ··· 28 27 29 28 #include <asm/io.h> 30 29 #include <asm/mach-jz4740/irq.h> 31 - 32 - #include "irqchip.h" 33 30 34 31 struct ingenic_intc_data { 35 32 void __iomem *base;
+3 -3
drivers/irqchip/irq-keystone.c
··· 20 20 #include <linux/module.h> 21 21 #include <linux/moduleparam.h> 22 22 #include <linux/irqdomain.h> 23 + #include <linux/irqchip.h> 23 24 #include <linux/irqchip/chained_irq.h> 24 25 #include <linux/of.h> 25 26 #include <linux/of_platform.h> 26 27 #include <linux/mfd/syscon.h> 27 28 #include <linux/regmap.h> 28 - #include "irqchip.h" 29 - 30 29 31 30 /* The source ID bits start from 4 to 31 (total 28 bits)*/ 32 31 #define BIT_OFS 4 ··· 83 84 /* nothing to do here */ 84 85 } 85 86 86 - static void keystone_irq_handler(unsigned irq, struct irq_desc *desc) 87 + static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc) 87 88 { 89 + unsigned int irq = irq_desc_get_irq(desc); 88 90 struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc); 89 91 unsigned long pending; 90 92 int src, virq;
+4 -5
drivers/irqchip/irq-metag-ext.c
··· 404 404 #ifdef CONFIG_METAG_SUSPEND_MEM 405 405 struct meta_intc_priv *priv = &meta_intc_priv; 406 406 #endif 407 - unsigned int irq = data->irq; 408 407 irq_hw_number_t hw = data->hwirq; 409 408 unsigned int bit = 1 << meta_intc_offset(hw); 410 409 void __iomem *level_addr = meta_intc_level_addr(hw); ··· 412 413 413 414 /* update the chip/handler */ 414 415 if (flow_type & IRQ_TYPE_LEVEL_MASK) 415 - __irq_set_chip_handler_name_locked(irq, &meta_intc_level_chip, 416 - handle_level_irq, NULL); 416 + irq_set_chip_handler_name_locked(data, &meta_intc_level_chip, 417 + handle_level_irq, NULL); 417 418 else 418 - __irq_set_chip_handler_name_locked(irq, &meta_intc_edge_chip, 419 - handle_edge_irq, NULL); 419 + irq_set_chip_handler_name_locked(data, &meta_intc_edge_chip, 420 + handle_edge_irq, NULL); 420 421 421 422 /* and clear/set the bit in HWLEVELEXT */ 422 423 __global_lock2(flags);
+1 -2
drivers/irqchip/irq-metag.c
··· 286 286 int irq = tbisig_map(signum); 287 287 288 288 /* Register the multiplexed IRQ handler */ 289 - irq_set_handler_data(irq, priv); 290 - irq_set_chained_handler(irq, metag_internal_irq_demux); 289 + irq_set_chained_handler_and_data(irq, metag_internal_irq_demux, priv); 291 290 irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); 292 291 } 293 292
+1 -2
drivers/irqchip/irq-mips-cpu.c
··· 31 31 #include <linux/interrupt.h> 32 32 #include <linux/kernel.h> 33 33 #include <linux/irq.h> 34 + #include <linux/irqchip.h> 34 35 #include <linux/irqdomain.h> 35 36 36 37 #include <asm/irq_cpu.h> 37 38 #include <asm/mipsregs.h> 38 39 #include <asm/mipsmtregs.h> 39 40 #include <asm/setup.h> 40 - 41 - #include "irqchip.h" 42 41 43 42 static inline void unmask_mips_irq(struct irq_data *d) 44 43 {
+8 -12
drivers/irqchip/irq-mips-gic.c
··· 11 11 #include <linux/init.h> 12 12 #include <linux/interrupt.h> 13 13 #include <linux/irq.h> 14 + #include <linux/irqchip.h> 14 15 #include <linux/irqchip/mips-gic.h> 15 16 #include <linux/of_address.h> 16 17 #include <linux/sched.h> ··· 22 21 #include <asm/traps.h> 23 22 24 23 #include <dt-bindings/interrupt-controller/mips-gic.h> 25 - 26 - #include "irqchip.h" 27 24 28 25 unsigned int gic_present; 29 26 ··· 357 358 break; 358 359 } 359 360 360 - if (is_edge) { 361 - __irq_set_chip_handler_name_locked(d->irq, 362 - &gic_edge_irq_controller, 363 - handle_edge_irq, NULL); 364 - } else { 365 - __irq_set_chip_handler_name_locked(d->irq, 366 - &gic_level_irq_controller, 367 - handle_level_irq, NULL); 368 - } 361 + if (is_edge) 362 + irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller, 363 + handle_edge_irq, NULL); 364 + else 365 + irq_set_chip_handler_name_locked(d, &gic_level_irq_controller, 366 + handle_level_irq, NULL); 369 367 spin_unlock_irqrestore(&gic_lock, flags); 370 368 371 369 return 0; ··· 392 396 clear_bit(irq, pcpu_masks[i].pcpu_mask); 393 397 set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 394 398 395 - cpumask_copy(d->affinity, cpumask); 399 + cpumask_copy(irq_data_get_affinity_mask(d), cpumask); 396 400 spin_unlock_irqrestore(&gic_lock, flags); 397 401 398 402 return IRQ_SET_MASK_OK_NOCOPY;
+3 -3
drivers/irqchip/irq-mmp.c
··· 15 15 #include <linux/module.h> 16 16 #include <linux/init.h> 17 17 #include <linux/irq.h> 18 + #include <linux/irqchip.h> 18 19 #include <linux/irqdomain.h> 19 20 #include <linux/io.h> 20 21 #include <linux/ioport.h> ··· 24 23 25 24 #include <asm/exception.h> 26 25 #include <asm/hardirq.h> 27 - 28 - #include "irqchip.h" 29 26 30 27 #define MAX_ICU_NR 16 31 28 ··· 129 130 .irq_unmask = icu_unmask_irq, 130 131 }; 131 132 132 - static void icu_mux_irq_demux(unsigned int irq, struct irq_desc *desc) 133 + static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc) 133 134 { 135 + unsigned int irq = irq_desc_get_irq(desc); 134 136 struct irq_domain *domain; 135 137 struct icu_chip_data *data; 136 138 int i;
+1 -2
drivers/irqchip/irq-moxart.c
··· 12 12 13 13 #include <linux/io.h> 14 14 #include <linux/irq.h> 15 + #include <linux/irqchip.h> 15 16 #include <linux/of.h> 16 17 #include <linux/of_address.h> 17 18 #include <linux/of_irq.h> 18 19 #include <linux/irqdomain.h> 19 20 20 21 #include <asm/exception.h> 21 - 22 - #include "irqchip.h" 23 22 24 23 #define IRQ_SOURCE_REG 0 25 24 #define IRQ_MASK_REG 0x04
+1 -2
drivers/irqchip/irq-mtk-sysirq.c
··· 13 13 */ 14 14 15 15 #include <linux/irq.h> 16 + #include <linux/irqchip.h> 16 17 #include <linux/irqdomain.h> 17 18 #include <linux/of.h> 18 19 #include <linux/of_irq.h> ··· 21 20 #include <linux/io.h> 22 21 #include <linux/slab.h> 23 22 #include <linux/spinlock.h> 24 - 25 - #include "irqchip.h" 26 23 27 24 struct mtk_sysirq_chip_data { 28 25 spinlock_t lock;
+1 -2
drivers/irqchip/irq-mxs.c
··· 19 19 #include <linux/kernel.h> 20 20 #include <linux/init.h> 21 21 #include <linux/irq.h> 22 + #include <linux/irqchip.h> 22 23 #include <linux/irqdomain.h> 23 24 #include <linux/io.h> 24 25 #include <linux/of.h> ··· 27 26 #include <linux/of_irq.h> 28 27 #include <linux/stmp_device.h> 29 28 #include <asm/exception.h> 30 - 31 - #include "irqchip.h" 32 29 33 30 #define HW_ICOLL_VECTOR 0x0000 34 31 #define HW_ICOLL_LEVELACK 0x0010
+1 -2
drivers/irqchip/irq-nvic.c
··· 21 21 #include <linux/of.h> 22 22 #include <linux/of_address.h> 23 23 #include <linux/irq.h> 24 + #include <linux/irqchip.h> 24 25 #include <linux/irqdomain.h> 25 26 26 27 #include <asm/v7m.h> 27 28 #include <asm/exception.h> 28 - 29 - #include "irqchip.h" 30 29 31 30 #define NVIC_ISER 0x000 32 31 #define NVIC_ICER 0x080
+6 -32
drivers/irqchip/irq-omap-intc.c
··· 17 17 #include <linux/io.h> 18 18 19 19 #include <asm/exception.h> 20 + #include <linux/irqchip.h> 20 21 #include <linux/irqdomain.h> 21 22 #include <linux/of.h> 22 23 #include <linux/of_address.h> 23 24 #include <linux/of_irq.h> 24 - 25 - #include "irqchip.h" 26 25 27 26 /* Define these here for now until we drop all board-files */ 28 27 #define OMAP24XX_IC_BASE 0x480fe000 ··· 330 331 static asmlinkage void __exception_irq_entry 331 332 omap_intc_handle_irq(struct pt_regs *regs) 332 333 { 333 - u32 irqnr = 0; 334 - int handled_irq = 0; 335 - int i; 334 + u32 irqnr; 336 335 337 - do { 338 - for (i = 0; i < omap_nr_pending; i++) { 339 - irqnr = intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)); 340 - if (irqnr) 341 - goto out; 342 - } 343 - 344 - out: 345 - if (!irqnr) 346 - break; 347 - 348 - irqnr = intc_readl(INTC_SIR); 349 - irqnr &= ACTIVEIRQ_MASK; 350 - 351 - if (irqnr) { 352 - handle_domain_irq(domain, irqnr, regs); 353 - handled_irq = 1; 354 - } 355 - } while (irqnr); 356 - 357 - /* 358 - * If an irq is masked or deasserted while active, we will 359 - * keep ending up here with no irq handled. So remove it from 360 - * the INTC with an ack. 361 - */ 362 - if (!handled_irq) 363 - omap_ack_irq(NULL); 336 + irqnr = intc_readl(INTC_SIR); 337 + irqnr &= ACTIVEIRQ_MASK; 338 + WARN_ONCE(!irqnr, "Spurious IRQ ?\n"); 339 + handle_domain_irq(domain, irqnr, regs); 364 340 } 365 341 366 342 void __init omap3_init_irq(void)
+1 -2
drivers/irqchip/irq-or1k-pic.c
··· 9 9 */ 10 10 11 11 #include <linux/irq.h> 12 + #include <linux/irqchip.h> 12 13 #include <linux/of.h> 13 14 #include <linux/of_irq.h> 14 15 #include <linux/of_address.h> 15 - 16 - #include "irqchip.h" 17 16 18 17 /* OR1K PIC implementation */ 19 18
+4 -5
drivers/irqchip/irq-orion.c
··· 10 10 11 11 #include <linux/io.h> 12 12 #include <linux/irq.h> 13 + #include <linux/irqchip.h> 13 14 #include <linux/of.h> 14 15 #include <linux/of_address.h> 15 16 #include <linux/of_irq.h> 16 17 #include <asm/exception.h> 17 18 #include <asm/mach/irq.h> 18 - 19 - #include "irqchip.h" 20 19 21 20 /* 22 21 * Orion SoC main interrupt controller ··· 108 109 109 110 static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) 110 111 { 111 - struct irq_domain *d = irq_get_handler_data(irq); 112 + struct irq_domain *d = irq_desc_get_handler_data(desc); 112 113 113 114 struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0); 114 115 u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) & ··· 197 198 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK); 198 199 writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE); 199 200 200 - irq_set_handler_data(irq, domain); 201 - irq_set_chained_handler(irq, orion_bridge_irq_handler); 201 + irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler, 202 + domain); 202 203 203 204 return 0; 204 205 }
-2
drivers/irqchip/irq-renesas-h8300h.c
··· 11 11 #include <linux/of_irq.h> 12 12 #include <asm/io.h> 13 13 14 - #include "irqchip.h" 15 - 16 14 static const char ipr_bit[] = { 17 15 7, 6, 5, 5, 18 16 4, 4, 4, 4, 3, 3, 3, 3,
+1 -1
drivers/irqchip/irq-renesas-h8s.c
··· 5 5 */ 6 6 7 7 #include <linux/irq.h> 8 + #include <linux/irqchip.h> 8 9 #include <linux/of_address.h> 9 10 #include <linux/of_irq.h> 10 11 #include <asm/io.h> 11 - #include "irqchip.h" 12 12 13 13 static void *intc_baseaddr; 14 14 #define IPRA ((unsigned long)intc_baseaddr)
+6 -10
drivers/irqchip/irq-renesas-irqc.c
··· 53 53 struct irqc_irq { 54 54 int hw_irq; 55 55 int requested_irq; 56 - int domain_irq; 57 56 struct irqc_priv *p; 58 57 }; 59 58 ··· 69 70 70 71 static void irqc_dbg(struct irqc_irq *i, char *str) 71 72 { 72 - dev_dbg(&i->p->pdev->dev, "%s (%d:%d:%d)\n", 73 - str, i->requested_irq, i->hw_irq, i->domain_irq); 73 + dev_dbg(&i->p->pdev->dev, "%s (%d:%d)\n", 74 + str, i->requested_irq, i->hw_irq); 74 75 } 75 76 76 77 static void irqc_irq_enable(struct irq_data *d) ··· 144 145 if (ioread32(p->iomem + DETECT_STATUS) & bit) { 145 146 iowrite32(bit, p->iomem + DETECT_STATUS); 146 147 irqc_dbg(i, "demux2"); 147 - generic_handle_irq(i->domain_irq); 148 + generic_handle_irq(irq_find_mapping(p->irq_domain, i->hw_irq)); 148 149 return IRQ_HANDLED; 149 150 } 150 151 return IRQ_NONE; ··· 155 156 { 156 157 struct irqc_priv *p = h->host_data; 157 158 158 - p->irq[hw].domain_irq = virq; 159 - p->irq[hw].hw_irq = hw; 160 - 161 159 irqc_dbg(&p->irq[hw], "map"); 162 160 irq_set_chip_data(virq, h->host_data); 163 161 irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq); 164 - set_irq_flags(virq, IRQF_VALID); /* kill me now */ 165 162 return 0; 166 163 } 167 164 ··· 210 215 break; 211 216 212 217 p->irq[k].p = p; 218 + p->irq[k].hw_irq = k; 213 219 p->irq[k].requested_irq = irq->start; 214 220 } 215 221 ··· 239 243 irq_chip->irq_set_wake = irqc_irq_set_wake; 240 244 irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; 241 245 242 - p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, 243 - p->number_of_irqs, 0, 246 + p->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 247 + p->number_of_irqs, 244 248 &irqc_irq_domain_ops, p); 245 249 if (!p->irq_domain) { 246 250 ret = -ENXIO;
+4 -7
drivers/irqchip/irq-s3c24xx.c
··· 25 25 #include <linux/ioport.h> 26 26 #include <linux/device.h> 27 27 #include <linux/irqdomain.h> 28 + #include <linux/irqchip.h> 28 29 #include <linux/irqchip/chained_irq.h> 29 30 #include <linux/of.h> 30 31 #include <linux/of_irq.h> ··· 40 39 #include <plat/cpu.h> 41 40 #include <plat/regs-irqtype.h> 42 41 #include <plat/pm.h> 43 - 44 - #include "irqchip.h" 45 42 46 43 #define S3C_IRQTYPE_NONE 0 47 44 #define S3C_IRQTYPE_EINT 1 ··· 298 299 .irq_set_type = s3c_irqext0_type, 299 300 }; 300 301 301 - static void s3c_irq_demux(unsigned int irq, struct irq_desc *desc) 302 + static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc) 302 303 { 303 304 struct irq_chip *chip = irq_desc_get_chip(desc); 304 305 struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc); 305 306 struct s3c_irq_intc *intc = irq_data->intc; 306 307 struct s3c_irq_intc *sub_intc = irq_data->sub_intc; 307 - unsigned long src; 308 - unsigned long msk; 309 - unsigned int n; 310 - unsigned int offset; 308 + unsigned int n, offset, irq; 309 + unsigned long src, msk; 311 310 312 311 /* we're using individual domains for the non-dt case 313 312 * and one big domain for the dt case where the subintc
+24 -22
drivers/irqchip/irq-sirfsoc.c
··· 11 11 #include <linux/irq.h> 12 12 #include <linux/of.h> 13 13 #include <linux/of_address.h> 14 + #include <linux/irqchip.h> 14 15 #include <linux/irqdomain.h> 15 16 #include <linux/syscore_ops.h> 16 17 #include <asm/mach/irq.h> 17 18 #include <asm/exception.h> 18 - #include "irqchip.h" 19 19 20 - #define SIRFSOC_INT_RISC_MASK0 0x0018 21 - #define SIRFSOC_INT_RISC_MASK1 0x001C 22 - #define SIRFSOC_INT_RISC_LEVEL0 0x0020 23 - #define SIRFSOC_INT_RISC_LEVEL1 0x0024 20 + #define SIRFSOC_INT_RISC_MASK0 0x0018 21 + #define SIRFSOC_INT_RISC_MASK1 0x001C 22 + #define SIRFSOC_INT_RISC_LEVEL0 0x0020 23 + #define SIRFSOC_INT_RISC_LEVEL1 0x0024 24 24 #define SIRFSOC_INIT_IRQ_ID 0x0038 25 + #define SIRFSOC_INT_BASE_OFFSET 0x0004 25 26 26 27 #define SIRFSOC_NUM_IRQS 64 28 + #define SIRFSOC_NUM_BANKS (SIRFSOC_NUM_IRQS / 32) 27 29 28 30 static struct irq_domain *sirfsoc_irqdomain; 29 31 30 - static __init void 31 - sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) 32 + static __init void sirfsoc_alloc_gc(void __iomem *base) 32 33 { 33 - struct irq_chip_generic *gc; 34 - struct irq_chip_type *ct; 35 - int ret; 36 34 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 37 35 unsigned int set = IRQ_LEVEL; 36 + struct irq_chip_generic *gc; 37 + struct irq_chip_type *ct; 38 + int i; 38 39 39 - ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc", 40 - handle_level_irq, clr, set, IRQ_GC_INIT_MASK_CACHE); 40 + irq_alloc_domain_generic_chips(sirfsoc_irqdomain, 32, 1, "irq_sirfsoc", 41 + handle_level_irq, clr, set, 42 + IRQ_GC_INIT_MASK_CACHE); 41 43 42 - gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start); 43 - gc->reg_base = base; 44 - ct = gc->chip_types; 45 - ct->chip.irq_mask = irq_gc_mask_clr_bit; 46 - ct->chip.irq_unmask = irq_gc_mask_set_bit; 47 - ct->regs.mask = SIRFSOC_INT_RISC_MASK0; 44 + for (i = 0; i < SIRFSOC_NUM_BANKS; i++) { 45 + gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, i * 32); 46 + 
gc->reg_base = base + i * SIRFSOC_INT_BASE_OFFSET; 47 + ct = gc->chip_types; 48 + ct->chip.irq_mask = irq_gc_mask_clr_bit; 49 + ct->chip.irq_unmask = irq_gc_mask_set_bit; 50 + ct->regs.mask = SIRFSOC_INT_RISC_MASK0; 51 + } 48 52 } 49 53 50 54 static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) ··· 68 64 panic("unable to map intc cpu registers\n"); 69 65 70 66 sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS, 71 - &irq_generic_chip_ops, base); 72 - 73 - sirfsoc_alloc_gc(base, 0, 32); 74 - sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32); 67 + &irq_generic_chip_ops, base); 68 + sirfsoc_alloc_gc(base); 75 69 76 70 writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL0); 77 71 writel_relaxed(0, base + SIRFSOC_INT_RISC_LEVEL1);
+1 -2
drivers/irqchip/irq-sun4i.c
··· 16 16 17 17 #include <linux/io.h> 18 18 #include <linux/irq.h> 19 + #include <linux/irqchip.h> 19 20 #include <linux/of.h> 20 21 #include <linux/of_address.h> 21 22 #include <linux/of_irq.h> 22 23 23 24 #include <asm/exception.h> 24 25 #include <asm/mach/irq.h> 25 - 26 - #include "irqchip.h" 27 26 28 27 #define SUN4I_IRQ_VECTOR_REG 0x00 29 28 #define SUN4I_IRQ_PROTECTION_REG 0x08
+3 -4
drivers/irqchip/irq-sunxi-nmi.c
··· 17 17 #include <linux/of_irq.h> 18 18 #include <linux/of_address.h> 19 19 #include <linux/of_platform.h> 20 + #include <linux/irqchip.h> 20 21 #include <linux/irqchip/chained_irq.h> 21 - #include "irqchip.h" 22 22 23 23 #define SUNXI_NMI_SRC_TYPE_MASK 0x00000003 24 24 ··· 61 61 static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) 62 62 { 63 63 struct irq_domain *domain = irq_desc_get_handler_data(desc); 64 - struct irq_chip *chip = irq_get_chip(irq); 64 + struct irq_chip *chip = irq_desc_get_chip(desc); 65 65 unsigned int virq = irq_find_mapping(domain, 0); 66 66 67 67 chained_irq_enter(chip, desc); ··· 182 182 sunxi_sc_nmi_write(gc, reg_offs->enable, 0); 183 183 sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1); 184 184 185 - irq_set_handler_data(irq, domain); 186 - irq_set_chained_handler(irq, sunxi_sc_nmi_handle_irq); 185 + irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain); 187 186 188 187 return 0; 189 188
+5 -4
drivers/irqchip/irq-tb10x.c
··· 22 22 #include <linux/interrupt.h> 23 23 #include <linux/irqdomain.h> 24 24 #include <linux/irq.h> 25 + #include <linux/irqchip.h> 25 26 #include <linux/of_irq.h> 26 27 #include <linux/of_address.h> 27 28 #include <linux/of_platform.h> 28 29 #include <linux/io.h> 29 30 #include <linux/slab.h> 30 31 #include <linux/bitops.h> 31 - #include "irqchip.h" 32 32 33 33 #define AB_IRQCTL_INT_ENABLE 0x00 34 34 #define AB_IRQCTL_INT_STATUS 0x04 ··· 97 97 return IRQ_SET_MASK_OK; 98 98 } 99 99 100 - static void tb10x_irq_cascade(unsigned int irq, struct irq_desc *desc) 100 + static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc) 101 101 { 102 102 struct irq_domain *domain = irq_desc_get_handler_data(desc); 103 + unsigned int irq = irq_desc_get_irq(desc); 103 104 104 105 generic_handle_irq(irq_find_mapping(domain, irq)); 105 106 } ··· 174 173 for (i = 0; i < nrirqs; i++) { 175 174 unsigned int irq = irq_of_parse_and_map(ictl, i); 176 175 177 - irq_set_handler_data(irq, domain); 178 - irq_set_chained_handler(irq, tb10x_irq_cascade); 176 + irq_set_chained_handler_and_data(irq, tb10x_irq_cascade, 177 + domain); 179 178 } 180 179 181 180 ab_irqctl_writereg(gc, AB_IRQCTL_INT_ENABLE, 0);
+1 -2
drivers/irqchip/irq-tegra.c
··· 24 24 25 25 #include <linux/io.h> 26 26 #include <linux/irq.h> 27 + #include <linux/irqchip.h> 27 28 #include <linux/irqdomain.h> 28 29 #include <linux/of_address.h> 29 30 #include <linux/slab.h> 30 31 #include <linux/syscore_ops.h> 31 32 32 33 #include <dt-bindings/interrupt-controller/arm-gic.h> 33 - 34 - #include "irqchip.h" 35 34 36 35 #define ICTLR_CPU_IEP_VFIQ 0x08 37 36 #define ICTLR_CPU_IEP_FIR 0x14
+5 -5
drivers/irqchip/irq-versatile-fpga.c
··· 4 4 #include <linux/bitops.h> 5 5 #include <linux/irq.h> 6 6 #include <linux/io.h> 7 + #include <linux/irqchip.h> 7 8 #include <linux/irqchip/versatile-fpga.h> 8 9 #include <linux/irqdomain.h> 9 10 #include <linux/module.h> ··· 14 13 15 14 #include <asm/exception.h> 16 15 #include <asm/mach/irq.h> 17 - 18 - #include "irqchip.h" 19 16 20 17 #define IRQ_STATUS 0x00 21 18 #define IRQ_RAW_STATUS 0x04 ··· 65 66 writel(mask, f->base + IRQ_ENABLE_SET); 66 67 } 67 68 68 - static void fpga_irq_handle(unsigned int irq, struct irq_desc *desc) 69 + static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc) 69 70 { 70 71 struct fpga_irq_data *f = irq_desc_get_handler_data(desc); 72 + unsigned int irq = irq_desc_get_irq(desc); 71 73 u32 status = readl(f->base + IRQ_STATUS); 72 74 73 75 if (status == 0) { ··· 156 156 f->valid = valid; 157 157 158 158 if (parent_irq != -1) { 159 - irq_set_handler_data(parent_irq, f); 160 - irq_set_chained_handler(parent_irq, fpga_irq_handle); 159 + irq_set_chained_handler_and_data(parent_irq, fpga_irq_handle, 160 + f); 161 161 } 162 162 163 163 /* This will also allocate irq descriptors */
+1 -2
drivers/irqchip/irq-vf610-mscm-ir.c
··· 26 26 #include <linux/cpu_pm.h> 27 27 #include <linux/io.h> 28 28 #include <linux/irq.h> 29 + #include <linux/irqchip.h> 29 30 #include <linux/irqdomain.h> 30 31 #include <linux/mfd/syscon.h> 31 32 #include <dt-bindings/interrupt-controller/arm-gic.h> ··· 34 33 #include <linux/of_address.h> 35 34 #include <linux/slab.h> 36 35 #include <linux/regmap.h> 37 - 38 - #include "irqchip.h" 39 36 40 37 #define MSCM_CPxNUM 0x4 41 38
+3 -4
drivers/irqchip/irq-vic.c
··· 24 24 #include <linux/list.h> 25 25 #include <linux/io.h> 26 26 #include <linux/irq.h> 27 + #include <linux/irqchip.h> 27 28 #include <linux/irqchip/chained_irq.h> 28 29 #include <linux/irqdomain.h> 29 30 #include <linux/of.h> ··· 37 36 38 37 #include <asm/exception.h> 39 38 #include <asm/irq.h> 40 - 41 - #include "irqchip.h" 42 39 43 40 #define VIC_IRQ_STATUS 0x00 44 41 #define VIC_FIQ_STATUS 0x04 ··· 296 297 vic_id++; 297 298 298 299 if (parent_irq) { 299 - irq_set_handler_data(parent_irq, v); 300 - irq_set_chained_handler(parent_irq, vic_handle_irq_cascaded); 300 + irq_set_chained_handler_and_data(parent_irq, 301 + vic_handle_irq_cascaded, v); 301 302 } 302 303 303 304 v->domain = irq_domain_add_simple(node, fls(valid_sources), irq,
+4 -5
drivers/irqchip/irq-vt8500.c
··· 27 27 #include <linux/slab.h> 28 28 #include <linux/io.h> 29 29 #include <linux/irq.h> 30 + #include <linux/irqchip.h> 30 31 #include <linux/irqdomain.h> 31 32 #include <linux/interrupt.h> 32 33 #include <linux/bitops.h> ··· 39 38 #include <asm/irq.h> 40 39 #include <asm/exception.h> 41 40 #include <asm/mach/irq.h> 42 - 43 - #include "irqchip.h" 44 41 45 42 #define VT8500_ICPC_IRQ 0x20 46 43 #define VT8500_ICPC_FIQ 0x24 ··· 126 127 return -EINVAL; 127 128 case IRQF_TRIGGER_HIGH: 128 129 dctr |= VT8500_TRIGGER_HIGH; 129 - __irq_set_handler_locked(d->irq, handle_level_irq); 130 + irq_set_handler_locked(d, handle_level_irq); 130 131 break; 131 132 case IRQF_TRIGGER_FALLING: 132 133 dctr |= VT8500_TRIGGER_FALLING; 133 - __irq_set_handler_locked(d->irq, handle_edge_irq); 134 + irq_set_handler_locked(d, handle_edge_irq); 134 135 break; 135 136 case IRQF_TRIGGER_RISING: 136 137 dctr |= VT8500_TRIGGER_RISING; 137 - __irq_set_handler_locked(d->irq, handle_edge_irq); 138 + irq_set_handler_locked(d, handle_edge_irq); 138 139 break; 139 140 } 140 141 writeb(dctr, base + VT8500_ICDC + d->hwirq);
+1 -2
drivers/irqchip/irq-xtensa-mx.c
··· 11 11 #include <linux/interrupt.h> 12 12 #include <linux/irqdomain.h> 13 13 #include <linux/irq.h> 14 + #include <linux/irqchip.h> 14 15 #include <linux/of.h> 15 16 16 17 #include <asm/mxregs.h> 17 - 18 - #include "irqchip.h" 19 18 20 19 #define HW_IRQ_IPI_COUNT 2 21 20 #define HW_IRQ_MX_BASE 2
+1 -2
drivers/irqchip/irq-xtensa-pic.c
··· 15 15 #include <linux/interrupt.h> 16 16 #include <linux/irqdomain.h> 17 17 #include <linux/irq.h> 18 + #include <linux/irqchip.h> 18 19 #include <linux/of.h> 19 - 20 - #include "irqchip.h" 21 20 22 21 unsigned int cached_irq_mask; 23 22
+1 -2
drivers/irqchip/irq-zevio.c
··· 11 11 12 12 #include <linux/io.h> 13 13 #include <linux/irq.h> 14 + #include <linux/irqchip.h> 14 15 #include <linux/of.h> 15 16 #include <linux/of_address.h> 16 17 #include <linux/of_irq.h> 17 18 18 19 #include <asm/mach/irq.h> 19 20 #include <asm/exception.h> 20 - 21 - #include "irqchip.h" 22 21 23 22 #define IO_STATUS 0x000 24 23 #define IO_RAW_STATUS 0x004
-11
drivers/irqchip/irqchip.h
··· 1 - /* 2 - * Copyright (C) 2012 Thomas Petazzoni 3 - * 4 - * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> 5 - * 6 - * This file is licensed under the terms of the GNU General Public 7 - * License version 2. This program is licensed "as is" without any 8 - * warranty of any kind, whether express or implied. 9 - */ 10 - 11 - #include <linux/irqchip.h>
+3 -4
drivers/irqchip/spear-shirq.c
··· 18 18 #include <linux/interrupt.h> 19 19 #include <linux/io.h> 20 20 #include <linux/irq.h> 21 + #include <linux/irqchip.h> 21 22 #include <linux/irqdomain.h> 22 23 #include <linux/of.h> 23 24 #include <linux/of_address.h> 24 25 #include <linux/of_irq.h> 25 26 #include <linux/spinlock.h> 26 - 27 - #include "irqchip.h" 28 27 29 28 /* 30 29 * struct spear_shirq: shared irq structure ··· 182 183 &spear320_shirq_intrcomm_ras, 183 184 }; 184 185 185 - static void shirq_handler(unsigned irq, struct irq_desc *desc) 186 + static void shirq_handler(unsigned __irq, struct irq_desc *desc) 186 187 { 187 - struct spear_shirq *shirq = irq_get_handler_data(irq); 188 + struct spear_shirq *shirq = irq_desc_get_handler_data(desc); 188 189 u32 pend; 189 190 190 191 pend = readl(shirq->base + shirq->status_reg) & shirq->mask;
+21
drivers/of/irq.c
··· 18 18 * driver. 19 19 */ 20 20 21 + #include <linux/device.h> 21 22 #include <linux/errno.h> 22 23 #include <linux/list.h> 23 24 #include <linux/module.h> ··· 576 575 list_del(&desc->list); 577 576 kfree(desc); 578 577 } 578 + } 579 + 580 + /** 581 + * of_msi_configure - Set the msi_domain field of a device 582 + * @dev: device structure to associate with an MSI irq domain 583 + * @np: device node for that device 584 + */ 585 + void of_msi_configure(struct device *dev, struct device_node *np) 586 + { 587 + struct device_node *msi_np; 588 + struct irq_domain *d; 589 + 590 + msi_np = of_parse_phandle(np, "msi-parent", 0); 591 + if (!msi_np) 592 + return; 593 + 594 + d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI); 595 + if (!d) 596 + d = irq_find_host(msi_np); 597 + dev_set_msi_domain(dev, d); 579 598 }
+1
drivers/of/platform.c
··· 184 184 dev->dev.bus = &platform_bus_type; 185 185 dev->dev.platform_data = platform_data; 186 186 of_dma_configure(&dev->dev, dev->dev.of_node); 187 + of_msi_configure(&dev->dev, dev->dev.of_node); 187 188 188 189 if (of_device_add(dev) != 0) { 189 190 of_dma_deconfigure(&dev->dev);
+1 -1
drivers/parisc/iosapic.c
··· 691 691 if (dest_cpu < 0) 692 692 return -1; 693 693 694 - cpumask_copy(d->affinity, cpumask_of(dest_cpu)); 694 + cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(dest_cpu)); 695 695 vi->txn_addr = txn_affinity_addr(d->irq, dest_cpu); 696 696 697 697 spin_lock_irqsave(&iosapic_lock, flags);
+9 -12
drivers/pci/host/pci-keystone-dw.c
··· 104 104 { 105 105 u32 offset, reg_offset, bit_pos; 106 106 struct keystone_pcie *ks_pcie; 107 - unsigned int irq = d->irq; 108 107 struct msi_desc *msi; 109 108 struct pcie_port *pp; 110 109 111 - msi = irq_get_msi_desc(irq); 112 - pp = sys_to_pcie(msi->dev->bus->sysdata); 110 + msi = irq_data_get_msi_desc(d); 111 + pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 113 112 ks_pcie = to_keystone_pcie(pp); 114 - offset = irq - irq_linear_revmap(pp->irq_domain, 0); 113 + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 115 114 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos); 116 115 117 116 writel(BIT(bit_pos), ··· 141 142 static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) 142 143 { 143 144 struct keystone_pcie *ks_pcie; 144 - unsigned int irq = d->irq; 145 145 struct msi_desc *msi; 146 146 struct pcie_port *pp; 147 147 u32 offset; 148 148 149 - msi = irq_get_msi_desc(irq); 150 - pp = sys_to_pcie(msi->dev->bus->sysdata); 149 + msi = irq_data_get_msi_desc(d); 150 + pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 151 151 ks_pcie = to_keystone_pcie(pp); 152 - offset = irq - irq_linear_revmap(pp->irq_domain, 0); 152 + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 153 153 154 154 /* Mask the end point if PVM implemented */ 155 155 if (IS_ENABLED(CONFIG_PCI_MSI)) { ··· 162 164 static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d) 163 165 { 164 166 struct keystone_pcie *ks_pcie; 165 - unsigned int irq = d->irq; 166 167 struct msi_desc *msi; 167 168 struct pcie_port *pp; 168 169 u32 offset; 169 170 170 - msi = irq_get_msi_desc(irq); 171 - pp = sys_to_pcie(msi->dev->bus->sysdata); 171 + msi = irq_data_get_msi_desc(d); 172 + pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 172 173 ks_pcie = to_keystone_pcie(pp); 173 - offset = irq - irq_linear_revmap(pp->irq_domain, 0); 174 + offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 174 175 175 176 /* Mask the end point if PVM implemented */ 176 177 if (IS_ENABLED(CONFIG_PCI_MSI)) {
+8 -5
drivers/pci/host/pci-keystone.c
··· 110 110 return -EINVAL; 111 111 } 112 112 113 - static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc) 113 + static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc) 114 114 { 115 + unsigned int irq = irq_desc_get_irq(desc); 115 116 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); 116 117 u32 offset = irq - ks_pcie->msi_host_irqs[0]; 117 118 struct pcie_port *pp = &ks_pcie->pp; ··· 138 137 * Traverse through pending legacy interrupts and invoke handler for each. Also 139 138 * takes care of interrupt controller level mask/ack operation. 140 139 */ 141 - static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc) 140 + static void ks_pcie_legacy_irq_handler(unsigned int __irq, 141 + struct irq_desc *desc) 142 142 { 143 + unsigned int irq = irq_desc_get_irq(desc); 143 144 struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); 144 145 struct pcie_port *pp = &ks_pcie->pp; 145 146 u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; ··· 215 212 216 213 /* Legacy IRQ */ 217 214 for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { 218 - irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie); 219 - irq_set_chained_handler(ks_pcie->legacy_host_irqs[i], 220 - ks_pcie_legacy_irq_handler); 215 + irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], 216 + ks_pcie_legacy_irq_handler, 217 + ks_pcie); 221 218 } 222 219 ks_dw_pcie_enable_legacy_irqs(ks_pcie); 223 220
+22 -33
drivers/pci/host/pci-xgene-msi.c
··· 40 40 41 41 struct xgene_msi { 42 42 struct device_node *node; 43 - struct msi_controller mchip; 44 - struct irq_domain *domain; 43 + struct irq_domain *inner_domain; 44 + struct irq_domain *msi_domain; 45 45 u64 msi_addr; 46 46 void __iomem *msi_regs; 47 47 unsigned long *bitmap; ··· 251 251 252 252 static int xgene_allocate_domains(struct xgene_msi *msi) 253 253 { 254 - msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC, 255 - &msi_domain_ops, msi); 256 - if (!msi->domain) 254 + msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC, 255 + &msi_domain_ops, msi); 256 + if (!msi->inner_domain) 257 257 return -ENOMEM; 258 258 259 - msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node, 260 - &xgene_msi_domain_info, 261 - msi->domain); 259 + msi->msi_domain = pci_msi_create_irq_domain(msi->node, 260 + &xgene_msi_domain_info, 261 + msi->inner_domain); 262 262 263 - if (!msi->mchip.domain) { 264 - irq_domain_remove(msi->domain); 263 + if (!msi->msi_domain) { 264 + irq_domain_remove(msi->inner_domain); 265 265 return -ENOMEM; 266 266 } 267 267 ··· 270 270 271 271 static void xgene_free_domains(struct xgene_msi *msi) 272 272 { 273 - if (msi->mchip.domain) 274 - irq_domain_remove(msi->mchip.domain); 275 - if (msi->domain) 276 - irq_domain_remove(msi->domain); 273 + if (msi->msi_domain) 274 + irq_domain_remove(msi->msi_domain); 275 + if (msi->inner_domain) 276 + irq_domain_remove(msi->inner_domain); 277 277 } 278 278 279 279 static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) ··· 339 339 * CPU0 340 340 */ 341 341 hw_irq = hwirq_to_canonical_hwirq(hw_irq); 342 - virq = irq_find_mapping(xgene_msi->domain, hw_irq); 342 + virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq); 343 343 WARN_ON(!virq); 344 344 if (virq != 0) 345 345 generic_handle_irq(virq); ··· 367 367 368 368 for (i = 0; i < NR_HW_IRQS; i++) { 369 369 virq = msi->msi_groups[i].gic_irq; 370 - if (virq != 0) { 371 - irq_set_chained_handler(virq, NULL); 372 - 
irq_set_handler_data(virq, NULL); 373 - } 370 + if (virq != 0) 371 + irq_set_chained_handler_and_data(virq, NULL, NULL); 374 372 } 375 373 kfree(msi->msi_groups); 376 374 ··· 418 420 } 419 421 420 422 if (err) { 421 - irq_set_chained_handler(msi_group->gic_irq, NULL); 422 - irq_set_handler_data(msi_group->gic_irq, NULL); 423 + irq_set_chained_handler_and_data(msi_group->gic_irq, 424 + NULL, NULL); 423 425 return err; 424 426 } 425 427 } ··· 438 440 if (!msi_group->gic_irq) 439 441 continue; 440 442 441 - irq_set_chained_handler(msi_group->gic_irq, NULL); 442 - irq_set_handler_data(msi_group->gic_irq, NULL); 443 + irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, 444 + NULL); 443 445 } 444 446 } 445 447 ··· 494 496 goto error; 495 497 } 496 498 xgene_msi->msi_addr = res->start; 497 - 499 + xgene_msi->node = pdev->dev.of_node; 498 500 xgene_msi->num_cpus = num_possible_cpus(); 499 501 500 502 rc = xgene_msi_init_allocator(xgene_msi); ··· 558 560 559 561 cpu_notifier_register_done(); 560 562 561 - xgene_msi->mchip.of_node = pdev->dev.of_node; 562 - rc = of_pci_msi_chip_add(&xgene_msi->mchip); 563 - if (rc) { 564 - dev_err(&pdev->dev, "failed to add MSI controller chip\n"); 565 - goto error_notifier; 566 - } 567 - 568 563 dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); 569 564 570 565 return 0; 571 566 572 - error_notifier: 573 - unregister_hotcpu_notifier(&xgene_msi_cpu_notifier); 574 567 error: 575 568 xgene_msi_remove(pdev); 576 569 return rc;
+3 -3
drivers/pci/host/pcie-designware.c
··· 255 255 static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) 256 256 { 257 257 int irq, pos0, i; 258 - struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); 258 + struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(desc)); 259 259 260 260 pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS, 261 261 order_base_2(no_irqs)); ··· 326 326 static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) 327 327 { 328 328 struct irq_data *data = irq_get_irq_data(irq); 329 - struct msi_desc *msi = irq_data_get_msi(data); 330 - struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata); 329 + struct msi_desc *msi = irq_data_get_msi_desc(data); 330 + struct pcie_port *pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 331 331 332 332 clear_irq_range(pp, irq, 1, data->hwirq); 333 333 }
+5 -7
drivers/pci/host/pcie-xilinx.c
··· 227 227 */ 228 228 static void xilinx_pcie_destroy_msi(unsigned int irq) 229 229 { 230 - struct irq_desc *desc; 231 230 struct msi_desc *msi; 232 231 struct xilinx_pcie_port *port; 233 232 234 - desc = irq_to_desc(irq); 235 - msi = irq_desc_get_msi_desc(desc); 236 - port = sys_to_pcie(msi->dev->bus->sysdata); 237 - 238 - if (!test_bit(irq, msi_irq_in_use)) 233 + if (!test_bit(irq, msi_irq_in_use)) { 234 + msi = irq_get_msi_desc(irq); 235 + port = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 239 236 dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); 240 - else 237 + } else { 241 238 clear_bit(irq, msi_irq_in_use); 239 + } 242 240 } 243 241 244 242 /**
+58 -48
drivers/pci/msi.c
··· 39 39 40 40 static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev) 41 41 { 42 - struct irq_domain *domain = NULL; 42 + struct irq_domain *domain; 43 43 44 - if (dev->bus->msi) 45 - domain = dev->bus->msi->domain; 46 - if (!domain) 47 - domain = arch_get_pci_msi_domain(dev); 44 + domain = dev_get_msi_domain(&dev->dev); 45 + if (domain) 46 + return domain; 48 47 49 - return domain; 48 + return arch_get_pci_msi_domain(dev); 50 49 } 51 50 52 51 static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) ··· 115 116 if (type == PCI_CAP_ID_MSI && nvec > 1) 116 117 return 1; 117 118 118 - list_for_each_entry(entry, &dev->msi_list, list) { 119 + for_each_pci_msi_entry(entry, dev) { 119 120 ret = arch_setup_msi_irq(dev, entry); 120 121 if (ret < 0) 121 122 return ret; ··· 135 136 int i; 136 137 struct msi_desc *entry; 137 138 138 - list_for_each_entry(entry, &dev->msi_list, list) 139 + for_each_pci_msi_entry(entry, dev) 139 140 if (entry->irq) 140 141 for (i = 0; i < entry->nvec_used; i++) 141 142 arch_teardown_msi_irq(entry->irq + i); ··· 152 153 153 154 entry = NULL; 154 155 if (dev->msix_enabled) { 155 - list_for_each_entry(entry, &dev->msi_list, list) { 156 + for_each_pci_msi_entry(entry, dev) { 156 157 if (irq == entry->irq) 157 158 break; 158 159 } ··· 192 193 193 194 mask_bits &= ~mask; 194 195 mask_bits |= flag; 195 - pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); 196 + pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, 197 + mask_bits); 196 198 197 199 return mask_bits; 198 200 } ··· 234 234 235 235 static void msi_set_mask_bit(struct irq_data *data, u32 flag) 236 236 { 237 - struct msi_desc *desc = irq_data_get_msi(data); 237 + struct msi_desc *desc = irq_data_get_msi_desc(data); 238 238 239 239 if (desc->msi_attrib.is_msix) { 240 240 msix_mask_irq(desc, flag); ··· 267 267 { 268 268 struct msi_desc *entry; 269 269 270 - list_for_each_entry(entry, &dev->msi_list, list) 270 + for_each_pci_msi_entry(entry, 
dev) 271 271 default_restore_msi_irq(dev, entry->irq); 272 272 } 273 273 274 274 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 275 275 { 276 - BUG_ON(entry->dev->current_state != PCI_D0); 276 + struct pci_dev *dev = msi_desc_to_pci_dev(entry); 277 + 278 + BUG_ON(dev->current_state != PCI_D0); 277 279 278 280 if (entry->msi_attrib.is_msix) { 279 281 void __iomem *base = entry->mask_base + ··· 285 283 msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); 286 284 msg->data = readl(base + PCI_MSIX_ENTRY_DATA); 287 285 } else { 288 - struct pci_dev *dev = entry->dev; 289 286 int pos = dev->msi_cap; 290 287 u16 data; 291 288 ··· 304 303 305 304 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 306 305 { 307 - if (entry->dev->current_state != PCI_D0) { 306 + struct pci_dev *dev = msi_desc_to_pci_dev(entry); 307 + 308 + if (dev->current_state != PCI_D0) { 308 309 /* Don't touch the hardware now */ 309 310 } else if (entry->msi_attrib.is_msix) { 310 311 void __iomem *base; ··· 317 314 writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); 318 315 writel(msg->data, base + PCI_MSIX_ENTRY_DATA); 319 316 } else { 320 - struct pci_dev *dev = entry->dev; 321 317 int pos = dev->msi_cap; 322 318 u16 msgctl; 323 319 ··· 350 348 351 349 static void free_msi_irqs(struct pci_dev *dev) 352 350 { 351 + struct list_head *msi_list = dev_to_msi_list(&dev->dev); 353 352 struct msi_desc *entry, *tmp; 354 353 struct attribute **msi_attrs; 355 354 struct device_attribute *dev_attr; 356 355 int i, count = 0; 357 356 358 - list_for_each_entry(entry, &dev->msi_list, list) 357 + for_each_pci_msi_entry(entry, dev) 359 358 if (entry->irq) 360 359 for (i = 0; i < entry->nvec_used; i++) 361 360 BUG_ON(irq_has_action(entry->irq + i)); 362 361 363 362 pci_msi_teardown_msi_irqs(dev); 364 363 365 - list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { 364 + list_for_each_entry_safe(entry, tmp, msi_list, list) { 366 365 if 
(entry->msi_attrib.is_msix) { 367 - if (list_is_last(&entry->list, &dev->msi_list)) 366 + if (list_is_last(&entry->list, msi_list)) 368 367 iounmap(entry->mask_base); 369 368 } 370 369 ··· 388 385 kfree(dev->msi_irq_groups); 389 386 dev->msi_irq_groups = NULL; 390 387 } 391 - } 392 - 393 - static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) 394 - { 395 - struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); 396 - if (!desc) 397 - return NULL; 398 - 399 - INIT_LIST_HEAD(&desc->list); 400 - desc->dev = dev; 401 - 402 - return desc; 403 388 } 404 389 405 390 static void pci_intx_for_msi(struct pci_dev *dev, int enable) ··· 424 433 425 434 if (!dev->msix_enabled) 426 435 return; 427 - BUG_ON(list_empty(&dev->msi_list)); 436 + BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); 428 437 429 438 /* route the table */ 430 439 pci_intx_for_msi(dev, 0); ··· 432 441 PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); 433 442 434 443 arch_restore_msi_irqs(dev); 435 - list_for_each_entry(entry, &dev->msi_list, list) 444 + for_each_pci_msi_entry(entry, dev) 436 445 msix_mask_irq(entry, entry->masked); 437 446 438 447 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); ··· 477 486 int count = 0; 478 487 479 488 /* Determine how many msi entries we have */ 480 - list_for_each_entry(entry, &pdev->msi_list, list) 489 + for_each_pci_msi_entry(entry, pdev) 481 490 ++num_msi; 482 491 if (!num_msi) 483 492 return 0; ··· 486 495 msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL); 487 496 if (!msi_attrs) 488 497 return -ENOMEM; 489 - list_for_each_entry(entry, &pdev->msi_list, list) { 498 + for_each_pci_msi_entry(entry, pdev) { 490 499 msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); 491 500 if (!msi_dev_attr) 492 501 goto error_attrs; ··· 544 553 struct msi_desc *entry; 545 554 546 555 /* MSI Entry Initialization */ 547 - entry = alloc_msi_entry(dev); 556 + entry = alloc_msi_entry(&dev->dev); 548 557 if (!entry) 549 558 return NULL; 550 559 ··· 575 
584 { 576 585 struct msi_desc *entry; 577 586 578 - list_for_each_entry(entry, &dev->msi_list, list) { 587 + for_each_pci_msi_entry(entry, dev) { 579 588 if (!dev->no_64bit_msi || !entry->msg.address_hi) 580 589 continue; 581 590 dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" ··· 612 621 mask = msi_mask(entry->msi_attrib.multi_cap); 613 622 msi_mask_irq(entry, mask, mask); 614 623 615 - list_add_tail(&entry->list, &dev->msi_list); 624 + list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); 616 625 617 626 /* Configure MSI capability structure */ 618 627 ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); ··· 673 682 int i; 674 683 675 684 for (i = 0; i < nvec; i++) { 676 - entry = alloc_msi_entry(dev); 685 + entry = alloc_msi_entry(&dev->dev); 677 686 if (!entry) { 678 687 if (!i) 679 688 iounmap(base); ··· 690 699 entry->mask_base = base; 691 700 entry->nvec_used = 1; 692 701 693 - list_add_tail(&entry->list, &dev->msi_list); 702 + list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); 694 703 } 695 704 696 705 return 0; ··· 702 711 struct msi_desc *entry; 703 712 int i = 0; 704 713 705 - list_for_each_entry(entry, &dev->msi_list, list) { 714 + for_each_pci_msi_entry(entry, dev) { 706 715 int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + 707 716 PCI_MSIX_ENTRY_VECTOR_CTRL; 708 717 ··· 783 792 struct msi_desc *entry; 784 793 int avail = 0; 785 794 786 - list_for_each_entry(entry, &dev->msi_list, list) { 795 + for_each_pci_msi_entry(entry, dev) { 787 796 if (entry->irq != 0) 788 797 avail++; 789 798 } ··· 872 881 if (!pci_msi_enable || !dev || !dev->msi_enabled) 873 882 return; 874 883 875 - BUG_ON(list_empty(&dev->msi_list)); 876 - desc = list_first_entry(&dev->msi_list, struct msi_desc, list); 884 + BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); 885 + desc = first_pci_msi_entry(dev); 877 886 878 887 pci_msi_set_enable(dev, 0); 879 888 pci_intx_for_msi(dev, 1); ··· 979 988 return; 980 989 981 990 /* Return the device with MSI-X masked as 
initial states */ 982 - list_for_each_entry(entry, &dev->msi_list, list) { 991 + for_each_pci_msi_entry(entry, dev) { 983 992 /* Keep cached states to be restored */ 984 993 __pci_msix_desc_mask_irq(entry, 1); 985 994 } ··· 1019 1028 1020 1029 void pci_msi_init_pci_dev(struct pci_dev *dev) 1021 1030 { 1022 - INIT_LIST_HEAD(&dev->msi_list); 1023 1031 } 1024 1032 1025 1033 /** ··· 1115 1125 } 1116 1126 EXPORT_SYMBOL(pci_enable_msix_range); 1117 1127 1128 + struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) 1129 + { 1130 + return to_pci_dev(desc->dev); 1131 + } 1132 + 1133 + void *msi_desc_to_pci_sysdata(struct msi_desc *desc) 1134 + { 1135 + struct pci_dev *dev = msi_desc_to_pci_dev(desc); 1136 + 1137 + return dev->bus->sysdata; 1138 + } 1139 + EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata); 1140 + 1118 1141 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN 1119 1142 /** 1120 1143 * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space ··· 1136 1133 */ 1137 1134 void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) 1138 1135 { 1139 - struct msi_desc *desc = irq_data->msi_desc; 1136 + struct msi_desc *desc = irq_data_get_msi_desc(irq_data); 1140 1137 1141 1138 /* 1142 1139 * For MSI-X desc->irq is always equal to irq_data->irq. For ··· 1260 1257 struct msi_domain_info *info, 1261 1258 struct irq_domain *parent) 1262 1259 { 1260 + struct irq_domain *domain; 1261 + 1263 1262 if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) 1264 1263 pci_msi_domain_update_dom_ops(info); 1265 1264 if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) 1266 1265 pci_msi_domain_update_chip_ops(info); 1267 1266 1268 - return msi_create_irq_domain(node, info, parent); 1267 + domain = msi_create_irq_domain(node, info, parent); 1268 + if (!domain) 1269 + return NULL; 1270 + 1271 + domain->bus_token = DOMAIN_BUS_PCI_MSI; 1272 + return domain; 1269 1273 } 1270 1274 1271 1275 /**
+30
drivers/pci/of.c
··· 9 9 * 2 of the License, or (at your option) any later version. 10 10 */ 11 11 12 + #include <linux/irqdomain.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/pci.h> 14 15 #include <linux/of.h> ··· 59 58 if (bus->bridge->parent && bus->bridge->parent->of_node) 60 59 return of_node_get(bus->bridge->parent->of_node); 61 60 return NULL; 61 + } 62 + 63 + struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus) 64 + { 65 + #ifdef CONFIG_IRQ_DOMAIN 66 + struct device_node *np; 67 + struct irq_domain *d; 68 + 69 + if (!bus->dev.of_node) 70 + return NULL; 71 + 72 + /* Start looking for a phandle to an MSI controller. */ 73 + np = of_parse_phandle(bus->dev.of_node, "msi-parent", 0); 74 + 75 + /* 76 + * If we don't have an msi-parent property, look for a domain 77 + * directly attached to the host bridge. 78 + */ 79 + if (!np) 80 + np = bus->dev.of_node; 81 + 82 + d = irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI); 83 + if (d) 84 + return d; 85 + 86 + return irq_find_host(np); 87 + #else 88 + return NULL; 89 + #endif 62 90 }
+45
drivers/pci/probe.c
··· 661 661 } 662 662 } 663 663 664 + static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus) 665 + { 666 + struct irq_domain *d; 667 + 668 + /* 669 + * Any firmware interface that can resolve the msi_domain 670 + * should be called from here. 671 + */ 672 + d = pci_host_bridge_of_msi_domain(bus); 673 + 674 + return d; 675 + } 676 + 677 + static void pci_set_bus_msi_domain(struct pci_bus *bus) 678 + { 679 + struct irq_domain *d; 680 + 681 + /* 682 + * Either bus is the root, and we must obtain it from the 683 + * firmware, or we inherit it from the bridge device. 684 + */ 685 + if (pci_is_root_bus(bus)) 686 + d = pci_host_bridge_msi_domain(bus); 687 + else 688 + d = dev_get_msi_domain(&bus->self->dev); 689 + 690 + dev_set_msi_domain(&bus->dev, d); 691 + } 692 + 664 693 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, 665 694 struct pci_dev *bridge, int busnr) 666 695 { ··· 743 714 bridge->subordinate = child; 744 715 745 716 add_dev: 717 + pci_set_bus_msi_domain(child); 746 718 ret = device_register(&child->dev); 747 719 WARN_ON(ret < 0); 748 720 ··· 1624 1594 pci_enable_acs(dev); 1625 1595 } 1626 1596 1597 + static void pci_set_msi_domain(struct pci_dev *dev) 1598 + { 1599 + /* 1600 + * If no domain has been set through the pcibios_add_device 1601 + * callback, inherit the default from the bus device. 
1602 + */ 1603 + if (!dev_get_msi_domain(&dev->dev)) 1604 + dev_set_msi_domain(&dev->dev, 1605 + dev_get_msi_domain(&dev->bus->dev)); 1606 + } 1607 + 1627 1608 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 1628 1609 { 1629 1610 int ret; ··· 1675 1634 1676 1635 ret = pcibios_add_device(dev); 1677 1636 WARN_ON(ret < 0); 1637 + 1638 + /* Setup MSI irq domain */ 1639 + pci_set_msi_domain(dev); 1678 1640 1679 1641 /* Notifier could use PCI capabilities */ 1680 1642 dev->match_driver = false; ··· 2052 2008 b->bridge = get_device(&bridge->dev); 2053 2009 device_enable_async_suspend(b->bridge); 2054 2010 pci_set_bus_of_node(b); 2011 + pci_set_bus_msi_domain(b); 2055 2012 2056 2013 if (!parent) 2057 2014 set_dev_node(b->bridge, pcibus_to_node(b));
+1 -1
drivers/pci/xen-pcifront.c
··· 265 265 } 266 266 267 267 i = 0; 268 - list_for_each_entry(entry, &dev->msi_list, list) { 268 + for_each_pci_msi_entry(entry, dev) { 269 269 op.msix_entries[i].entry = entry->msi_attrib.entry_nr; 270 270 /* Vector is useless at this point. */ 271 271 op.msix_entries[i].vector = -1;
+3 -3
drivers/sh/intc/chip.c
··· 22 22 23 23 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { 24 24 #ifdef CONFIG_SMP 25 - if (!cpumask_test_cpu(cpu, data->affinity)) 25 + if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data))) 26 26 continue; 27 27 #endif 28 28 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); ··· 50 50 51 51 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { 52 52 #ifdef CONFIG_SMP 53 - if (!cpumask_test_cpu(cpu, data->affinity)) 53 + if (!cpumask_test_cpu(cpu, irq_data_get_affinity_mask(data))) 54 54 continue; 55 55 #endif 56 56 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); ··· 72 72 if (!cpumask_intersects(cpumask, cpu_online_mask)) 73 73 return -1; 74 74 75 - cpumask_copy(data->affinity, cpumask); 75 + cpumask_copy(irq_data_get_affinity_mask(data), cpumask); 76 76 77 77 return IRQ_SET_MASK_OK_NOCOPY; 78 78 }
+1 -1
drivers/sh/intc/core.c
··· 67 67 68 68 static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) 69 69 { 70 - generic_handle_irq((unsigned int)irq_get_handler_data(irq)); 70 + generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc)); 71 71 } 72 72 73 73 static void __init intc_register_irq(struct intc_desc *desc,
+17 -12
drivers/sh/intc/virq.c
··· 83 83 84 84 static int add_virq_to_pirq(unsigned int irq, unsigned int virq) 85 85 { 86 - struct intc_virq_list **last, *entry; 87 - struct irq_data *data = irq_get_irq_data(irq); 86 + struct intc_virq_list *entry; 87 + struct intc_virq_list **last = NULL; 88 88 89 89 /* scan for duplicates */ 90 - last = (struct intc_virq_list **)&data->handler_data; 91 - for_each_virq(entry, data->handler_data) { 90 + for_each_virq(entry, irq_get_handler_data(irq)) { 92 91 if (entry->irq == virq) 93 92 return 0; 94 93 last = &entry->next; ··· 101 102 102 103 entry->irq = virq; 103 104 104 - *last = entry; 105 + if (last) 106 + *last = entry; 107 + else 108 + irq_set_handler_data(irq, entry); 105 109 106 110 return 0; 107 111 } 108 112 109 - static void intc_virq_handler(unsigned int irq, struct irq_desc *desc) 113 + static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc) 110 114 { 111 - struct irq_data *data = irq_get_irq_data(irq); 115 + unsigned int irq = irq_desc_get_irq(desc); 116 + struct irq_data *data = irq_desc_get_irq_data(desc); 112 117 struct irq_chip *chip = irq_data_get_irq_chip(data); 113 118 struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data); 114 119 struct intc_desc_int *d = get_intc_desc(irq); ··· 121 118 122 119 for_each_virq(entry, vlist) { 123 120 unsigned long addr, handle; 121 + struct irq_desc *vdesc = irq_to_desc(entry->irq); 124 122 125 - handle = (unsigned long)irq_get_handler_data(entry->irq); 126 - addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); 127 - 128 - if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) 129 - generic_handle_irq(entry->irq); 123 + if (vdesc) { 124 + handle = (unsigned long)irq_desc_get_handler_data(vdesc); 125 + addr = INTC_REG(d, _INTC_ADDR_E(handle), 0); 126 + if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0)) 127 + generic_handle_irq_desc(entry->irq, vdesc); 128 + } 130 129 } 131 130 132 131 chip->irq_unmask(data);
+5 -8
drivers/spmi/spmi-pmic-arb.c
··· 453 453 454 454 static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc) 455 455 { 456 - struct spmi_pmic_arb_dev *pa = irq_get_handler_data(irq); 457 - struct irq_chip *chip = irq_get_chip(irq); 456 + struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc); 457 + struct irq_chip *chip = irq_desc_get_chip(desc); 458 458 void __iomem *intr = pa->intr; 459 459 int first = pa->min_apid >> 5; 460 460 int last = pa->max_apid >> 5; ··· 945 945 goto err_put_ctrl; 946 946 } 947 947 948 - irq_set_handler_data(pa->irq, pa); 949 - irq_set_chained_handler(pa->irq, pmic_arb_chained_irq); 948 + irq_set_chained_handler_and_data(pa->irq, pmic_arb_chained_irq, pa); 950 949 951 950 err = spmi_controller_add(ctrl); 952 951 if (err) ··· 954 955 return 0; 955 956 956 957 err_domain_remove: 957 - irq_set_chained_handler(pa->irq, NULL); 958 - irq_set_handler_data(pa->irq, NULL); 958 + irq_set_chained_handler_and_data(pa->irq, NULL, NULL); 959 959 irq_domain_remove(pa->domain); 960 960 err_put_ctrl: 961 961 spmi_controller_put(ctrl); ··· 966 968 struct spmi_controller *ctrl = platform_get_drvdata(pdev); 967 969 struct spmi_pmic_arb_dev *pa = spmi_controller_get_drvdata(ctrl); 968 970 spmi_controller_remove(ctrl); 969 - irq_set_chained_handler(pa->irq, NULL); 970 - irq_set_handler_data(pa->irq, NULL); 971 + irq_set_chained_handler_and_data(pa->irq, NULL, NULL); 971 972 irq_domain_remove(pa->domain); 972 973 spmi_controller_put(ctrl); 973 974 return 0;
+24
include/linux/device.h
··· 714 714 * along with subsystem-level and driver-level callbacks. 715 715 * @pins: For device pin management. 716 716 * See Documentation/pinctrl.txt for details. 717 + * @msi_list: Hosts MSI descriptors 718 + * @msi_domain: The generic MSI domain this device is using. 717 719 * @numa_node: NUMA node this device is close to. 718 720 * @dma_mask: Dma mask (if dma'ble device). 719 721 * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all ··· 776 774 struct dev_pm_info power; 777 775 struct dev_pm_domain *pm_domain; 778 776 777 + #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 778 + struct irq_domain *msi_domain; 779 + #endif 779 780 #ifdef CONFIG_PINCTRL 780 781 struct dev_pin_info *pins; 782 + #endif 783 + #ifdef CONFIG_GENERIC_MSI_IRQ 784 + struct list_head msi_list; 781 785 #endif 782 786 783 787 #ifdef CONFIG_NUMA ··· 868 860 { 869 861 } 870 862 #endif 863 + 864 + static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) 865 + { 866 + #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 867 + return dev->msi_domain; 868 + #else 869 + return NULL; 870 + #endif 871 + } 872 + 873 + static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) 874 + { 875 + #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 876 + dev->msi_domain = d; 877 + #endif 878 + } 871 879 872 880 static inline void *dev_get_drvdata(const struct device *dev) 873 881 {
+14 -5
include/linux/irq.h
··· 324 324 * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips 325 325 * @irq_cpu_online: configure an interrupt source for a secondary CPU 326 326 * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU 327 - * @irq_suspend: function called from core code on suspend once per chip 328 - * @irq_resume: function called from core code on resume once per chip 327 + * @irq_suspend: function called from core code on suspend once per 328 + * chip, when one or more interrupts are installed 329 + * @irq_resume: function called from core code on resume once per chip, 330 + * when one ore more interrupts are installed 329 331 * @irq_pm_shutdown: function called from core code on shutdown once per chip 330 332 * @irq_calc_mask: Optional function to set irq_data.mask for special cases 331 333 * @irq_print_chip: optional to print special chip info in show_interrupts ··· 490 488 #endif 491 489 492 490 /* Handling of unhandled and spurious interrupts: */ 493 - extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 494 - irqreturn_t action_ret); 491 + extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret); 495 492 496 493 497 494 /* Enable/disable irq debugging output: */ ··· 641 640 return d ? 
d->msi_desc : NULL; 642 641 } 643 642 644 - static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) 643 + static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) 645 644 { 646 645 return d->msi_desc; 647 646 } ··· 763 762 * @reg_base: Register base address (virtual) 764 763 * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) 765 764 * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) 765 + * @suspend: Function called from core code on suspend once per 766 + * chip; can be useful instead of irq_chip::suspend to 767 + * handle chip details even when no interrupts are in use 768 + * @resume: Function called from core code on resume once per chip; 769 + * can be useful instead of irq_chip::suspend to handle 770 + * chip details even when no interrupts are in use 766 771 * @irq_base: Interrupt base nr for this chip 767 772 * @irq_cnt: Number of interrupts handled by this chip 768 773 * @mask_cache: Cached mask register shared between all chip types ··· 795 788 void __iomem *reg_base; 796 789 u32 (*reg_readl)(void __iomem *addr); 797 790 void (*reg_writel)(u32 val, void __iomem *addr); 791 + void (*suspend)(struct irq_chip_generic *gc); 792 + void (*resume)(struct irq_chip_generic *gc); 798 793 unsigned int irq_base; 799 794 unsigned int irq_cnt; 800 795 u32 mask_cache;
+1
include/linux/irqchip/arm-gic-v3.h
··· 360 360 #ifndef __ASSEMBLY__ 361 361 362 362 #include <linux/stringify.h> 363 + #include <asm/msi.h> 363 364 364 365 /* 365 366 * We need a value to serve as a irq-type for LPIs. Choose one that will
+1 -2
include/linux/irqchip/arm-gic.h
··· 95 95 96 96 struct device_node; 97 97 98 - void gic_set_irqchip_flags(unsigned long flags); 99 98 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, 100 99 u32 offset, struct device_node *); 101 100 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 102 - void gic_cpu_if_down(void); 101 + int gic_cpu_if_down(unsigned int gic_nr); 103 102 104 103 static inline void gic_init(unsigned int nr, int start, 105 104 void __iomem *dist , void __iomem *cpu)
+24 -2
include/linux/irqdomain.h
··· 45 45 /* Number of irqs reserved for a legacy isa controller */ 46 46 #define NUM_ISA_INTERRUPTS 16 47 47 48 + /* 49 + * Should several domains have the same device node, but serve 50 + * different purposes (for example one domain is for PCI/MSI, and the 51 + * other for wired IRQs), they can be distinguished using a 52 + * bus-specific token. Most domains are expected to only carry 53 + * DOMAIN_BUS_ANY. 54 + */ 55 + enum irq_domain_bus_token { 56 + DOMAIN_BUS_ANY = 0, 57 + DOMAIN_BUS_PCI_MSI, 58 + DOMAIN_BUS_PLATFORM_MSI, 59 + DOMAIN_BUS_NEXUS, 60 + }; 61 + 48 62 /** 49 63 * struct irq_domain_ops - Methods for irq_domain objects 50 64 * @match: Match an interrupt controller device node to a host, returns ··· 75 61 * to setup the irq_desc when returning from map(). 76 62 */ 77 63 struct irq_domain_ops { 78 - int (*match)(struct irq_domain *d, struct device_node *node); 64 + int (*match)(struct irq_domain *d, struct device_node *node, 65 + enum irq_domain_bus_token bus_token); 79 66 int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); 80 67 void (*unmap)(struct irq_domain *d, unsigned int virq); 81 68 int (*xlate)(struct irq_domain *d, struct device_node *node, ··· 131 116 132 117 /* Optional data */ 133 118 struct device_node *of_node; 119 + enum irq_domain_bus_token bus_token; 134 120 struct irq_domain_chip_generic *gc; 135 121 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 136 122 struct irq_domain *parent; ··· 177 161 irq_hw_number_t first_hwirq, 178 162 const struct irq_domain_ops *ops, 179 163 void *host_data); 180 - extern struct irq_domain *irq_find_host(struct device_node *node); 164 + extern struct irq_domain *irq_find_matching_host(struct device_node *node, 165 + enum irq_domain_bus_token bus_token); 181 166 extern void irq_set_default_host(struct irq_domain *host); 167 + 168 + static inline struct irq_domain *irq_find_host(struct device_node *node) 169 + { 170 + return irq_find_matching_host(node, DOMAIN_BUS_ANY); 171 + } 182 172 183 173 /** 
184 174 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
+82 -27
include/linux/msi.h
··· 14 14 /* Helper functions */ 15 15 struct irq_data; 16 16 struct msi_desc; 17 + struct pci_dev; 18 + struct platform_msi_priv_data; 17 19 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 18 20 void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 19 21 20 - struct msi_desc { 21 - struct { 22 - __u8 is_msix : 1; 23 - __u8 multiple: 3; /* log2 num of messages allocated */ 24 - __u8 multi_cap : 3; /* log2 num of messages supported */ 25 - __u8 maskbit : 1; /* mask-pending bit supported ? */ 26 - __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 27 - __u16 entry_nr; /* specific enabled entry */ 28 - unsigned default_irq; /* default pre-assigned irq */ 29 - } msi_attrib; 22 + typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc, 23 + struct msi_msg *msg); 30 24 31 - u32 masked; /* mask bits */ 32 - unsigned int irq; 33 - unsigned int nvec_used; /* number of messages */ 34 - struct list_head list; 25 + /** 26 + * platform_msi_desc - Platform device specific msi descriptor data 27 + * @msi_priv_data: Pointer to platform private data 28 + * @msi_index: The index of the MSI descriptor for multi MSI 29 + */ 30 + struct platform_msi_desc { 31 + struct platform_msi_priv_data *msi_priv_data; 32 + u16 msi_index; 33 + }; 34 + 35 + /** 36 + * struct msi_desc - Descriptor structure for MSI based interrupts 37 + * @list: List head for management 38 + * @irq: The base interrupt number 39 + * @nvec_used: The number of vectors used 40 + * @dev: Pointer to the device which uses this descriptor 41 + * @msg: The last set MSI message cached for reuse 42 + * 43 + * @masked: [PCI MSI/X] Mask bits 44 + * @is_msix: [PCI MSI/X] True if MSI-X 45 + * @multiple: [PCI MSI/X] log2 num of messages allocated 46 + * @multi_cap: [PCI MSI/X] log2 num of messages supported 47 + * @maskbit: [PCI MSI/X] Mask-Pending bit supported? 
48 + * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit 49 + * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor 50 + * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq 51 + * @mask_pos: [PCI MSI] Mask register position 52 + * @mask_base: [PCI MSI-X] Mask register base address 53 + * @platform: [platform] Platform device specific msi descriptor data 54 + */ 55 + struct msi_desc { 56 + /* Shared device/bus type independent data */ 57 + struct list_head list; 58 + unsigned int irq; 59 + unsigned int nvec_used; 60 + struct device *dev; 61 + struct msi_msg msg; 35 62 36 63 union { 37 - void __iomem *mask_base; 38 - u8 mask_pos; 39 - }; 40 - struct pci_dev *dev; 64 + /* PCI MSI/X specific data */ 65 + struct { 66 + u32 masked; 67 + struct { 68 + __u8 is_msix : 1; 69 + __u8 multiple : 3; 70 + __u8 multi_cap : 3; 71 + __u8 maskbit : 1; 72 + __u8 is_64 : 1; 73 + __u16 entry_nr; 74 + unsigned default_irq; 75 + } msi_attrib; 76 + union { 77 + u8 mask_pos; 78 + void __iomem *mask_base; 79 + }; 80 + }; 41 81 42 - /* Last set MSI message */ 43 - struct msi_msg msg; 82 + /* 83 + * Non PCI variants add their data structure here. New 84 + * entries need to use a named structure. We want 85 + * proper name spaces for this. The PCI part is 86 + * anonymous for now as it would require an immediate 87 + * tree wide cleanup. 
88 + */ 89 + struct platform_msi_desc platform; 90 + }; 44 91 }; 45 92 46 93 /* Helpers to hide struct msi_desc implementation details */ 47 - #define msi_desc_to_dev(desc) (&(desc)->dev.dev) 48 - #define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list) 94 + #define msi_desc_to_dev(desc) ((desc)->dev) 95 + #define dev_to_msi_list(dev) (&(dev)->msi_list) 49 96 #define first_msi_entry(dev) \ 50 97 list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) 51 98 #define for_each_msi_entry(desc, dev) \ ··· 103 56 #define for_each_pci_msi_entry(desc, pdev) \ 104 57 for_each_msi_entry((desc), &(pdev)->dev) 105 58 106 - static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) 59 + struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); 60 + void *msi_desc_to_pci_sysdata(struct msi_desc *desc); 61 + #else /* CONFIG_PCI_MSI */ 62 + static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) 107 63 { 108 - return desc->dev; 64 + return NULL; 109 65 } 110 66 #endif /* CONFIG_PCI_MSI */ 111 67 68 + struct msi_desc *alloc_msi_entry(struct device *dev); 69 + void free_msi_entry(struct msi_desc *entry); 112 70 void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 113 71 void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 114 72 void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); ··· 160 108 struct device *dev; 161 109 struct device_node *of_node; 162 110 struct list_head list; 163 - #ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN 164 - struct irq_domain *domain; 165 - #endif 166 111 167 112 int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, 168 113 struct msi_desc *desc); ··· 270 221 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); 271 222 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); 272 223 224 + struct irq_domain *platform_msi_create_irq_domain(struct device_node *np, 225 + struct msi_domain_info *info, 226 + struct irq_domain *parent); 227 + 
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec, 228 + irq_write_msi_msg_t write_msi_msg); 229 + void platform_msi_domain_free_irqs(struct device *dev); 273 230 #endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ 274 231 275 232 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
+1
include/linux/of_irq.h
··· 74 74 */ 75 75 extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); 76 76 extern struct device_node *of_irq_find_parent(struct device_node *child); 77 + extern void of_msi_configure(struct device *dev, struct device_node *np); 77 78 78 79 #else /* !CONFIG_OF */ 79 80 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
+4 -1
include/linux/pci.h
··· 369 369 struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ 370 370 struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ 371 371 #ifdef CONFIG_PCI_MSI 372 - struct list_head msi_list; 373 372 const struct attribute_group **msi_irq_groups; 374 373 #endif 375 374 struct pci_vpd *vpd; ··· 1891 1892 /* PCI <-> OF binding helpers */ 1892 1893 #ifdef CONFIG_OF 1893 1894 struct device_node; 1895 + struct irq_domain; 1894 1896 void pci_set_of_node(struct pci_dev *dev); 1895 1897 void pci_release_of_node(struct pci_dev *dev); 1896 1898 void pci_set_bus_of_node(struct pci_bus *bus); 1897 1899 void pci_release_bus_of_node(struct pci_bus *bus); 1900 + struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); 1898 1901 1899 1902 /* Arch may override this (weak) */ 1900 1903 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); ··· 1919 1918 static inline void pci_release_bus_of_node(struct pci_bus *bus) { } 1920 1919 static inline struct device_node * 1921 1920 pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } 1921 + static inline struct irq_domain * 1922 + pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } 1922 1923 #endif /* CONFIG_OF */ 1923 1924 1924 1925 #ifdef CONFIG_EEH
+12 -12
kernel/irq/chip.c
··· 63 63 return -EINVAL; 64 64 65 65 type &= IRQ_TYPE_SENSE_MASK; 66 - ret = __irq_set_trigger(desc, irq, type); 66 + ret = __irq_set_trigger(desc, type); 67 67 irq_put_desc_busunlock(desc, flags); 68 68 return ret; 69 69 } ··· 187 187 irq_enable(desc); 188 188 } 189 189 if (resend) 190 - check_irq_resend(desc, desc->irq_data.irq); 190 + check_irq_resend(desc); 191 191 return ret; 192 192 } 193 193 ··· 315 315 raw_spin_lock_irq(&desc->lock); 316 316 317 317 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 318 - kstat_incr_irqs_this_cpu(irq, desc); 318 + kstat_incr_irqs_this_cpu(desc); 319 319 320 320 action = desc->action; 321 321 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { ··· 328 328 329 329 action_ret = action->thread_fn(action->irq, action->dev_id); 330 330 if (!noirqdebug) 331 - note_interrupt(irq, desc, action_ret); 331 + note_interrupt(desc, action_ret); 332 332 333 333 raw_spin_lock_irq(&desc->lock); 334 334 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); ··· 391 391 goto out_unlock; 392 392 393 393 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 394 - kstat_incr_irqs_this_cpu(irq, desc); 394 + kstat_incr_irqs_this_cpu(desc); 395 395 396 396 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 397 397 desc->istate |= IRQS_PENDING; ··· 443 443 goto out_unlock; 444 444 445 445 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 446 - kstat_incr_irqs_this_cpu(irq, desc); 446 + kstat_incr_irqs_this_cpu(desc); 447 447 448 448 /* 449 449 * If its disabled or no action available ··· 515 515 goto out; 516 516 517 517 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 518 - kstat_incr_irqs_this_cpu(irq, desc); 518 + kstat_incr_irqs_this_cpu(desc); 519 519 520 520 /* 521 521 * If its disabled or no action available ··· 583 583 goto out_unlock; 584 584 } 585 585 586 - kstat_incr_irqs_this_cpu(irq, desc); 586 + kstat_incr_irqs_this_cpu(desc); 587 587 588 588 /* Start handling the irq */ 589 589 desc->irq_data.chip->irq_ack(&desc->irq_data); 
··· 646 646 goto out_eoi; 647 647 } 648 648 649 - kstat_incr_irqs_this_cpu(irq, desc); 649 + kstat_incr_irqs_this_cpu(desc); 650 650 651 651 do { 652 652 if (unlikely(!desc->action)) ··· 675 675 { 676 676 struct irq_chip *chip = irq_desc_get_chip(desc); 677 677 678 - kstat_incr_irqs_this_cpu(irq, desc); 678 + kstat_incr_irqs_this_cpu(desc); 679 679 680 680 if (chip->irq_ack) 681 681 chip->irq_ack(&desc->irq_data); ··· 705 705 void *dev_id = raw_cpu_ptr(action->percpu_dev_id); 706 706 irqreturn_t res; 707 707 708 - kstat_incr_irqs_this_cpu(irq, desc); 708 + kstat_incr_irqs_this_cpu(desc); 709 709 710 710 if (chip->irq_ack) 711 711 chip->irq_ack(&desc->irq_data); ··· 1020 1020 /** 1021 1021 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt 1022 1022 * @data: Pointer to interrupt specific data 1023 - * @dest: The vcpu affinity information 1023 + * @vcpu_info: The vcpu affinity information 1024 1024 */ 1025 1025 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) 1026 1026 {
+6
kernel/irq/generic-chip.c
··· 553 553 if (data) 554 554 ct->chip.irq_suspend(data); 555 555 } 556 + 557 + if (gc->suspend) 558 + gc->suspend(gc); 556 559 } 557 560 return 0; 558 561 } ··· 566 563 567 564 list_for_each_entry(gc, &gc_list, list) { 568 565 struct irq_chip_type *ct = gc->chip_types; 566 + 567 + if (gc->resume) 568 + gc->resume(gc); 569 569 570 570 if (ct->chip.irq_resume) { 571 571 struct irq_data *data = irq_gc_get_irq_data(gc);
+2 -2
kernel/irq/handle.c
··· 30 30 void handle_bad_irq(unsigned int irq, struct irq_desc *desc) 31 31 { 32 32 print_irq_desc(irq, desc); 33 - kstat_incr_irqs_this_cpu(irq, desc); 33 + kstat_incr_irqs_this_cpu(desc); 34 34 ack_bad_irq(irq); 35 35 } 36 36 ··· 176 176 add_interrupt_randomness(irq, flags); 177 177 178 178 if (!noirqdebug) 179 - note_interrupt(irq, desc, retval); 179 + note_interrupt(desc, retval); 180 180 return retval; 181 181 } 182 182
+5 -6
kernel/irq/internals.h
··· 59 59 #include "debug.h" 60 60 #include "settings.h" 61 61 62 - extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 63 - unsigned long flags); 64 - extern void __disable_irq(struct irq_desc *desc, unsigned int irq); 65 - extern void __enable_irq(struct irq_desc *desc, unsigned int irq); 62 + extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags); 63 + extern void __disable_irq(struct irq_desc *desc); 64 + extern void __enable_irq(struct irq_desc *desc); 66 65 67 66 extern int irq_startup(struct irq_desc *desc, bool resend); 68 67 extern void irq_shutdown(struct irq_desc *desc); ··· 85 86 irqreturn_t handle_irq_event(struct irq_desc *desc); 86 87 87 88 /* Resending of interrupts :*/ 88 - void check_irq_resend(struct irq_desc *desc, unsigned int irq); 89 + void check_irq_resend(struct irq_desc *desc); 89 90 bool irq_wait_for_poll(struct irq_desc *desc); 90 91 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); 91 92 ··· 186 187 return __irqd_to_state(d) & mask; 187 188 } 188 189 189 - static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc) 190 + static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc) 190 191 { 191 192 __this_cpu_inc(*desc->kstat_irqs); 192 193 __this_cpu_inc(kstat.irqs_sum);
+1 -1
kernel/irq/irqdesc.c
··· 582 582 583 583 void kstat_incr_irq_this_cpu(unsigned int irq) 584 584 { 585 - kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); 585 + kstat_incr_irqs_this_cpu(irq_to_desc(irq)); 586 586 } 587 587 588 588 /**
+13 -5
kernel/irq/irqdomain.c
··· 187 187 EXPORT_SYMBOL_GPL(irq_domain_add_legacy); 188 188 189 189 /** 190 - * irq_find_host() - Locates a domain for a given device node 190 + * irq_find_matching_host() - Locates a domain for a given device node 191 191 * @node: device-tree node of the interrupt controller 192 + * @bus_token: domain-specific data 192 193 */ 193 - struct irq_domain *irq_find_host(struct device_node *node) 194 + struct irq_domain *irq_find_matching_host(struct device_node *node, 195 + enum irq_domain_bus_token bus_token) 194 196 { 195 197 struct irq_domain *h, *found = NULL; 196 198 int rc; ··· 201 199 * it might potentially be set to match all interrupts in 202 200 * the absence of a device node. This isn't a problem so far 203 201 * yet though... 202 + * 203 + * bus_token == DOMAIN_BUS_ANY matches any domain, any other 204 + * values must generate an exact match for the domain to be 205 + * selected. 204 206 */ 205 207 mutex_lock(&irq_domain_mutex); 206 208 list_for_each_entry(h, &irq_domain_list, link) { 207 209 if (h->ops->match) 208 - rc = h->ops->match(h, node); 210 + rc = h->ops->match(h, node, bus_token); 209 211 else 210 - rc = (h->of_node != NULL) && (h->of_node == node); 212 + rc = ((h->of_node != NULL) && (h->of_node == node) && 213 + ((bus_token == DOMAIN_BUS_ANY) || 214 + (h->bus_token == bus_token))); 211 215 212 216 if (rc) { 213 217 found = h; ··· 223 215 mutex_unlock(&irq_domain_mutex); 224 216 return found; 225 217 } 226 - EXPORT_SYMBOL_GPL(irq_find_host); 218 + EXPORT_SYMBOL_GPL(irq_find_matching_host); 227 219 228 220 /** 229 221 * irq_set_default_host() - Set a "default" irq domain
+34 -30
kernel/irq/manage.c
··· 115 115 #ifdef CONFIG_SMP 116 116 cpumask_var_t irq_default_affinity; 117 117 118 + static int __irq_can_set_affinity(struct irq_desc *desc) 119 + { 120 + if (!desc || !irqd_can_balance(&desc->irq_data) || 121 + !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) 122 + return 0; 123 + return 1; 124 + } 125 + 118 126 /** 119 127 * irq_can_set_affinity - Check if the affinity of a given irq can be set 120 128 * @irq: Interrupt to check ··· 130 122 */ 131 123 int irq_can_set_affinity(unsigned int irq) 132 124 { 133 - struct irq_desc *desc = irq_to_desc(irq); 134 - 135 - if (!desc || !irqd_can_balance(&desc->irq_data) || 136 - !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) 137 - return 0; 138 - 139 - return 1; 125 + return __irq_can_set_affinity(irq_to_desc(irq)); 140 126 } 141 127 142 128 /** ··· 361 359 /* 362 360 * Generic version of the affinity autoselector. 363 361 */ 364 - static int 365 - setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) 362 + static int setup_affinity(struct irq_desc *desc, struct cpumask *mask) 366 363 { 367 364 struct cpumask *set = irq_default_affinity; 368 365 int node = irq_desc_get_node(desc); 369 366 370 367 /* Excludes PER_CPU and NO_BALANCE interrupts */ 371 - if (!irq_can_set_affinity(irq)) 368 + if (!__irq_can_set_affinity(desc)) 372 369 return 0; 373 370 374 371 /* ··· 394 393 return 0; 395 394 } 396 395 #else 397 - static inline int 398 - setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) 396 + /* Wrapper for ALPHA specific affinity selector magic */ 397 + static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask) 399 398 { 400 - return irq_select_affinity(irq); 399 + return irq_select_affinity(irq_desc_get_irq(d)); 401 400 } 402 401 #endif 403 402 ··· 411 410 int ret; 412 411 413 412 raw_spin_lock_irqsave(&desc->lock, flags); 414 - ret = setup_affinity(irq, desc, mask); 413 + ret = setup_affinity(desc, mask); 415 414 
raw_spin_unlock_irqrestore(&desc->lock, flags); 416 415 return ret; 417 416 } 418 417 419 418 #else 420 419 static inline int 421 - setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) 420 + setup_affinity(struct irq_desc *desc, struct cpumask *mask) 422 421 { 423 422 return 0; 424 423 } 425 424 #endif 426 425 427 - void __disable_irq(struct irq_desc *desc, unsigned int irq) 426 + void __disable_irq(struct irq_desc *desc) 428 427 { 429 428 if (!desc->depth++) 430 429 irq_disable(desc); ··· 437 436 438 437 if (!desc) 439 438 return -EINVAL; 440 - __disable_irq(desc, irq); 439 + __disable_irq(desc); 441 440 irq_put_desc_busunlock(desc, flags); 442 441 return 0; 443 442 } ··· 504 503 } 505 504 EXPORT_SYMBOL_GPL(disable_hardirq); 506 505 507 - void __enable_irq(struct irq_desc *desc, unsigned int irq) 506 + void __enable_irq(struct irq_desc *desc) 508 507 { 509 508 switch (desc->depth) { 510 509 case 0: 511 510 err_out: 512 - WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 511 + WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", 512 + irq_desc_get_irq(desc)); 513 513 break; 514 514 case 1: { 515 515 if (desc->istate & IRQS_SUSPENDED) ··· 518 516 /* Prevent probing on this irq: */ 519 517 irq_settings_set_noprobe(desc); 520 518 irq_enable(desc); 521 - check_irq_resend(desc, irq); 519 + check_irq_resend(desc); 522 520 /* fall-through */ 523 521 } 524 522 default: ··· 548 546 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) 549 547 goto out; 550 548 551 - __enable_irq(desc, irq); 549 + __enable_irq(desc); 552 550 out: 553 551 irq_put_desc_busunlock(desc, flags); 554 552 } ··· 639 637 return canrequest; 640 638 } 641 639 642 - int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 643 - unsigned long flags) 640 + int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) 644 641 { 645 642 struct irq_chip *chip = desc->irq_data.chip; 646 643 int ret, unmask = 0; ··· 649 648 * IRQF_TRIGGER_* but the PIC 
does not support multiple 650 649 * flow-types? 651 650 */ 652 - pr_debug("No set_type function for IRQ %d (%s)\n", irq, 651 + pr_debug("No set_type function for IRQ %d (%s)\n", 652 + irq_desc_get_irq(desc), 653 653 chip ? (chip->name ? : "unknown") : "unknown"); 654 654 return 0; 655 655 } ··· 687 685 break; 688 686 default: 689 687 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n", 690 - flags, irq, chip->irq_set_type); 688 + flags, irq_desc_get_irq(desc), chip->irq_set_type); 691 689 } 692 690 if (unmask) 693 691 unmask_irq(desc); ··· 1223 1221 1224 1222 /* Setup the type (level, edge polarity) if configured: */ 1225 1223 if (new->flags & IRQF_TRIGGER_MASK) { 1226 - ret = __irq_set_trigger(desc, irq, 1227 - new->flags & IRQF_TRIGGER_MASK); 1224 + ret = __irq_set_trigger(desc, 1225 + new->flags & IRQF_TRIGGER_MASK); 1228 1226 1229 1227 if (ret) 1230 1228 goto out_mask; ··· 1255 1253 } 1256 1254 1257 1255 /* Set default affinity mask once everything is setup */ 1258 - setup_affinity(irq, desc, mask); 1256 + setup_affinity(desc, mask); 1259 1257 1260 1258 } else if (new->flags & IRQF_TRIGGER_MASK) { 1261 1259 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; ··· 1282 1280 */ 1283 1281 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { 1284 1282 desc->istate &= ~IRQS_SPURIOUS_DISABLED; 1285 - __enable_irq(desc, irq); 1283 + __enable_irq(desc); 1286 1284 } 1287 1285 1288 1286 raw_spin_unlock_irqrestore(&desc->lock, flags); ··· 1652 1650 if (type != IRQ_TYPE_NONE) { 1653 1651 int ret; 1654 1652 1655 - ret = __irq_set_trigger(desc, irq, type); 1653 + ret = __irq_set_trigger(desc, type); 1656 1654 1657 1655 if (ret) { 1658 1656 WARN(1, "failed to set type for IRQ%d\n", irq); ··· 1877 1875 irq_put_desc_busunlock(desc, flags); 1878 1876 return err; 1879 1877 } 1878 + EXPORT_SYMBOL_GPL(irq_get_irqchip_state); 1880 1879 1881 1880 /** 1882 1881 * irq_set_irqchip_state - set the state of a forwarded interrupt. 
··· 1923 1920 irq_put_desc_busunlock(desc, flags); 1924 1921 return err; 1925 1922 } 1923 + EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
+17
kernel/irq/msi.c
··· 18 18 /* Temparory solution for building, will be removed later */ 19 19 #include <linux/pci.h> 20 20 21 + struct msi_desc *alloc_msi_entry(struct device *dev) 22 + { 23 + struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); 24 + if (!desc) 25 + return NULL; 26 + 27 + INIT_LIST_HEAD(&desc->list); 28 + desc->dev = dev; 29 + 30 + return desc; 31 + } 32 + 33 + void free_msi_entry(struct msi_desc *entry) 34 + { 35 + kfree(entry); 36 + } 37 + 21 38 void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) 22 39 { 23 40 *msg = entry->msg;
+6 -6
kernel/irq/pm.c
··· 68 68 desc->cond_suspend_depth--; 69 69 } 70 70 71 - static bool suspend_device_irq(struct irq_desc *desc, int irq) 71 + static bool suspend_device_irq(struct irq_desc *desc) 72 72 { 73 73 if (!desc->action || desc->no_suspend_depth) 74 74 return false; ··· 85 85 } 86 86 87 87 desc->istate |= IRQS_SUSPENDED; 88 - __disable_irq(desc, irq); 88 + __disable_irq(desc); 89 89 90 90 /* 91 91 * Hardware which has no wakeup source configuration facility ··· 126 126 if (irq_settings_is_nested_thread(desc)) 127 127 continue; 128 128 raw_spin_lock_irqsave(&desc->lock, flags); 129 - sync = suspend_device_irq(desc, irq); 129 + sync = suspend_device_irq(desc); 130 130 raw_spin_unlock_irqrestore(&desc->lock, flags); 131 131 132 132 if (sync) ··· 135 135 } 136 136 EXPORT_SYMBOL_GPL(suspend_device_irqs); 137 137 138 - static void resume_irq(struct irq_desc *desc, int irq) 138 + static void resume_irq(struct irq_desc *desc) 139 139 { 140 140 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); 141 141 ··· 150 150 desc->depth++; 151 151 resume: 152 152 desc->istate &= ~IRQS_SUSPENDED; 153 - __enable_irq(desc, irq); 153 + __enable_irq(desc); 154 154 } 155 155 156 156 static void resume_irqs(bool want_early) ··· 169 169 continue; 170 170 171 171 raw_spin_lock_irqsave(&desc->lock, flags); 172 - resume_irq(desc, irq); 172 + resume_irq(desc); 173 173 raw_spin_unlock_irqrestore(&desc->lock, flags); 174 174 } 175 175 }
+3 -1
kernel/irq/resend.c
··· 53 53 * 54 54 * Is called with interrupts disabled and desc->lock held. 55 55 */ 56 - void check_irq_resend(struct irq_desc *desc, unsigned int irq) 56 + void check_irq_resend(struct irq_desc *desc) 57 57 { 58 58 /* 59 59 * We do not resend level type interrupts. Level type ··· 74 74 if (!desc->irq_data.chip->irq_retrigger || 75 75 !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { 76 76 #ifdef CONFIG_HARDIRQS_SW_RESEND 77 + unsigned int irq = irq_desc_get_irq(desc); 78 + 77 79 /* 78 80 * If the interrupt is running in the thread 79 81 * context of the parent irq we need to be
+13 -13
kernel/irq/spurious.c
··· 60 60 /* 61 61 * Recovery handler for misrouted interrupts. 62 62 */ 63 - static int try_one_irq(int irq, struct irq_desc *desc, bool force) 63 + static int try_one_irq(struct irq_desc *desc, bool force) 64 64 { 65 65 irqreturn_t ret = IRQ_NONE; 66 66 struct irqaction *action; ··· 133 133 if (i == irq) /* Already tried */ 134 134 continue; 135 135 136 - if (try_one_irq(i, desc, false)) 136 + if (try_one_irq(desc, false)) 137 137 ok = 1; 138 138 } 139 139 out: ··· 164 164 continue; 165 165 166 166 local_irq_disable(); 167 - try_one_irq(i, desc, true); 167 + try_one_irq(desc, true); 168 168 local_irq_enable(); 169 169 } 170 170 out: ··· 188 188 * (The other 100-of-100,000 interrupts may have been a correctly 189 189 * functioning device sharing an IRQ with the failing one) 190 190 */ 191 - static void 192 - __report_bad_irq(unsigned int irq, struct irq_desc *desc, 193 - irqreturn_t action_ret) 191 + static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) 194 192 { 193 + unsigned int irq = irq_desc_get_irq(desc); 195 194 struct irqaction *action; 196 195 unsigned long flags; 197 196 ··· 223 224 raw_spin_unlock_irqrestore(&desc->lock, flags); 224 225 } 225 226 226 - static void 227 - report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) 227 + static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) 228 228 { 229 229 static int count = 100; 230 230 231 231 if (count > 0) { 232 232 count--; 233 - __report_bad_irq(irq, desc, action_ret); 233 + __report_bad_irq(desc, action_ret); 234 234 } 235 235 } 236 236 ··· 270 272 271 273 #define SPURIOUS_DEFERRED 0x80000000 272 274 273 - void note_interrupt(unsigned int irq, struct irq_desc *desc, 274 - irqreturn_t action_ret) 275 + void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret) 275 276 { 277 + unsigned int irq; 278 + 276 279 if (desc->istate & IRQS_POLL_INPROGRESS || 277 280 irq_settings_is_polled(desc)) 278 281 return; 279 282 280 283 if 
(bad_action_ret(action_ret)) { 281 - report_bad_irq(irq, desc, action_ret); 284 + report_bad_irq(desc, action_ret); 282 285 return; 283 286 } 284 287 ··· 397 398 desc->last_unhandled = jiffies; 398 399 } 399 400 401 + irq = irq_desc_get_irq(desc); 400 402 if (unlikely(try_misrouted_irq(irq, desc, action_ret))) { 401 403 int ok = misrouted_irq(irq); 402 404 if (action_ret == IRQ_NONE) ··· 413 413 /* 414 414 * The interrupt is stuck 415 415 */ 416 - __report_bad_irq(irq, desc, action_ret); 416 + __report_bad_irq(desc, action_ret); 417 417 /* 418 418 * Now kill the IRQ 419 419 */