Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
"The interrupt subsystem delivers this time:

- Refactoring of the GIC-V3 driver to prepare for the GIC-V4 support

- Initial GIC-V4 support

- Consolidation of the FSL MSI support

- Utilize the effective affinity interface in various ARM irqchip
drivers

- Yet another interrupt chip driver (UniPhier AIDET)

- Bulk conversion of the irq chip drivers to use %pOF

- The usual small fixes and improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (77 commits)
irqchip/ls-scfg-msi: Add MSI affinity support
irqchip/ls-scfg-msi: Add LS1043a v1.1 MSI support
irqchip/ls-scfg-msi: Add LS1046a MSI support
arm64: dts: ls1046a: Add MSI dts node
arm64: dts: ls1043a: Share all MSIs
arm: dts: ls1021a: Share all MSIs
arm64: dts: ls1043a: Fix typo of MSI compatible string
arm: dts: ls1021a: Fix typo of MSI compatible string
irqchip/ls-scfg-msi: Fix typo of MSI compatible strings
irqchip/irq-bcm7120-l2: Use correct I/O accessors for irq_fwd_mask
irqchip/mmp: Make mmp_intc_conf const
irqchip/gic: Make irq_chip const
irqchip/gic-v3: Advertise GICv4 support to KVM
irqchip/gic-v4: Enable low-level GICv4 operations
irqchip/gic-v4: Add some basic documentation
irqchip/gic-v4: Add VLPI configuration interface
irqchip/gic-v4: Add VPE command interface
irqchip/gic-v4: Add per-VM VPE domain creation
irqchip/gic-v3-its: Set implementation defined bit to enable VLPIs
irqchip/gic-v3-its: Allow doorbell interrupts to be injected/cleared
...

+3182 -293
+5 -3
Documentation/devicetree/bindings/interrupt-controller/fsl,ls-scfg-msi.txt
··· 4 4 5 5 - compatible: should be "fsl,<soc-name>-msi" to identify 6 6 Layerscape PCIe MSI controller block such as: 7 - "fsl,1s1021a-msi" 8 - "fsl,1s1043a-msi" 7 + "fsl,ls1021a-msi" 8 + "fsl,ls1043a-msi" 9 + "fsl,ls1046a-msi" 10 + "fsl,ls1043a-v1.1-msi" 9 11 - msi-controller: indicates that this is a PCIe MSI controller node 10 12 - reg: physical base address of the controller and length of memory mapped. 11 13 - interrupts: an interrupt to the parent interrupt controller. ··· 25 23 Examples: 26 24 27 25 msi1: msi-controller@1571000 { 28 - compatible = "fsl,1s1043a-msi"; 26 + compatible = "fsl,ls1043a-msi"; 29 27 reg = <0x0 0x1571000 0x0 0x8>, 30 28 msi-controller; 31 29 interrupts = <0 116 0x4>;
+32
Documentation/devicetree/bindings/interrupt-controller/socionext,uniphier-aidet.txt
··· 1 + UniPhier AIDET 2 + 3 + UniPhier AIDET (ARM Interrupt Detector) is an add-on block for ARM GIC (Generic 4 + Interrupt Controller). GIC itself can handle only high level and rising edge 5 + interrupts. The AIDET provides logic inverter to support low level and falling 6 + edge interrupts. 7 + 8 + Required properties: 9 + - compatible: Should be one of the following: 10 + "socionext,uniphier-ld4-aidet" - for LD4 SoC 11 + "socionext,uniphier-pro4-aidet" - for Pro4 SoC 12 + "socionext,uniphier-sld8-aidet" - for sLD8 SoC 13 + "socionext,uniphier-pro5-aidet" - for Pro5 SoC 14 + "socionext,uniphier-pxs2-aidet" - for PXs2/LD6b SoC 15 + "socionext,uniphier-ld11-aidet" - for LD11 SoC 16 + "socionext,uniphier-ld20-aidet" - for LD20 SoC 17 + "socionext,uniphier-pxs3-aidet" - for PXs3 SoC 18 + - reg: Specifies offset and length of the register set for the device. 19 + - interrupt-controller: Identifies the node as an interrupt controller 20 + - #interrupt-cells : Specifies the number of cells needed to encode an interrupt 21 + source. The value should be 2. The first cell defines the interrupt number 22 + (corresponds to the SPI interrupt number of GIC). The second cell specifies 23 + the trigger type as defined in interrupts.txt in this directory. 24 + 25 + Example: 26 + 27 + aidet: aidet@5fc20000 { 28 + compatible = "socionext,uniphier-pro4-aidet"; 29 + reg = <0x5fc20000 0x200>; 30 + interrupt-controller; 31 + #interrupt-cells = <2>; 32 + };
+1
Documentation/driver-model/devres.txt
··· 312 312 devm_irq_alloc_descs_from() 313 313 devm_irq_alloc_generic_chip() 314 314 devm_irq_setup_generic_chip() 315 + devm_irq_sim_init() 315 316 316 317 LED 317 318 devm_led_classdev_register()
+1
MAINTAINERS
··· 1993 1993 F: drivers/bus/uniphier-system-bus.c 1994 1994 F: drivers/clk/uniphier/ 1995 1995 F: drivers/i2c/busses/i2c-uniphier* 1996 + F: drivers/irqchip/irq-uniphier-aidet.c 1996 1997 F: drivers/pinctrl/uniphier/ 1997 1998 F: drivers/reset/reset-uniphier.c 1998 1999 F: drivers/tty/serial/8250/8250_uniphier.c
+4 -4
arch/arm/boot/dts/ls1021a.dtsi
··· 129 129 }; 130 130 131 131 msi1: msi-controller@1570e00 { 132 - compatible = "fsl,1s1021a-msi"; 132 + compatible = "fsl,ls1021a-msi"; 133 133 reg = <0x0 0x1570e00 0x0 0x8>; 134 134 msi-controller; 135 135 interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>; 136 136 }; 137 137 138 138 msi2: msi-controller@1570e08 { 139 - compatible = "fsl,1s1021a-msi"; 139 + compatible = "fsl,ls1021a-msi"; 140 140 reg = <0x0 0x1570e08 0x0 0x8>; 141 141 msi-controller; 142 142 interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; ··· 699 699 bus-range = <0x0 0xff>; 700 700 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 701 701 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ 702 - msi-parent = <&msi1>; 702 + msi-parent = <&msi1>, <&msi2>; 703 703 #interrupt-cells = <1>; 704 704 interrupt-map-mask = <0 0 0 7>; 705 705 interrupt-map = <0000 0 0 1 &gic GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, ··· 722 722 bus-range = <0x0 0xff>; 723 723 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ 724 724 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ 725 - msi-parent = <&msi2>; 725 + msi-parent = <&msi1>, <&msi2>; 726 726 #interrupt-cells = <1>; 727 727 interrupt-map-mask = <0 0 0 7>; 728 728 interrupt-map = <0000 0 0 1 &gic GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
+34
arch/arm/include/asm/arch_gicv3.h
··· 276 276 #define gicr_write_pendbaser(v, c) __gic_writeq_nonatomic(v, c) 277 277 278 278 /* 279 + * GICR_xLPIR - only the lower bits are significant 280 + */ 281 + #define gic_read_lpir(c) readl_relaxed(c) 282 + #define gic_write_lpir(v, c) writel_relaxed(lower_32_bits(v), c) 283 + 284 + /* 279 285 * GITS_TYPER is an ID register and doesn't need atomicity. 280 286 */ 281 287 #define gits_read_typer(c) __gic_readq_nonatomic(c) ··· 296 290 * GITS_CWRITER - hi and lo bits may be accessed independently. 297 291 */ 298 292 #define gits_write_cwriter(v, c) __gic_writeq_nonatomic(v, c) 293 + 294 + /* 295 + * GITS_VPROPBASER - hi and lo bits may be accessed independently. 296 + */ 297 + #define gits_write_vpropbaser(v, c) __gic_writeq_nonatomic(v, c) 298 + 299 + /* 300 + * GITS_VPENDBASER - the Valid bit must be cleared before changing 301 + * anything else. 302 + */ 303 + static inline void gits_write_vpendbaser(u64 val, void * __iomem addr) 304 + { 305 + u32 tmp; 306 + 307 + tmp = readl_relaxed(addr + 4); 308 + if (tmp & (GICR_VPENDBASER_Valid >> 32)) { 309 + tmp &= ~(GICR_VPENDBASER_Valid >> 32); 310 + writel_relaxed(tmp, addr + 4); 311 + } 312 + 313 + /* 314 + * Use the fact that __gic_writeq_nonatomic writes the second 315 + * half of the 64bit quantity after the first. 316 + */ 317 + __gic_writeq_nonatomic(val, addr); 318 + } 319 + 320 + #define gits_read_vpendbaser(c) __gic_readq_nonatomic(c) 299 321 300 322 #endif /* !__ASSEMBLY__ */ 301 323 #endif /* !__ASM_ARCH_GICV3_H */
+1
arch/arm/mach-hisi/Kconfig
··· 39 39 select HAVE_ARM_ARCH_TIMER 40 40 select MCPM if SMP 41 41 select MCPM_QUAD_CLUSTER if SMP 42 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 42 43 help 43 44 Support for Hisilicon HiP04 SoC family 44 45
+6 -6
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
··· 653 653 }; 654 654 655 655 msi1: msi-controller1@1571000 { 656 - compatible = "fsl,1s1043a-msi"; 656 + compatible = "fsl,ls1043a-msi"; 657 657 reg = <0x0 0x1571000 0x0 0x8>; 658 658 msi-controller; 659 659 interrupts = <0 116 0x4>; 660 660 }; 661 661 662 662 msi2: msi-controller2@1572000 { 663 - compatible = "fsl,1s1043a-msi"; 663 + compatible = "fsl,ls1043a-msi"; 664 664 reg = <0x0 0x1572000 0x0 0x8>; 665 665 msi-controller; 666 666 interrupts = <0 126 0x4>; 667 667 }; 668 668 669 669 msi3: msi-controller3@1573000 { 670 - compatible = "fsl,1s1043a-msi"; 670 + compatible = "fsl,ls1043a-msi"; 671 671 reg = <0x0 0x1573000 0x0 0x8>; 672 672 msi-controller; 673 673 interrupts = <0 160 0x4>; ··· 689 689 bus-range = <0x0 0xff>; 690 690 ranges = <0x81000000 0x0 0x00000000 0x40 0x00010000 0x0 0x00010000 /* downstream I/O */ 691 691 0x82000000 0x0 0x40000000 0x40 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ 692 - msi-parent = <&msi1>; 692 + msi-parent = <&msi1>, <&msi2>, <&msi3>; 693 693 #interrupt-cells = <1>; 694 694 interrupt-map-mask = <0 0 0 7>; 695 695 interrupt-map = <0000 0 0 1 &gic 0 110 0x4>, ··· 714 714 bus-range = <0x0 0xff>; 715 715 ranges = <0x81000000 0x0 0x00000000 0x48 0x00010000 0x0 0x00010000 /* downstream I/O */ 716 716 0x82000000 0x0 0x40000000 0x48 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ 717 - msi-parent = <&msi2>; 717 + msi-parent = <&msi1>, <&msi2>, <&msi3>; 718 718 #interrupt-cells = <1>; 719 719 interrupt-map-mask = <0 0 0 7>; 720 720 interrupt-map = <0000 0 0 1 &gic 0 120 0x4>, ··· 739 739 bus-range = <0x0 0xff>; 740 740 ranges = <0x81000000 0x0 0x00000000 0x50 0x00010000 0x0 0x00010000 /* downstream I/O */ 741 741 0x82000000 0x0 0x40000000 0x50 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ 742 - msi-parent = <&msi3>; 742 + msi-parent = <&msi1>, <&msi2>, <&msi3>; 743 743 #interrupt-cells = <1>; 744 744 interrupt-map-mask = <0 0 0 7>; 745 745 interrupt-map = <0000 0 0 1 &gic 0 154 0x4>,
+31
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
··· 630 630 interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>; 631 631 clocks = <&clockgen 4 1>; 632 632 }; 633 + 634 + msi1: msi-controller@1580000 { 635 + compatible = "fsl,ls1046a-msi"; 636 + msi-controller; 637 + reg = <0x0 0x1580000 0x0 0x10000>; 638 + interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>, 639 + <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>, 640 + <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>, 641 + <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>; 642 + }; 643 + 644 + msi2: msi-controller@1590000 { 645 + compatible = "fsl,ls1046a-msi"; 646 + msi-controller; 647 + reg = <0x0 0x1590000 0x0 0x10000>; 648 + interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>, 649 + <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>, 650 + <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, 651 + <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>; 652 + }; 653 + 654 + msi3: msi-controller@15a0000 { 655 + compatible = "fsl,ls1046a-msi"; 656 + msi-controller; 657 + reg = <0x0 0x15a0000 0x0 0x10000>; 658 + interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>, 659 + <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>, 660 + <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>, 661 + <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>; 662 + }; 663 + 633 664 }; 634 665 635 666 reserved-memory {
+7
arch/arm64/include/asm/arch_gicv3.h
··· 116 116 117 117 #define gic_read_typer(c) readq_relaxed(c) 118 118 #define gic_write_irouter(v, c) writeq_relaxed(v, c) 119 + #define gic_read_lpir(c) readq_relaxed(c) 120 + #define gic_write_lpir(v, c) writeq_relaxed(v, c) 119 121 120 122 #define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l)) 121 123 ··· 134 132 135 133 #define gicr_write_pendbaser(v, c) writeq_relaxed(v, c) 136 134 #define gicr_read_pendbaser(c) readq_relaxed(c) 135 + 136 + #define gits_write_vpropbaser(v, c) writeq_relaxed(v, c) 137 + 138 + #define gits_write_vpendbaser(v, c) writeq_relaxed(v, c) 139 + #define gits_read_vpendbaser(c) readq_relaxed(c) 137 140 138 141 #endif /* __ASSEMBLY__ */ 139 142 #endif /* __ASM_ARCH_GICV3_H */
+1
arch/metag/Kconfig
··· 26 26 select HAVE_SYSCALL_TRACEPOINTS 27 27 select HAVE_UNDERSCORE_SYMBOL_PREFIX 28 28 select IRQ_DOMAIN 29 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 29 30 select MODULES_USE_ELF_RELA 30 31 select OF 31 32 select OF_EARLY_FLATTREE
+15
drivers/irqchip/Kconfig
··· 7 7 select IRQ_DOMAIN 8 8 select IRQ_DOMAIN_HIERARCHY 9 9 select MULTI_IRQ_HANDLER 10 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 10 11 11 12 config ARM_GIC_PM 12 13 bool ··· 35 34 select MULTI_IRQ_HANDLER 36 35 select IRQ_DOMAIN_HIERARCHY 37 36 select PARTITION_PERCPU 37 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 38 38 39 39 config ARM_GIC_V3_ITS 40 40 bool ··· 66 64 bool 67 65 select GENERIC_IRQ_CHIP 68 66 select PCI_MSI if PCI 67 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 69 68 70 69 config ALPINE_MSI 71 70 bool ··· 96 93 bool 97 94 select GENERIC_IRQ_CHIP 98 95 select IRQ_DOMAIN 96 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 99 97 100 98 config BCM7038_L1_IRQ 101 99 bool 102 100 select GENERIC_IRQ_CHIP 103 101 select IRQ_DOMAIN 102 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 104 103 105 104 config BCM7120_L2_IRQ 106 105 bool ··· 141 136 select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING 142 137 select IRQ_DOMAIN 143 138 select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI 139 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 144 140 145 141 config CLPS711X_IRQCHIP 146 142 bool ··· 223 217 config XTENSA_MX 224 218 bool 225 219 select IRQ_DOMAIN 220 + select GENERIC_IRQ_EFFECTIVE_AFF_MASK 226 221 227 222 config XILINX_INTC 228 223 bool ··· 313 306 help 314 307 Say yes here to add support for the IRQ combiner devices embedded 315 308 in Qualcomm Technologies chips. 309 + 310 + config IRQ_UNIPHIER_AIDET 311 + bool "UniPhier AIDET support" if COMPILE_TEST 312 + depends on ARCH_UNIPHIER || COMPILE_TEST 313 + default ARCH_UNIPHIER 314 + select IRQ_DOMAIN_HIERARCHY 315 + help 316 + Support for the UniPhier AIDET (ARM Interrupt Detector).
+2 -1
drivers/irqchip/Makefile
··· 28 28 obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o 29 29 obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o 30 30 obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o 31 - obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o 31 + obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o 32 32 obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o 33 33 obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o 34 34 obj-$(CONFIG_ARM_NVIC) += irq-nvic.o ··· 78 78 obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o 79 79 obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o 80 80 obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o 81 + obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
+4 -1
drivers/irqchip/irq-armada-370-xp.c
··· 203 203 204 204 static struct msi_domain_info armada_370_xp_msi_domain_info = { 205 205 .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | 206 - MSI_FLAG_MULTI_PCI_MSI), 206 + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), 207 207 .chip = &armada_370_xp_msi_irq_chip, 208 208 }; 209 209 ··· 330 330 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 331 331 raw_spin_unlock(&irq_controller_lock); 332 332 333 + irq_data_update_effective_affinity(d, cpumask_of(cpu)); 334 + 333 335 return IRQ_SET_MASK_OK; 334 336 } 335 337 #endif ··· 365 363 } else { 366 364 irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, 367 365 handle_level_irq); 366 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); 368 367 } 369 368 irq_set_probe(virq); 370 369
+4 -5
drivers/irqchip/irq-bcm2835.c
··· 147 147 148 148 base = of_iomap(node, 0); 149 149 if (!base) 150 - panic("%s: unable to map IC registers\n", 151 - node->full_name); 150 + panic("%pOF: unable to map IC registers\n", node); 152 151 153 152 intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0), 154 153 &armctrl_ops, NULL); 155 154 if (!intc.domain) 156 - panic("%s: unable to create IRQ domain\n", node->full_name); 155 + panic("%pOF: unable to create IRQ domain\n", node); 157 156 158 157 for (b = 0; b < NR_BANKS; b++) { 159 158 intc.pending[b] = base + reg_pending[b]; ··· 172 173 int parent_irq = irq_of_parse_and_map(node, 0); 173 174 174 175 if (!parent_irq) { 175 - panic("%s: unable to get parent interrupt.\n", 176 - node->full_name); 176 + panic("%pOF: unable to get parent interrupt.\n", 177 + node); 177 178 } 178 179 irq_set_chained_handler(parent_irq, bcm2836_chained_handle_irq); 179 180 } else {
+2 -3
drivers/irqchip/irq-bcm2836.c
··· 282 282 { 283 283 intc.base = of_iomap(node, 0); 284 284 if (!intc.base) { 285 - panic("%s: unable to map local interrupt registers\n", 286 - node->full_name); 285 + panic("%pOF: unable to map local interrupt registers\n", node); 287 286 } 288 287 289 288 bcm2835_init_local_timer_frequency(); ··· 291 292 &bcm2836_arm_irqchip_intc_ops, 292 293 NULL); 293 294 if (!intc.domain) 294 - panic("%s: unable to create IRQ domain\n", node->full_name); 295 + panic("%pOF: unable to create IRQ domain\n", node); 295 296 296 297 bcm2836_arm_irqchip_register_irq(LOCAL_IRQ_CNTPSIRQ, 297 298 &bcm2836_arm_irqchip_timer);
+3
drivers/irqchip/irq-bcm6345-l1.c
··· 231 231 } 232 232 raw_spin_unlock_irqrestore(&intc->lock, flags); 233 233 234 + irq_data_update_effective_affinity(d, cpumask_of(new_cpu)); 235 + 234 236 return IRQ_SET_MASK_OK_NOCOPY; 235 237 } 236 238 ··· 293 291 irq_set_chip_and_handler(virq, 294 292 &bcm6345_l1_irq_chip, handle_percpu_irq); 295 293 irq_set_chip_data(virq, d->host_data); 294 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); 296 295 return 0; 297 296 } 298 297
+3
drivers/irqchip/irq-bcm7038-l1.c
··· 212 212 __bcm7038_l1_unmask(d, first_cpu); 213 213 214 214 raw_spin_unlock_irqrestore(&intc->lock, flags); 215 + irq_data_update_effective_affinity(d, cpumask_of(first_cpu)); 216 + 215 217 return 0; 216 218 } 217 219 ··· 301 299 { 302 300 irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq); 303 301 irq_set_chip_data(virq, d->host_data); 302 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); 304 303 return 0; 305 304 } 306 305
+4 -6
drivers/irqchip/irq-bcm7120-l2.c
··· 250 250 if (ret < 0) 251 251 goto out_free_l1_data; 252 252 253 - for (idx = 0; idx < data->n_words; idx++) { 254 - __raw_writel(data->irq_fwd_mask[idx], 255 - data->pair_base[idx] + 256 - data->en_offset[idx]); 257 - } 258 - 259 253 for (irq = 0; irq < data->num_parent_irqs; irq++) { 260 254 ret = bcm7120_l2_intc_init_one(dn, data, irq, valid_mask); 261 255 if (ret) ··· 290 296 291 297 gc->reg_base = data->pair_base[idx]; 292 298 ct->regs.mask = data->en_offset[idx]; 299 + 300 + /* gc->reg_base is defined and so is gc->writel */ 301 + irq_reg_writel(gc, data->irq_fwd_mask[idx], 302 + data->en_offset[idx]); 293 303 294 304 ct->chip.irq_mask = irq_gc_mask_clr_bit; 295 305 ct->chip.irq_unmask = irq_gc_mask_set_bit;
+3 -3
drivers/irqchip/irq-crossbar.c
··· 341 341 int err; 342 342 343 343 if (!parent) { 344 - pr_err("%s: no parent, giving up\n", node->full_name); 344 + pr_err("%pOF: no parent, giving up\n", node); 345 345 return -ENODEV; 346 346 } 347 347 348 348 parent_domain = irq_find_host(parent); 349 349 if (!parent_domain) { 350 - pr_err("%s: unable to obtain parent domain\n", node->full_name); 350 + pr_err("%pOF: unable to obtain parent domain\n", node); 351 351 return -ENXIO; 352 352 } 353 353 ··· 360 360 node, &crossbar_domain_ops, 361 361 NULL); 362 362 if (!domain) { 363 - pr_err("%s: failed to allocated domain\n", node->full_name); 363 + pr_err("%pOF: failed to allocated domain\n", node); 364 364 return -ENOMEM; 365 365 } 366 366
+4 -4
drivers/irqchip/irq-digicolor.c
··· 78 78 79 79 reg_base = of_iomap(node, 0); 80 80 if (!reg_base) { 81 - pr_err("%s: unable to map IC registers\n", node->full_name); 81 + pr_err("%pOF: unable to map IC registers\n", node); 82 82 return -ENXIO; 83 83 } 84 84 ··· 88 88 89 89 ucregs = syscon_regmap_lookup_by_phandle(node, "syscon"); 90 90 if (IS_ERR(ucregs)) { 91 - pr_err("%s: unable to map UC registers\n", node->full_name); 91 + pr_err("%pOF: unable to map UC registers\n", node); 92 92 return PTR_ERR(ucregs); 93 93 } 94 94 /* channel 1, regular IRQs */ ··· 97 97 digicolor_irq_domain = 98 98 irq_domain_add_linear(node, 64, &irq_generic_chip_ops, NULL); 99 99 if (!digicolor_irq_domain) { 100 - pr_err("%s: unable to create IRQ domain\n", node->full_name); 100 + pr_err("%pOF: unable to create IRQ domain\n", node); 101 101 return -ENOMEM; 102 102 } 103 103 ··· 105 105 "digicolor_irq", handle_level_irq, 106 106 clr, 0, 0); 107 107 if (ret) { 108 - pr_err("%s: unable to allocate IRQ gc\n", node->full_name); 108 + pr_err("%pOF: unable to allocate IRQ gc\n", node); 109 109 return ret; 110 110 } 111 111
+6 -6
drivers/irqchip/irq-dw-apb-ictl.c
··· 79 79 /* Map the parent interrupt for the chained handler */ 80 80 irq = irq_of_parse_and_map(np, 0); 81 81 if (irq <= 0) { 82 - pr_err("%s: unable to parse irq\n", np->full_name); 82 + pr_err("%pOF: unable to parse irq\n", np); 83 83 return -EINVAL; 84 84 } 85 85 86 86 ret = of_address_to_resource(np, 0, &r); 87 87 if (ret) { 88 - pr_err("%s: unable to get resource\n", np->full_name); 88 + pr_err("%pOF: unable to get resource\n", np); 89 89 return ret; 90 90 } 91 91 92 92 if (!request_mem_region(r.start, resource_size(&r), np->full_name)) { 93 - pr_err("%s: unable to request mem region\n", np->full_name); 93 + pr_err("%pOF: unable to request mem region\n", np); 94 94 return -ENOMEM; 95 95 } 96 96 97 97 iobase = ioremap(r.start, resource_size(&r)); 98 98 if (!iobase) { 99 - pr_err("%s: unable to map resource\n", np->full_name); 99 + pr_err("%pOF: unable to map resource\n", np); 100 100 ret = -ENOMEM; 101 101 goto err_release; 102 102 } ··· 123 123 domain = irq_domain_add_linear(np, nrirqs, 124 124 &irq_generic_chip_ops, NULL); 125 125 if (!domain) { 126 - pr_err("%s: unable to add irq domain\n", np->full_name); 126 + pr_err("%pOF: unable to add irq domain\n", np); 127 127 ret = -ENOMEM; 128 128 goto err_unmap; 129 129 } ··· 132 132 handle_level_irq, clr, 0, 133 133 IRQ_GC_INIT_MASK_CACHE); 134 134 if (ret) { 135 - pr_err("%s: unable to alloc irq domain gc\n", np->full_name); 135 + pr_err("%pOF: unable to alloc irq domain gc\n", np); 136 136 goto err_unmap; 137 137 } 138 138
+1 -1
drivers/irqchip/irq-gic-v3-its-pci-msi.c
··· 138 138 if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name)) 139 139 continue; 140 140 141 - pr_info("PCI/MSI: %s domain created\n", np->full_name); 141 + pr_info("PCI/MSI: %pOF domain created\n", np); 142 142 } 143 143 144 144 return 0;
+1380 -116
drivers/irqchip/irq-gic-v3-its.c
··· 1 1 /* 2 - * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. 2 + * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. 3 3 * Author: Marc Zyngier <marc.zyngier@arm.com> 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify ··· 36 36 37 37 #include <linux/irqchip.h> 38 38 #include <linux/irqchip/arm-gic-v3.h> 39 + #include <linux/irqchip/arm-gic-v4.h> 39 40 40 41 #include <asm/cputype.h> 41 42 #include <asm/exception.h> ··· 48 47 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) 49 48 50 49 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) 50 + 51 + static u32 lpi_id_bits; 52 + 53 + /* 54 + * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to 55 + * deal with (one configuration byte per interrupt). PENDBASE has to 56 + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). 57 + */ 58 + #define LPI_NRBITS lpi_id_bits 59 + #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) 60 + #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) 61 + 62 + #define LPI_PROP_DEFAULT_PRIO 0xa0 51 63 52 64 /* 53 65 * Collection structure - just an ID, and a redistributor address to ··· 102 88 u32 ite_size; 103 89 u32 device_ids; 104 90 int numa_node; 91 + bool is_v4; 105 92 }; 106 93 107 94 #define ITS_ITT_ALIGN SZ_256 ··· 115 100 u16 *col_map; 116 101 irq_hw_number_t lpi_base; 117 102 int nr_lpis; 103 + struct mutex vlpi_lock; 104 + struct its_vm *vm; 105 + struct its_vlpi_map *vlpi_maps; 106 + int nr_vlpis; 118 107 }; 119 108 120 109 /* 121 - * The ITS view of a device - belongs to an ITS, a collection, owns an 122 - * interrupt translation table, and a list of interrupts. 110 + * The ITS view of a device - belongs to an ITS, owns an interrupt 111 + * translation table, and a list of interrupts. If it some of its 112 + * LPIs are injected into a guest (GICv4), the event_map.vm field 113 + * indicates which one. 
123 114 */ 124 115 struct its_device { 125 116 struct list_head entry; ··· 136 115 u32 device_id; 137 116 }; 138 117 118 + static struct { 119 + raw_spinlock_t lock; 120 + struct its_device *dev; 121 + struct its_vpe **vpes; 122 + int next_victim; 123 + } vpe_proxy; 124 + 139 125 static LIST_HEAD(its_nodes); 140 126 static DEFINE_SPINLOCK(its_lock); 141 127 static struct rdists *gic_rdists; 142 128 static struct irq_domain *its_parent; 143 129 130 + /* 131 + * We have a maximum number of 16 ITSs in the whole system if we're 132 + * using the ITSList mechanism 133 + */ 134 + #define ITS_LIST_MAX 16 135 + 136 + static unsigned long its_list_map; 137 + static u16 vmovp_seq_num; 138 + static DEFINE_RAW_SPINLOCK(vmovp_lock); 139 + 140 + static DEFINE_IDA(its_vpeid_ida); 141 + 144 142 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 145 143 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 144 + #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) 146 145 147 146 static struct its_collection *dev_event_to_col(struct its_device *its_dev, 148 147 u32 event) ··· 182 141 struct its_device *dev; 183 142 u32 event_id; 184 143 } its_inv_cmd; 144 + 145 + struct { 146 + struct its_device *dev; 147 + u32 event_id; 148 + } its_clear_cmd; 185 149 186 150 struct { 187 151 struct its_device *dev; ··· 223 177 struct { 224 178 struct its_collection *col; 225 179 } its_invall_cmd; 180 + 181 + struct { 182 + struct its_vpe *vpe; 183 + } its_vinvall_cmd; 184 + 185 + struct { 186 + struct its_vpe *vpe; 187 + struct its_collection *col; 188 + bool valid; 189 + } its_vmapp_cmd; 190 + 191 + struct { 192 + struct its_vpe *vpe; 193 + struct its_device *dev; 194 + u32 virt_id; 195 + u32 event_id; 196 + bool db_enabled; 197 + } its_vmapti_cmd; 198 + 199 + struct { 200 + struct its_vpe *vpe; 201 + struct its_device *dev; 202 + u32 event_id; 203 + bool db_enabled; 204 + } its_vmovi_cmd; 205 + 206 + struct { 207 + struct its_vpe *vpe; 208 + struct its_collection *col; 209 + u16 seq_num; 210 + u16 its_list; 211 + } its_vmovp_cmd; 226 212 }; 227 213 }; 228 214 ··· 270 192 271 193 typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, 272 194 struct its_cmd_desc *); 195 + 196 + typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *, 197 + struct its_cmd_desc *); 273 198 274 199 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) 275 200 { ··· 324 243 static void its_encode_collection(struct its_cmd_block *cmd, u16 col) 325 244 { 326 245 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); 246 + } 247 + 248 + static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) 249 + { 250 + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); 251 + } 252 + 253 + static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) 254 + { 255 + its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); 256 + } 257 + 258 + static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) 259 + { 260 + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); 261 + } 262 + 263 + static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) 264 + { 265 + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); 266 + } 267 + 268 + static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) 269 + { 270 + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); 271 + } 272 + 273 + static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) 274 + { 275 + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); 276 + } 277 + 278 + static 
void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) 279 + { 280 + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16); 281 + } 282 + 283 + static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) 284 + { 285 + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); 327 286 } 328 287 329 288 static inline void its_fixup_cmd(struct its_cmd_block *cmd) ··· 479 358 return col; 480 359 } 481 360 361 + static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd, 362 + struct its_cmd_desc *desc) 363 + { 364 + struct its_collection *col; 365 + 366 + col = dev_event_to_col(desc->its_int_cmd.dev, 367 + desc->its_int_cmd.event_id); 368 + 369 + its_encode_cmd(cmd, GITS_CMD_INT); 370 + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); 371 + its_encode_event_id(cmd, desc->its_int_cmd.event_id); 372 + 373 + its_fixup_cmd(cmd); 374 + 375 + return col; 376 + } 377 + 378 + static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd, 379 + struct its_cmd_desc *desc) 380 + { 381 + struct its_collection *col; 382 + 383 + col = dev_event_to_col(desc->its_clear_cmd.dev, 384 + desc->its_clear_cmd.event_id); 385 + 386 + its_encode_cmd(cmd, GITS_CMD_CLEAR); 387 + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); 388 + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); 389 + 390 + its_fixup_cmd(cmd); 391 + 392 + return col; 393 + } 394 + 482 395 static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, 483 396 struct its_cmd_desc *desc) 484 397 { ··· 522 367 its_fixup_cmd(cmd); 523 368 524 369 return NULL; 370 + } 371 + 372 + static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd, 373 + struct its_cmd_desc *desc) 374 + { 375 + its_encode_cmd(cmd, GITS_CMD_VINVALL); 376 + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); 377 + 378 + its_fixup_cmd(cmd); 379 + 380 + return desc->its_vinvall_cmd.vpe; 381 + } 382 + 383 + static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd, 384 + struct its_cmd_desc *desc) 385 + { 386 + unsigned long vpt_addr; 387 + 388 + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); 389 + 390 + its_encode_cmd(cmd, GITS_CMD_VMAPP); 391 + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); 392 + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); 393 + its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address); 394 + its_encode_vpt_addr(cmd, vpt_addr); 395 + its_encode_vpt_size(cmd, LPI_NRBITS - 1); 396 + 397 + its_fixup_cmd(cmd); 398 + 399 + return desc->its_vmapp_cmd.vpe; 400 + } 401 + 402 + static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd, 403 + struct its_cmd_desc *desc) 404 + { 405 + u32 db; 406 + 407 + if (desc->its_vmapti_cmd.db_enabled) 408 + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; 409 + else 410 + db = 1023; 411 + 412 + its_encode_cmd(cmd, GITS_CMD_VMAPTI); 413 + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); 414 + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); 415 + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); 416 + its_encode_db_phys_id(cmd, db); 417 + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); 418 + 419 + its_fixup_cmd(cmd); 420 + 421 + return desc->its_vmapti_cmd.vpe; 422 + } 423 + 424 + static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd, 425 + struct its_cmd_desc *desc) 426 + { 427 + u32 db; 428 + 429 + if (desc->its_vmovi_cmd.db_enabled) 430 + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; 431 + else 432 + db = 1023; 433 + 434 + 
its_encode_cmd(cmd, GITS_CMD_VMOVI); 435 + its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); 436 + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); 437 + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); 438 + its_encode_db_phys_id(cmd, db); 439 + its_encode_db_valid(cmd, true); 440 + 441 + its_fixup_cmd(cmd); 442 + 443 + return desc->its_vmovi_cmd.vpe; 444 + } 445 + 446 + static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd, 447 + struct its_cmd_desc *desc) 448 + { 449 + its_encode_cmd(cmd, GITS_CMD_VMOVP); 450 + its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); 451 + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); 452 + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); 453 + its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address); 454 + 455 + its_fixup_cmd(cmd); 456 + 457 + return desc->its_vmovp_cmd.vpe; 525 458 } 526 459 527 460 static u64 its_cmd_ptr_to_offset(struct its_node *its, ··· 696 453 697 454 while (1) { 698 455 rd_idx = readl_relaxed(its->base + GITS_CREADR); 699 - if (rd_idx >= to_idx || rd_idx < from_idx) 456 + 457 + /* Direct case */ 458 + if (from_idx < to_idx && rd_idx >= to_idx) 459 + break; 460 + 461 + /* Wrapped case */ 462 + if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) 700 463 break; 701 464 702 465 count--; ··· 715 466 } 716 467 } 717 468 718 - static void its_send_single_command(struct its_node *its, 719 - its_cmd_builder_t builder, 720 - struct its_cmd_desc *desc) 469 + /* Warning, macro hell follows */ 470 + #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ 471 + void name(struct its_node *its, \ 472 + buildtype builder, \ 473 + struct its_cmd_desc *desc) \ 474 + { \ 475 + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ 476 + synctype *sync_obj; \ 477 + unsigned long flags; \ 478 + \ 479 + raw_spin_lock_irqsave(&its->lock, flags); \ 480 + \ 481 + cmd = its_allocate_entry(its); \ 482 + if (!cmd) { /* We're soooooo screewed... */ \ 483 + raw_spin_unlock_irqrestore(&its->lock, flags); \ 484 + return; \ 485 + } \ 486 + sync_obj = builder(cmd, desc); \ 487 + its_flush_cmd(its, cmd); \ 488 + \ 489 + if (sync_obj) { \ 490 + sync_cmd = its_allocate_entry(its); \ 491 + if (!sync_cmd) \ 492 + goto post; \ 493 + \ 494 + buildfn(sync_cmd, sync_obj); \ 495 + its_flush_cmd(its, sync_cmd); \ 496 + } \ 497 + \ 498 + post: \ 499 + next_cmd = its_post_commands(its); \ 500 + raw_spin_unlock_irqrestore(&its->lock, flags); \ 501 + \ 502 + its_wait_for_range_completion(its, cmd, next_cmd); \ 503 + } 504 + 505 + static void its_build_sync_cmd(struct its_cmd_block *sync_cmd, 506 + struct its_collection *sync_col) 721 507 { 722 - struct its_cmd_block *cmd, *sync_cmd, *next_cmd; 723 - struct its_collection *sync_col; 724 - unsigned long flags; 508 + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); 509 + its_encode_target(sync_cmd, sync_col->target_address); 725 510 726 - raw_spin_lock_irqsave(&its->lock, flags); 511 + its_fixup_cmd(sync_cmd); 512 + } 727 513 728 - cmd = its_allocate_entry(its); 729 - if (!cmd) { /* We're soooooo screewed... 
*/ 730 - pr_err_ratelimited("ITS can't allocate, dropping command\n"); 731 - raw_spin_unlock_irqrestore(&its->lock, flags); 732 - return; 733 - } 734 - sync_col = builder(cmd, desc); 735 - its_flush_cmd(its, cmd); 514 + static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, 515 + struct its_collection, its_build_sync_cmd) 736 516 737 - if (sync_col) { 738 - sync_cmd = its_allocate_entry(its); 739 - if (!sync_cmd) { 740 - pr_err_ratelimited("ITS can't SYNC, skipping\n"); 741 - goto post; 742 - } 743 - its_encode_cmd(sync_cmd, GITS_CMD_SYNC); 744 - its_encode_target(sync_cmd, sync_col->target_address); 745 - its_fixup_cmd(sync_cmd); 746 - its_flush_cmd(its, sync_cmd); 747 - } 517 + static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd, 518 + struct its_vpe *sync_vpe) 519 + { 520 + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); 521 + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); 748 522 749 - post: 750 - next_cmd = its_post_commands(its); 751 - raw_spin_unlock_irqrestore(&its->lock, flags); 523 + its_fixup_cmd(sync_cmd); 524 + } 752 525 753 - its_wait_for_range_completion(its, cmd, next_cmd); 526 + static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, 527 + struct its_vpe, its_build_vsync_cmd) 528 + 529 + static void its_send_int(struct its_device *dev, u32 event_id) 530 + { 531 + struct its_cmd_desc desc; 532 + 533 + desc.its_int_cmd.dev = dev; 534 + desc.its_int_cmd.event_id = event_id; 535 + 536 + its_send_single_command(dev->its, its_build_int_cmd, &desc); 537 + } 538 + 539 + static void its_send_clear(struct its_device *dev, u32 event_id) 540 + { 541 + struct its_cmd_desc desc; 542 + 543 + desc.its_clear_cmd.dev = dev; 544 + desc.its_clear_cmd.event_id = event_id; 545 + 546 + its_send_single_command(dev->its, its_build_clear_cmd, &desc); 754 547 } 755 548 756 549 static void its_send_inv(struct its_device *dev, u32 event_id) ··· 868 577 its_send_single_command(its, its_build_invall_cmd, &desc); 869 578 } 870 579 580 + static void its_send_vmapti(struct its_device *dev, u32 id) 581 + { 582 + struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; 583 + struct its_cmd_desc desc; 584 + 585 + desc.its_vmapti_cmd.vpe = map->vpe; 586 + desc.its_vmapti_cmd.dev = dev; 587 + desc.its_vmapti_cmd.virt_id = map->vintid; 588 + desc.its_vmapti_cmd.event_id = id; 589 + desc.its_vmapti_cmd.db_enabled = map->db_enabled; 590 + 591 + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); 592 + } 593 + 594 + static void its_send_vmovi(struct its_device *dev, u32 id) 595 + { 596 + struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; 597 + struct its_cmd_desc desc; 598 + 599 + desc.its_vmovi_cmd.vpe = map->vpe; 600 + desc.its_vmovi_cmd.dev = dev; 601 + desc.its_vmovi_cmd.event_id = id; 602 + desc.its_vmovi_cmd.db_enabled = map->db_enabled; 603 + 604 + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); 605 + } 606 + 607 + static void its_send_vmapp(struct its_vpe *vpe, bool valid) 608 + { 609 + struct its_cmd_desc desc; 610 + struct its_node *its; 611 + 612 + desc.its_vmapp_cmd.vpe = vpe; 613 + desc.its_vmapp_cmd.valid = valid; 614 + 615 + list_for_each_entry(its, &its_nodes, entry) { 616 + if (!its->is_v4) 617 + continue; 618 + 619 + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; 620 + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); 621 + } 622 + } 623 + 624 + static void its_send_vmovp(struct its_vpe *vpe) 625 + { 626 + struct its_cmd_desc desc; 627 + struct its_node *its; 628 + unsigned long flags; 629 + int 
col_id = vpe->col_idx; 630 + 631 + desc.its_vmovp_cmd.vpe = vpe; 632 + desc.its_vmovp_cmd.its_list = (u16)its_list_map; 633 + 634 + if (!its_list_map) { 635 + its = list_first_entry(&its_nodes, struct its_node, entry); 636 + desc.its_vmovp_cmd.seq_num = 0; 637 + desc.its_vmovp_cmd.col = &its->collections[col_id]; 638 + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); 639 + return; 640 + } 641 + 642 + /* 643 + * Yet another marvel of the architecture. If using the 644 + * its_list "feature", we need to make sure that all ITSs 645 + * receive all VMOVP commands in the same order. The only way 646 + * to guarantee this is to make vmovp a serialization point. 647 + * 648 + * Wall <-- Head. 649 + */ 650 + raw_spin_lock_irqsave(&vmovp_lock, flags); 651 + 652 + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; 653 + 654 + /* Emit VMOVPs */ 655 + list_for_each_entry(its, &its_nodes, entry) { 656 + if (!its->is_v4) 657 + continue; 658 + 659 + desc.its_vmovp_cmd.col = &its->collections[col_id]; 660 + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); 661 + } 662 + 663 + raw_spin_unlock_irqrestore(&vmovp_lock, flags); 664 + } 665 + 666 + static void its_send_vinvall(struct its_vpe *vpe) 667 + { 668 + struct its_cmd_desc desc; 669 + struct its_node *its; 670 + 671 + desc.its_vinvall_cmd.vpe = vpe; 672 + 673 + list_for_each_entry(its, &its_nodes, entry) { 674 + if (!its->is_v4) 675 + continue; 676 + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); 677 + } 678 + } 679 + 871 680 /* 872 681 * irqchip functions - assumes MSI, mostly. 873 682 */ ··· 978 587 return d->hwirq - its_dev->event_map.lpi_base; 979 588 } 980 589 981 - static void lpi_set_config(struct irq_data *d, bool enable) 590 + static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) 982 591 { 983 - struct its_device *its_dev = irq_data_get_irq_chip_data(d); 984 - irq_hw_number_t hwirq = d->hwirq; 985 - u32 id = its_get_event_id(d); 986 - u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192; 592 + irq_hw_number_t hwirq; 593 + struct page *prop_page; 594 + u8 *cfg; 987 595 988 - if (enable) 989 - *cfg |= LPI_PROP_ENABLED; 990 - else 991 - *cfg &= ~LPI_PROP_ENABLED; 596 + if (irqd_is_forwarded_to_vcpu(d)) { 597 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 598 + u32 event = its_get_event_id(d); 599 + 600 + prop_page = its_dev->event_map.vm->vprop_page; 601 + hwirq = its_dev->event_map.vlpi_maps[event].vintid; 602 + } else { 603 + prop_page = gic_rdists->prop_page; 604 + hwirq = d->hwirq; 605 + } 606 + 607 + cfg = page_address(prop_page) + hwirq - 8192; 608 + *cfg &= ~clr; 609 + *cfg |= set | LPI_PROP_GROUP1; 992 610 993 611 /* 994 612 * Make the above write visible to the redistributors. 
··· 1008 608 gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); 1009 609 else 1010 610 dsb(ishst); 1011 - its_send_inv(its_dev, id); 611 + } 612 + 613 + static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) 614 + { 615 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 616 + 617 + lpi_write_config(d, clr, set); 618 + its_send_inv(its_dev, its_get_event_id(d)); 619 + } 620 + 621 + static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) 622 + { 623 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 624 + u32 event = its_get_event_id(d); 625 + 626 + if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) 627 + return; 628 + 629 + its_dev->event_map.vlpi_maps[event].db_enabled = enable; 630 + 631 + /* 632 + * More fun with the architecture: 633 + * 634 + * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI 635 + * value or to 1023, depending on the enable bit. But that 636 + * would be issueing a mapping for an /existing/ DevID+EventID 637 + * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI 638 + * to the /same/ vPE, using this opportunity to adjust the 639 + * doorbell. Mouahahahaha. We loves it, Precious. 640 + */ 641 + its_send_vmovi(its_dev, event); 1012 642 } 1013 643 1014 644 static void its_mask_irq(struct irq_data *d) 1015 645 { 1016 - lpi_set_config(d, false); 646 + if (irqd_is_forwarded_to_vcpu(d)) 647 + its_vlpi_set_doorbell(d, false); 648 + 649 + lpi_update_config(d, LPI_PROP_ENABLED, 0); 1017 650 } 1018 651 1019 652 static void its_unmask_irq(struct irq_data *d) 1020 653 { 1021 - lpi_set_config(d, true); 654 + if (irqd_is_forwarded_to_vcpu(d)) 655 + its_vlpi_set_doorbell(d, true); 656 + 657 + lpi_update_config(d, 0, LPI_PROP_ENABLED); 1022 658 } 1023 659 1024 660 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, ··· 1065 629 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 1066 630 struct its_collection *target_col; 1067 631 u32 id = its_get_event_id(d); 632 + 633 + /* A forwarded interrupt should use irq_set_vcpu_affinity */ 634 + if (irqd_is_forwarded_to_vcpu(d)) 635 + return -EINVAL; 1068 636 1069 637 /* lpi cannot be routed to a redistributor that is on a foreign node */ 1070 638 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { ··· 1089 649 target_col = &its_dev->its->collections[cpu]; 1090 650 its_send_movi(its_dev, target_col, id); 1091 651 its_dev->event_map.col_map[id] = cpu; 652 + irq_data_update_effective_affinity(d, cpumask_of(cpu)); 1092 653 } 1093 654 1094 655 return IRQ_SET_MASK_OK_DONE; ··· 1111 670 iommu_dma_map_msi_msg(d->irq, msg); 1112 671 } 1113 672 673 + static int its_irq_set_irqchip_state(struct irq_data *d, 674 + enum irqchip_irq_state which, 675 + bool state) 676 + { 677 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 678 + u32 event = its_get_event_id(d); 679 + 680 + if (which != IRQCHIP_STATE_PENDING) 681 + return -EINVAL; 682 + 683 + if (state) 684 + its_send_int(its_dev, event); 685 + else 686 + its_send_clear(its_dev, event); 687 + 688 + return 0; 689 + } 690 + 691 + static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) 692 + { 693 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 694 + u32 event = its_get_event_id(d); 695 + int ret = 0; 696 + 697 + if (!info->map) 698 + return -EINVAL; 699 + 700 + mutex_lock(&its_dev->event_map.vlpi_lock); 701 + 702 + if (!its_dev->event_map.vm) { 703 + struct its_vlpi_map *maps; 704 + 705 + maps = kzalloc(sizeof(*maps) * its_dev->event_map.nr_lpis, 706 + 
GFP_KERNEL); 707 + if (!maps) { 708 + ret = -ENOMEM; 709 + goto out; 710 + } 711 + 712 + its_dev->event_map.vm = info->map->vm; 713 + its_dev->event_map.vlpi_maps = maps; 714 + } else if (its_dev->event_map.vm != info->map->vm) { 715 + ret = -EINVAL; 716 + goto out; 717 + } 718 + 719 + /* Get our private copy of the mapping information */ 720 + its_dev->event_map.vlpi_maps[event] = *info->map; 721 + 722 + if (irqd_is_forwarded_to_vcpu(d)) { 723 + /* Already mapped, move it around */ 724 + its_send_vmovi(its_dev, event); 725 + } else { 726 + /* Drop the physical mapping */ 727 + its_send_discard(its_dev, event); 728 + 729 + /* and install the virtual one */ 730 + its_send_vmapti(its_dev, event); 731 + irqd_set_forwarded_to_vcpu(d); 732 + 733 + /* Increment the number of VLPIs */ 734 + its_dev->event_map.nr_vlpis++; 735 + } 736 + 737 + out: 738 + mutex_unlock(&its_dev->event_map.vlpi_lock); 739 + return ret; 740 + } 741 + 742 + static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) 743 + { 744 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 745 + u32 event = its_get_event_id(d); 746 + int ret = 0; 747 + 748 + mutex_lock(&its_dev->event_map.vlpi_lock); 749 + 750 + if (!its_dev->event_map.vm || 751 + !its_dev->event_map.vlpi_maps[event].vm) { 752 + ret = -EINVAL; 753 + goto out; 754 + } 755 + 756 + /* Copy our mapping information to the incoming request */ 757 + *info->map = its_dev->event_map.vlpi_maps[event]; 758 + 759 + out: 760 + mutex_unlock(&its_dev->event_map.vlpi_lock); 761 + return ret; 762 + } 763 + 764 + static int its_vlpi_unmap(struct irq_data *d) 765 + { 766 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 767 + u32 event = its_get_event_id(d); 768 + int ret = 0; 769 + 770 + mutex_lock(&its_dev->event_map.vlpi_lock); 771 + 772 + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { 773 + ret = -EINVAL; 774 + goto out; 775 + } 776 + 777 + /* Drop the virtual mapping */ 778 + its_send_discard(its_dev, event); 779 + 780 + /* and restore the physical one */ 781 + irqd_clr_forwarded_to_vcpu(d); 782 + its_send_mapti(its_dev, d->hwirq, event); 783 + lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | 784 + LPI_PROP_ENABLED | 785 + LPI_PROP_GROUP1)); 786 + 787 + /* 788 + * Drop the refcount and make the device available again if 789 + * this was the last VLPI. 790 + */ 791 + if (!--its_dev->event_map.nr_vlpis) { 792 + its_dev->event_map.vm = NULL; 793 + kfree(its_dev->event_map.vlpi_maps); 794 + } 795 + 796 + out: 797 + mutex_unlock(&its_dev->event_map.vlpi_lock); 798 + return ret; 799 + } 800 + 801 + static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) 802 + { 803 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 804 + 805 + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) 806 + return -EINVAL; 807 + 808 + if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) 809 + lpi_update_config(d, 0xff, info->config); 810 + else 811 + lpi_write_config(d, 0xff, info->config); 812 + its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED)); 813 + 814 + return 0; 815 + } 816 + 817 + static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 818 + { 819 + struct its_device *its_dev = irq_data_get_irq_chip_data(d); 820 + struct its_cmd_info *info = vcpu_info; 821 + 822 + /* Need a v4 ITS */ 823 + if (!its_dev->its->is_v4) 824 + return -EINVAL; 825 + 826 + /* Unmap request? 
*/ 827 + if (!info) 828 + return its_vlpi_unmap(d); 829 + 830 + switch (info->cmd_type) { 831 + case MAP_VLPI: 832 + return its_vlpi_map(d, info); 833 + 834 + case GET_VLPI: 835 + return its_vlpi_get(d, info); 836 + 837 + case PROP_UPDATE_VLPI: 838 + case PROP_UPDATE_AND_INV_VLPI: 839 + return its_vlpi_prop_update(d, info); 840 + 841 + default: 842 + return -EINVAL; 843 + } 844 + } 845 + 1114 846 static struct irq_chip its_irq_chip = { 1115 847 .name = "ITS", 1116 848 .irq_mask = its_mask_irq, ··· 1291 677 .irq_eoi = irq_chip_eoi_parent, 1292 678 .irq_set_affinity = its_set_affinity, 1293 679 .irq_compose_msi_msg = its_irq_compose_msi_msg, 680 + .irq_set_irqchip_state = its_irq_set_irqchip_state, 681 + .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, 1294 682 }; 1295 683 1296 684 /* ··· 1311 695 1312 696 static unsigned long *lpi_bitmap; 1313 697 static u32 lpi_chunks; 1314 - static u32 lpi_id_bits; 1315 698 static DEFINE_SPINLOCK(lpi_lock); 1316 699 1317 700 static int its_lpi_to_chunk(int lpi) ··· 1381 766 return bitmap; 1382 767 } 1383 768 1384 - static void its_lpi_free(struct event_lpi_map *map) 769 + static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids) 1385 770 { 1386 - int base = map->lpi_base; 1387 - int nr_ids = map->nr_lpis; 1388 771 int lpi; 1389 772 1390 773 spin_lock(&lpi_lock); 1391 774 1392 775 for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { 1393 776 int chunk = its_lpi_to_chunk(lpi); 777 + 1394 778 BUG_ON(chunk > lpi_chunks); 1395 779 if (test_bit(chunk, lpi_bitmap)) { 1396 780 clear_bit(chunk, lpi_bitmap); ··· 1400 786 1401 787 spin_unlock(&lpi_lock); 1402 788 1403 - kfree(map->lpi_map); 1404 - kfree(map->col_map); 789 + kfree(bitmap); 1405 790 } 1406 791 1407 - /* 1408 - * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to 1409 - * deal with (one configuration byte per interrupt). PENDBASE has to 1410 - * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). 
1411 - */ 1412 - #define LPI_NRBITS lpi_id_bits 1413 - #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) 1414 - #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) 792 + static struct page *its_allocate_prop_table(gfp_t gfp_flags) 793 + { 794 + struct page *prop_page; 1415 795 1416 - #define LPI_PROP_DEFAULT_PRIO 0xa0 796 + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); 797 + if (!prop_page) 798 + return NULL; 799 + 800 + /* Priority 0xa0, Group-1, disabled */ 801 + memset(page_address(prop_page), 802 + LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, 803 + LPI_PROPBASE_SZ); 804 + 805 + /* Make sure the GIC will observe the written configuration */ 806 + gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ); 807 + 808 + return prop_page; 809 + } 810 + 811 + static void its_free_prop_table(struct page *prop_page) 812 + { 813 + free_pages((unsigned long)page_address(prop_page), 814 + get_order(LPI_PROPBASE_SZ)); 815 + } 1417 816 1418 817 static int __init its_alloc_lpi_tables(void) 1419 818 { 1420 819 phys_addr_t paddr; 1421 820 1422 821 lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS); 1423 - gic_rdists->prop_page = alloc_pages(GFP_NOWAIT, 1424 - get_order(LPI_PROPBASE_SZ)); 822 + gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT); 1425 823 if (!gic_rdists->prop_page) { 1426 824 pr_err("Failed to allocate PROPBASE\n"); 1427 825 return -ENOMEM; ··· 1441 815 1442 816 paddr = page_to_phys(gic_rdists->prop_page); 1443 817 pr_info("GIC: using LPI property table @%pa\n", &paddr); 1444 - 1445 - /* Priority 0xa0, Group-1, disabled */ 1446 - memset(page_address(gic_rdists->prop_page), 1447 - LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, 1448 - LPI_PROPBASE_SZ); 1449 - 1450 - /* Make sure the GIC will observe the written configuration */ 1451 - gic_flush_dcache_to_poc(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ); 1452 818 1453 819 return its_lpi_init(lpi_id_bits); 1454 820 } ··· 1580 962 return 0; 1581 963 } 1582 964 1583 - static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser, 1584 - u32 psz, u32 *order) 965 + static bool its_parse_indirect_baser(struct its_node *its, 966 + struct its_baser *baser, 967 + u32 psz, u32 *order) 1585 968 { 1586 - u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser)); 969 + u64 tmp = its_read_baser(its, baser); 970 + u64 type = GITS_BASER_TYPE(tmp); 971 + u64 esz = GITS_BASER_ENTRY_SIZE(tmp); 1587 972 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; 1588 973 u32 ids = its->device_ids; 1589 974 u32 new_order = *order; ··· 1625 1004 if (new_order >= MAX_ORDER) { 1626 1005 new_order = MAX_ORDER - 1; 1627 1006 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); 1628 - pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n", 1629 - &its->phys_base, its->device_ids, ids); 1007 + pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n", 1008 + &its->phys_base, its_base_type_string[type], 1009 + its->device_ids, ids); 1630 1010 } 1631 1011 1632 1012 *order = new_order; ··· 1675 1053 u32 order = get_order(psz); 1676 1054 bool indirect = false; 1677 1055 1678 - if (type == GITS_BASER_TYPE_NONE) 1056 + switch (type) { 1057 + case GITS_BASER_TYPE_NONE: 1679 1058 continue; 1680 1059 1681 - if (type == GITS_BASER_TYPE_DEVICE) 1682 - indirect = its_parse_baser_device(its, baser, psz, &order); 1060 + case GITS_BASER_TYPE_DEVICE: 1061 + case GITS_BASER_TYPE_VCPU: 1062 + indirect = its_parse_indirect_baser(its, baser, 1063 + psz, &order); 1064 + break; 1065 + } 1683 1066 
1684 1067 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); 1685 1068 if (err < 0) { ··· 1711 1084 return 0; 1712 1085 } 1713 1086 1087 + static struct page *its_allocate_pending_table(gfp_t gfp_flags) 1088 + { 1089 + struct page *pend_page; 1090 + /* 1091 + * The pending pages have to be at least 64kB aligned, 1092 + * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. 1093 + */ 1094 + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, 1095 + get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); 1096 + if (!pend_page) 1097 + return NULL; 1098 + 1099 + /* Make sure the GIC will observe the zero-ed page */ 1100 + gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); 1101 + 1102 + return pend_page; 1103 + } 1104 + 1105 + static void its_free_pending_table(struct page *pt) 1106 + { 1107 + free_pages((unsigned long)page_address(pt), 1108 + get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); 1109 + } 1110 + 1714 1111 static void its_cpu_init_lpis(void) 1715 1112 { 1716 1113 void __iomem *rbase = gic_data_rdist_rd_base(); ··· 1745 1094 pend_page = gic_data_rdist()->pend_page; 1746 1095 if (!pend_page) { 1747 1096 phys_addr_t paddr; 1748 - /* 1749 - * The pending pages have to be at least 64kB aligned, 1750 - * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. 1751 - */ 1752 - pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO, 1753 - get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); 1097 + 1098 + pend_page = its_allocate_pending_table(GFP_NOWAIT); 1754 1099 if (!pend_page) { 1755 1100 pr_err("Failed to allocate PENDBASE for CPU%d\n", 1756 1101 smp_processor_id()); 1757 1102 return; 1758 1103 } 1759 - 1760 - /* Make sure the GIC will observe the zero-ed page */ 1761 - gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); 1762 1104 1763 1105 paddr = page_to_phys(pend_page); 1764 1106 pr_info("CPU%d: using LPI pending table @%pa\n", ··· 1903 1259 return NULL; 1904 1260 } 1905 1261 1906 - static bool its_alloc_device_table(struct its_node *its, u32 dev_id) 1262 + static bool its_alloc_table_entry(struct its_baser *baser, u32 id) 1907 1263 { 1908 - struct its_baser *baser; 1909 1264 struct page *page; 1910 1265 u32 esz, idx; 1911 1266 __le64 *table; 1912 1267 1913 - baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); 1914 - 1915 - /* Don't allow device id that exceeds ITS hardware limit */ 1916 - if (!baser) 1917 - return (ilog2(dev_id) < its->device_ids); 1918 - 1919 1268 /* Don't allow device id that exceeds single, flat table limit */ 1920 1269 esz = GITS_BASER_ENTRY_SIZE(baser->val); 1921 1270 if (!(baser->val & GITS_BASER_INDIRECT)) 1922 - return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); 1271 + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); 1923 1272 1924 1273 /* Compute 1st level table index & check if that exceeds table limit */ 1925 - idx = dev_id >> ilog2(baser->psz / esz); 1274 + idx = id >> ilog2(baser->psz / esz); 1926 1275 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) 1927 1276 return false; 1928 1277 ··· 1944 1307 return true; 1945 1308 } 1946 1309 1310 + static bool its_alloc_device_table(struct its_node *its, u32 dev_id) 1311 + { 1312 + struct its_baser *baser; 1313 + 1314 + baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); 1315 + 1316 + /* Don't allow device id that exceeds ITS hardware limit */ 1317 + if (!baser) 1318 + return (ilog2(dev_id) < its->device_ids); 1319 + 1320 + return its_alloc_table_entry(baser, dev_id); 1321 + } 1322 + 1323 + static bool its_alloc_vpe_table(u32 vpe_id) 1324 + { 1325 + struct 
its_node *its; 1326 + 1327 + /* 1328 + * Make sure the L2 tables are allocated on *all* v4 ITSs. We 1329 + * could try and only do it on ITSs corresponding to devices 1330 + * that have interrupts targeted at this VPE, but the 1331 + * complexity becomes crazy (and you have tons of memory 1332 + * anyway, right?). 1333 + */ 1334 + list_for_each_entry(its, &its_nodes, entry) { 1335 + struct its_baser *baser; 1336 + 1337 + if (!its->is_v4) 1338 + continue; 1339 + 1340 + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); 1341 + if (!baser) 1342 + return false; 1343 + 1344 + if (!its_alloc_table_entry(baser, vpe_id)) 1345 + return false; 1346 + } 1347 + 1348 + return true; 1349 + } 1350 + 1947 1351 static struct its_device *its_create_device(struct its_node *its, u32 dev_id, 1948 - int nvecs) 1352 + int nvecs, bool alloc_lpis) 1949 1353 { 1950 1354 struct its_device *dev; 1951 - unsigned long *lpi_map; 1355 + unsigned long *lpi_map = NULL; 1952 1356 unsigned long flags; 1953 1357 u16 *col_map = NULL; 1954 1358 void *itt; ··· 2011 1333 sz = nr_ites * its->ite_size; 2012 1334 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 2013 1335 itt = kzalloc(sz, GFP_KERNEL); 2014 - lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 2015 - if (lpi_map) 2016 - col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL); 1336 + if (alloc_lpis) { 1337 + lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); 1338 + if (lpi_map) 1339 + col_map = kzalloc(sizeof(*col_map) * nr_lpis, 1340 + GFP_KERNEL); 1341 + } else { 1342 + col_map = kzalloc(sizeof(*col_map) * nr_ites, GFP_KERNEL); 1343 + nr_lpis = 0; 1344 + lpi_base = 0; 1345 + } 2017 1346 2018 - if (!dev || !itt || !lpi_map || !col_map) { 1347 + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { 2019 1348 kfree(dev); 2020 1349 kfree(itt); 2021 1350 kfree(lpi_map); ··· 2039 1354 dev->event_map.col_map = col_map; 2040 1355 dev->event_map.lpi_base = lpi_base; 2041 1356 dev->event_map.nr_lpis = nr_lpis; 1357 + mutex_init(&dev->event_map.vlpi_lock); 2042 1358 dev->device_id = dev_id; 2043 1359 INIT_LIST_HEAD(&dev->entry); 2044 1360 ··· 2098 1412 msi_info = msi_get_domain_info(domain); 2099 1413 its = msi_info->data; 2100 1414 1415 + if (!gic_rdists->has_direct_lpi && 1416 + vpe_proxy.dev && 1417 + vpe_proxy.dev->its == its && 1418 + dev_id == vpe_proxy.dev->device_id) { 1419 + /* Bad luck. 
Get yourself a better implementation */ 1420 + WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", 1421 + dev_id); 1422 + return -EINVAL; 1423 + } 1424 + 2101 1425 its_dev = its_find_device(its, dev_id); 2102 1426 if (its_dev) { 2103 1427 /* ··· 2119 1423 goto out; 2120 1424 } 2121 1425 2122 - its_dev = its_create_device(its, dev_id, nvec); 1426 + its_dev = its_create_device(its, dev_id, nvec, true); 2123 1427 if (!its_dev) 2124 1428 return -ENOMEM; 2125 1429 ··· 2177 1481 2178 1482 irq_domain_set_hwirq_and_chip(domain, virq + i, 2179 1483 hwirq, &its_irq_chip, its_dev); 1484 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); 2180 1485 pr_debug("ID:%d pID:%d vID:%d\n", 2181 1486 (int)(hwirq - its_dev->event_map.lpi_base), 2182 1487 (int) hwirq, virq + i); ··· 2192 1495 struct its_device *its_dev = irq_data_get_irq_chip_data(d); 2193 1496 u32 event = its_get_event_id(d); 2194 1497 const struct cpumask *cpu_mask = cpu_online_mask; 1498 + int cpu; 2195 1499 2196 1500 /* get the cpu_mask of local node */ 2197 1501 if (its_dev->its->numa_node >= 0) 2198 1502 cpu_mask = cpumask_of_node(its_dev->its->numa_node); 2199 1503 2200 1504 /* Bind the LPI to the first possible CPU */ 2201 - its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); 1505 + cpu = cpumask_first(cpu_mask); 1506 + its_dev->event_map.col_map[event] = cpu; 1507 + irq_data_update_effective_affinity(d, cpumask_of(cpu)); 2202 1508 2203 1509 /* Map the GIC IRQ and event to the device */ 2204 1510 its_send_mapti(its_dev, d->hwirq, event); ··· 2239 1539 /* If all interrupts have been freed, start mopping the floor */ 2240 1540 if (bitmap_empty(its_dev->event_map.lpi_map, 2241 1541 its_dev->event_map.nr_lpis)) { 2242 - its_lpi_free(&its_dev->event_map); 1542 + its_lpi_free_chunks(its_dev->event_map.lpi_map, 1543 + its_dev->event_map.lpi_base, 1544 + its_dev->event_map.nr_lpis); 1545 + kfree(its_dev->event_map.col_map); 2243 1546 2244 1547 /* Unmap device/itt */ 2245 1548 its_send_mapd(its_dev, 0); ··· 2257 1554 .free = its_irq_domain_free, 2258 1555 .activate = its_irq_domain_activate, 2259 1556 .deactivate = its_irq_domain_deactivate, 1557 + }; 1558 + 1559 + /* 1560 + * This is insane. 1561 + * 1562 + * If a GICv4 doesn't implement Direct LPIs (which is extremely 1563 + * likely), the only way to perform an invalidate is to use a fake 1564 + * device to issue an INV command, implying that the LPI has first 1565 + * been mapped to some event on that device. Since this is not exactly 1566 + * cheap, we try to keep that mapping around as long as possible, and 1567 + * only issue an UNMAP if we're short on available slots. 1568 + * 1569 + * Broken by design(tm). 1570 + */ 1571 + static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) 1572 + { 1573 + /* Already unmapped? */ 1574 + if (vpe->vpe_proxy_event == -1) 1575 + return; 1576 + 1577 + its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); 1578 + vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; 1579 + 1580 + /* 1581 + * We don't track empty slots at all, so let's move the 1582 + * next_victim pointer if we can quickly reuse that slot 1583 + * instead of nuking an existing entry. Not clear that this is 1584 + * always a win though, and this might just generate a ripple 1585 + * effect... Let's just hope VPEs don't migrate too often. 
1586 + */ 1587 + if (vpe_proxy.vpes[vpe_proxy.next_victim]) 1588 + vpe_proxy.next_victim = vpe->vpe_proxy_event; 1589 + 1590 + vpe->vpe_proxy_event = -1; 1591 + } 1592 + 1593 + static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) 1594 + { 1595 + if (!gic_rdists->has_direct_lpi) { 1596 + unsigned long flags; 1597 + 1598 + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 1599 + its_vpe_db_proxy_unmap_locked(vpe); 1600 + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 1601 + } 1602 + } 1603 + 1604 + static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) 1605 + { 1606 + /* Already mapped? */ 1607 + if (vpe->vpe_proxy_event != -1) 1608 + return; 1609 + 1610 + /* This slot was already allocated. Kick the other VPE out. */ 1611 + if (vpe_proxy.vpes[vpe_proxy.next_victim]) 1612 + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); 1613 + 1614 + /* Map the new VPE instead */ 1615 + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; 1616 + vpe->vpe_proxy_event = vpe_proxy.next_victim; 1617 + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; 1618 + 1619 + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; 1620 + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); 1621 + } 1622 + 1623 + static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) 1624 + { 1625 + unsigned long flags; 1626 + struct its_collection *target_col; 1627 + 1628 + if (gic_rdists->has_direct_lpi) { 1629 + void __iomem *rdbase; 1630 + 1631 + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; 1632 + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 1633 + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 1634 + cpu_relax(); 1635 + 1636 + return; 1637 + } 1638 + 1639 + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 1640 + 1641 + its_vpe_db_proxy_map_locked(vpe); 1642 + 1643 + target_col = &vpe_proxy.dev->its->collections[to]; 1644 + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); 1645 + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; 1646 + 1647 + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 1648 + } 1649 + 1650 + static int its_vpe_set_affinity(struct irq_data *d, 1651 + const struct cpumask *mask_val, 1652 + bool force) 1653 + { 1654 + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 1655 + int cpu = cpumask_first(mask_val); 1656 + 1657 + /* 1658 + * Changing affinity is mega expensive, so let's be as lazy as 1659 + * we can and only do it if we really have to. Also, if mapped 1660 + * into the proxy device, we need to move the doorbell 1661 + * interrupt to its new location. 
1662 + */ 1663 + if (vpe->col_idx != cpu) { 1664 + int from = vpe->col_idx; 1665 + 1666 + vpe->col_idx = cpu; 1667 + its_send_vmovp(vpe); 1668 + its_vpe_db_proxy_move(vpe, from, cpu); 1669 + } 1670 + 1671 + return IRQ_SET_MASK_OK_DONE; 1672 + } 1673 + 1674 + static void its_vpe_schedule(struct its_vpe *vpe) 1675 + { 1676 + void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 1677 + u64 val; 1678 + 1679 + /* Schedule the VPE */ 1680 + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & 1681 + GENMASK_ULL(51, 12); 1682 + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; 1683 + val |= GICR_VPROPBASER_RaWb; 1684 + val |= GICR_VPROPBASER_InnerShareable; 1685 + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); 1686 + 1687 + val = virt_to_phys(page_address(vpe->vpt_page)) & 1688 + GENMASK_ULL(51, 16); 1689 + val |= GICR_VPENDBASER_RaWaWb; 1690 + val |= GICR_VPENDBASER_NonShareable; 1691 + /* 1692 + * There is no good way of finding out if the pending table is 1693 + * empty as we can race against the doorbell interrupt very 1694 + * easily. So in the end, vpe->pending_last is only an 1695 + * indication that the vcpu has something pending, not one 1696 + * that the pending table is empty. A good implementation 1697 + * would be able to read its coarse map pretty quickly anyway, 1698 + * making this a tolerable issue. 1699 + */ 1700 + val |= GICR_VPENDBASER_PendingLast; 1701 + val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; 1702 + val |= GICR_VPENDBASER_Valid; 1703 + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 1704 + } 1705 + 1706 + static void its_vpe_deschedule(struct its_vpe *vpe) 1707 + { 1708 + void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 1709 + u32 count = 1000000; /* 1s! */ 1710 + bool clean; 1711 + u64 val; 1712 + 1713 + /* We're being scheduled out */ 1714 + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 1715 + val &= ~GICR_VPENDBASER_Valid; 1716 + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); 1717 + 1718 + do { 1719 + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); 1720 + clean = !(val & GICR_VPENDBASER_Dirty); 1721 + if (!clean) { 1722 + count--; 1723 + cpu_relax(); 1724 + udelay(1); 1725 + } 1726 + } while (!clean && count); 1727 + 1728 + if (unlikely(!clean && !count)) { 1729 + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); 1730 + vpe->idai = false; 1731 + vpe->pending_last = true; 1732 + } else { 1733 + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); 1734 + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); 1735 + } 1736 + } 1737 + 1738 + static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) 1739 + { 1740 + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 1741 + struct its_cmd_info *info = vcpu_info; 1742 + 1743 + switch (info->cmd_type) { 1744 + case SCHEDULE_VPE: 1745 + its_vpe_schedule(vpe); 1746 + return 0; 1747 + 1748 + case DESCHEDULE_VPE: 1749 + its_vpe_deschedule(vpe); 1750 + return 0; 1751 + 1752 + case INVALL_VPE: 1753 + its_send_vinvall(vpe); 1754 + return 0; 1755 + 1756 + default: 1757 + return -EINVAL; 1758 + } 1759 + } 1760 + 1761 + static void its_vpe_send_cmd(struct its_vpe *vpe, 1762 + void (*cmd)(struct its_device *, u32)) 1763 + { 1764 + unsigned long flags; 1765 + 1766 + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); 1767 + 1768 + its_vpe_db_proxy_map_locked(vpe); 1769 + cmd(vpe_proxy.dev, vpe->vpe_proxy_event); 1770 + 1771 + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); 1772 + } 1773 + 1774 + static void its_vpe_send_inv(struct irq_data *d) 
1775 + { 1776 + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 1777 + 1778 + if (gic_rdists->has_direct_lpi) { 1779 + void __iomem *rdbase; 1780 + 1781 + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 1782 + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR); 1783 + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 1784 + cpu_relax(); 1785 + } else { 1786 + its_vpe_send_cmd(vpe, its_send_inv); 1787 + } 1788 + } 1789 + 1790 + static void its_vpe_mask_irq(struct irq_data *d) 1791 + { 1792 + /* 1793 + * We need to unmask the LPI, which is described by the parent 1794 + * irq_data. Instead of calling into the parent (which won't 1795 + * exactly do the right thing, let's simply use the 1796 + * parent_data pointer. Yes, I'm naughty. 1797 + */ 1798 + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); 1799 + its_vpe_send_inv(d); 1800 + } 1801 + 1802 + static void its_vpe_unmask_irq(struct irq_data *d) 1803 + { 1804 + /* Same hack as above... */ 1805 + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); 1806 + its_vpe_send_inv(d); 1807 + } 1808 + 1809 + static int its_vpe_set_irqchip_state(struct irq_data *d, 1810 + enum irqchip_irq_state which, 1811 + bool state) 1812 + { 1813 + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 1814 + 1815 + if (which != IRQCHIP_STATE_PENDING) 1816 + return -EINVAL; 1817 + 1818 + if (gic_rdists->has_direct_lpi) { 1819 + void __iomem *rdbase; 1820 + 1821 + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; 1822 + if (state) { 1823 + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); 1824 + } else { 1825 + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); 1826 + while (gic_read_lpir(rdbase + GICR_SYNCR) & 1) 1827 + cpu_relax(); 1828 + } 1829 + } else { 1830 + if (state) 1831 + its_vpe_send_cmd(vpe, its_send_int); 1832 + else 1833 + its_vpe_send_cmd(vpe, its_send_clear); 1834 + } 1835 + 1836 + return 0; 1837 + } 1838 + 1839 + static struct irq_chip its_vpe_irq_chip = { 1840 + .name = "GICv4-vpe", 1841 + .irq_mask = its_vpe_mask_irq, 1842 + .irq_unmask = its_vpe_unmask_irq, 1843 + .irq_eoi = irq_chip_eoi_parent, 1844 + .irq_set_affinity = its_vpe_set_affinity, 1845 + .irq_set_irqchip_state = its_vpe_set_irqchip_state, 1846 + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, 1847 + }; 1848 + 1849 + static int its_vpe_id_alloc(void) 1850 + { 1851 + return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL); 1852 + } 1853 + 1854 + static void its_vpe_id_free(u16 id) 1855 + { 1856 + ida_simple_remove(&its_vpeid_ida, id); 1857 + } 1858 + 1859 + static int its_vpe_init(struct its_vpe *vpe) 1860 + { 1861 + struct page *vpt_page; 1862 + int vpe_id; 1863 + 1864 + /* Allocate vpe_id */ 1865 + vpe_id = its_vpe_id_alloc(); 1866 + if (vpe_id < 0) 1867 + return vpe_id; 1868 + 1869 + /* Allocate VPT */ 1870 + vpt_page = its_allocate_pending_table(GFP_KERNEL); 1871 + if (!vpt_page) { 1872 + its_vpe_id_free(vpe_id); 1873 + return -ENOMEM; 1874 + } 1875 + 1876 + if (!its_alloc_vpe_table(vpe_id)) { 1877 + its_vpe_id_free(vpe_id); 1878 + its_free_pending_table(vpe->vpt_page); 1879 + return -ENOMEM; 1880 + } 1881 + 1882 + vpe->vpe_id = vpe_id; 1883 + vpe->vpt_page = vpt_page; 1884 + vpe->vpe_proxy_event = -1; 1885 + 1886 + return 0; 1887 + } 1888 + 1889 + static void its_vpe_teardown(struct its_vpe *vpe) 1890 + { 1891 + its_vpe_db_proxy_unmap(vpe); 1892 + its_vpe_id_free(vpe->vpe_id); 1893 + its_free_pending_table(vpe->vpt_page); 1894 + } 1895 + 1896 + static void its_vpe_irq_domain_free(struct irq_domain *domain, 1897 + 
unsigned int virq, 1898 + unsigned int nr_irqs) 1899 + { 1900 + struct its_vm *vm = domain->host_data; 1901 + int i; 1902 + 1903 + irq_domain_free_irqs_parent(domain, virq, nr_irqs); 1904 + 1905 + for (i = 0; i < nr_irqs; i++) { 1906 + struct irq_data *data = irq_domain_get_irq_data(domain, 1907 + virq + i); 1908 + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); 1909 + 1910 + BUG_ON(vm != vpe->its_vm); 1911 + 1912 + clear_bit(data->hwirq, vm->db_bitmap); 1913 + its_vpe_teardown(vpe); 1914 + irq_domain_reset_irq_data(data); 1915 + } 1916 + 1917 + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { 1918 + its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); 1919 + its_free_prop_table(vm->vprop_page); 1920 + } 1921 + } 1922 + 1923 + static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1924 + unsigned int nr_irqs, void *args) 1925 + { 1926 + struct its_vm *vm = args; 1927 + unsigned long *bitmap; 1928 + struct page *vprop_page; 1929 + int base, nr_ids, i, err = 0; 1930 + 1931 + BUG_ON(!vm); 1932 + 1933 + bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids); 1934 + if (!bitmap) 1935 + return -ENOMEM; 1936 + 1937 + if (nr_ids < nr_irqs) { 1938 + its_lpi_free_chunks(bitmap, base, nr_ids); 1939 + return -ENOMEM; 1940 + } 1941 + 1942 + vprop_page = its_allocate_prop_table(GFP_KERNEL); 1943 + if (!vprop_page) { 1944 + its_lpi_free_chunks(bitmap, base, nr_ids); 1945 + return -ENOMEM; 1946 + } 1947 + 1948 + vm->db_bitmap = bitmap; 1949 + vm->db_lpi_base = base; 1950 + vm->nr_db_lpis = nr_ids; 1951 + vm->vprop_page = vprop_page; 1952 + 1953 + for (i = 0; i < nr_irqs; i++) { 1954 + vm->vpes[i]->vpe_db_lpi = base + i; 1955 + err = its_vpe_init(vm->vpes[i]); 1956 + if (err) 1957 + break; 1958 + err = its_irq_gic_domain_alloc(domain, virq + i, 1959 + vm->vpes[i]->vpe_db_lpi); 1960 + if (err) 1961 + break; 1962 + irq_domain_set_hwirq_and_chip(domain, virq + i, i, 1963 + &its_vpe_irq_chip, vm->vpes[i]); 1964 + set_bit(i, bitmap); 1965 + } 1966 + 1967 + if (err) { 1968 + if (i > 0) 1969 + its_vpe_irq_domain_free(domain, virq, i - 1); 1970 + 1971 + its_lpi_free_chunks(bitmap, base, nr_ids); 1972 + its_free_prop_table(vprop_page); 1973 + } 1974 + 1975 + return err; 1976 + } 1977 + 1978 + static void its_vpe_irq_domain_activate(struct irq_domain *domain, 1979 + struct irq_data *d) 1980 + { 1981 + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 1982 + 1983 + /* Map the VPE to the first possible CPU */ 1984 + vpe->col_idx = cpumask_first(cpu_online_mask); 1985 + its_send_vmapp(vpe, true); 1986 + its_send_vinvall(vpe); 1987 + } 1988 + 1989 + static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, 1990 + struct irq_data *d) 1991 + { 1992 + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); 1993 + 1994 + its_send_vmapp(vpe, false); 1995 + } 1996 + 1997 + static const struct irq_domain_ops its_vpe_domain_ops = { 1998 + .alloc = its_vpe_irq_domain_alloc, 1999 + .free = its_vpe_irq_domain_free, 2000 + .activate = its_vpe_irq_domain_activate, 2001 + .deactivate = its_vpe_irq_domain_deactivate, 2260 2002 }; 2261 2003 2262 2004 static int its_force_quiescent(void __iomem *base) ··· 2719 1571 return 0; 2720 1572 2721 1573 /* Disable the generation of all interrupts to this ITS */ 2722 - val &= ~GITS_CTLR_ENABLE; 1574 + val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); 2723 1575 writel_relaxed(val, base + GITS_CTLR); 2724 1576 2725 1577 /* Poll GITS_CTLR and wait until ITS becomes quiescent */ ··· 2820 1672 return 0; 2821 1673 } 2822 1674 1675 + static int 
its_init_vpe_domain(void) 1676 + { 1677 + struct its_node *its; 1678 + u32 devid; 1679 + int entries; 1680 + 1681 + if (gic_rdists->has_direct_lpi) { 1682 + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); 1683 + return 0; 1684 + } 1685 + 1686 + /* Any ITS will do, even if not v4 */ 1687 + its = list_first_entry(&its_nodes, struct its_node, entry); 1688 + 1689 + entries = roundup_pow_of_two(nr_cpu_ids); 1690 + vpe_proxy.vpes = kzalloc(sizeof(*vpe_proxy.vpes) * entries, 1691 + GFP_KERNEL); 1692 + if (!vpe_proxy.vpes) { 1693 + pr_err("ITS: Can't allocate GICv4 proxy device array\n"); 1694 + return -ENOMEM; 1695 + } 1696 + 1697 + /* Use the last possible DevID */ 1698 + devid = GENMASK(its->device_ids - 1, 0); 1699 + vpe_proxy.dev = its_create_device(its, devid, entries, false); 1700 + if (!vpe_proxy.dev) { 1701 + kfree(vpe_proxy.vpes); 1702 + pr_err("ITS: Can't allocate GICv4 proxy device\n"); 1703 + return -ENOMEM; 1704 + } 1705 + 1706 + BUG_ON(entries != vpe_proxy.dev->nr_ites); 1707 + 1708 + raw_spin_lock_init(&vpe_proxy.lock); 1709 + vpe_proxy.next_victim = 0; 1710 + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", 1711 + devid, vpe_proxy.dev->nr_ites); 1712 + 1713 + return 0; 1714 + } 1715 + 1716 + static int __init its_compute_its_list_map(struct resource *res, 1717 + void __iomem *its_base) 1718 + { 1719 + int its_number; 1720 + u32 ctlr; 1721 + 1722 + /* 1723 + * This is assumed to be done early enough that we're 1724 + * guaranteed to be single-threaded, hence no 1725 + * locking. Should this change, we should address 1726 + * this. 1727 + */ 1728 + its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX); 1729 + if (its_number >= ITS_LIST_MAX) { 1730 + pr_err("ITS@%pa: No ITSList entry available!\n", 1731 + &res->start); 1732 + return -EINVAL; 1733 + } 1734 + 1735 + ctlr = readl_relaxed(its_base + GITS_CTLR); 1736 + ctlr &= ~GITS_CTLR_ITS_NUMBER; 1737 + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; 1738 + writel_relaxed(ctlr, its_base + GITS_CTLR); 1739 + ctlr = readl_relaxed(its_base + GITS_CTLR); 1740 + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { 1741 + its_number = ctlr & GITS_CTLR_ITS_NUMBER; 1742 + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; 1743 + } 1744 + 1745 + if (test_and_set_bit(its_number, &its_list_map)) { 1746 + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", 1747 + &res->start, its_number); 1748 + return -EINVAL; 1749 + } 1750 + 1751 + return its_number; 1752 + } 1753 + 2823 1754 static int __init its_probe_one(struct resource *res, 2824 1755 struct fwnode_handle *handle, int numa_node) 2825 1756 { 2826 1757 struct its_node *its; 2827 1758 void __iomem *its_base; 2828 - u32 val; 2829 - u64 baser, tmp; 1759 + u32 val, ctlr; 1760 + u64 baser, tmp, typer; 2830 1761 int err; 2831 1762 2832 1763 its_base = ioremap(res->start, resource_size(res)); ··· 2938 1711 raw_spin_lock_init(&its->lock); 2939 1712 INIT_LIST_HEAD(&its->entry); 2940 1713 INIT_LIST_HEAD(&its->its_device_list); 1714 + typer = gic_read_typer(its_base + GITS_TYPER); 2941 1715 its->base = its_base; 2942 1716 its->phys_base = res->start; 2943 - its->ite_size = ((gic_read_typer(its_base + GITS_TYPER) >> 4) & 0xf) + 1; 1717 + its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); 1718 + its->is_v4 = !!(typer & GITS_TYPER_VLPIS); 1719 + if (its->is_v4) { 1720 + if (!(typer & GITS_TYPER_VMOVP)) { 1721 + err = its_compute_its_list_map(res, its_base); 1722 + if (err < 0) 1723 + goto out_free_its; 1724 + 1725 + pr_info("ITS@%pa: Using ITS number 
%d\n", 1726 + &res->start, err); 1727 + } else { 1728 + pr_info("ITS@%pa: Single VMOVP capable\n", &res->start); 1729 + } 1730 + } 1731 + 2944 1732 its->numa_node = numa_node; 2945 1733 2946 1734 its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, ··· 3002 1760 } 3003 1761 3004 1762 gits_write_cwriter(0, its->base + GITS_CWRITER); 3005 - writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); 1763 + ctlr = readl_relaxed(its->base + GITS_CTLR); 1764 + ctlr |= GITS_CTLR_ENABLE; 1765 + if (its->is_v4) 1766 + ctlr |= GITS_CTLR_ImDe; 1767 + writel_relaxed(ctlr, its->base + GITS_CTLR); 3006 1768 3007 1769 err = its_init_domain(handle, its); 3008 1770 if (err) ··· 3062 1816 for (np = of_find_matching_node(node, its_device_id); np; 3063 1817 np = of_find_matching_node(np, its_device_id)) { 3064 1818 if (!of_property_read_bool(np, "msi-controller")) { 3065 - pr_warn("%s: no msi-controller property, ITS ignored\n", 3066 - np->full_name); 1819 + pr_warn("%pOF: no msi-controller property, ITS ignored\n", 1820 + np); 3067 1821 continue; 3068 1822 } 3069 1823 3070 1824 if (of_address_to_resource(np, 0, &res)) { 3071 - pr_warn("%s: no regs?\n", np->full_name); 1825 + pr_warn("%pOF: no regs?\n", np); 3072 1826 continue; 3073 1827 } 3074 1828 ··· 3230 1984 struct irq_domain *parent_domain) 3231 1985 { 3232 1986 struct device_node *of_node; 1987 + struct its_node *its; 1988 + bool has_v4 = false; 1989 + int err; 3233 1990 3234 1991 its_parent = parent_domain; 3235 1992 of_node = to_of_node(handle); ··· 3247 1998 } 3248 1999 3249 2000 gic_rdists = rdists; 3250 - return its_alloc_lpi_tables(); 2001 + err = its_alloc_lpi_tables(); 2002 + if (err) 2003 + return err; 2004 + 2005 + list_for_each_entry(its, &its_nodes, entry) 2006 + has_v4 |= its->is_v4; 2007 + 2008 + if (has_v4 & rdists->has_vlpis) { 2009 + if (its_init_vpe_domain() || 2010 + its_init_v4(parent_domain, &its_vpe_domain_ops)) { 2011 + rdists->has_vlpis = false; 2012 + pr_err("ITS: Disabling GICv4 support\n"); 2013 + } 2014 + } 2015 + 2016 + return 0; 3251 2017 }
+79 -30
drivers/irqchip/irq-gic-v3.c
··· 1 1 /* 2 - * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. 2 + * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. 3 3 * Author: Marc Zyngier <marc.zyngier@arm.com> 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify ··· 423 423 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); 424 424 } 425 425 426 - static int gic_populate_rdist(void) 426 + static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) 427 427 { 428 - unsigned long mpidr = cpu_logical_map(smp_processor_id()); 429 - u64 typer; 430 - u32 aff; 428 + int ret = -ENODEV; 431 429 int i; 432 - 433 - /* 434 - * Convert affinity to a 32bit value that can be matched to 435 - * GICR_TYPER bits [63:32]. 436 - */ 437 - aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | 438 - MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 439 - MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 440 - MPIDR_AFFINITY_LEVEL(mpidr, 0)); 441 430 442 431 for (i = 0; i < gic_data.nr_redist_regions; i++) { 443 432 void __iomem *ptr = gic_data.redist_regions[i].redist_base; 433 + u64 typer; 444 434 u32 reg; 445 435 446 436 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; ··· 442 452 443 453 do { 444 454 typer = gic_read_typer(ptr + GICR_TYPER); 445 - if ((typer >> 32) == aff) { 446 - u64 offset = ptr - gic_data.redist_regions[i].redist_base; 447 - gic_data_rdist_rd_base() = ptr; 448 - gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; 449 - pr_info("CPU%d: found redistributor %lx region %d:%pa\n", 450 - smp_processor_id(), mpidr, i, 451 - &gic_data_rdist()->phys_base); 455 + ret = fn(gic_data.redist_regions + i, ptr); 456 + if (!ret) 452 457 return 0; 453 - } 454 458 455 459 if (gic_data.redist_regions[i].single_redist) 456 460 break; ··· 459 475 } while (!(typer & GICR_TYPER_LAST)); 460 476 } 461 477 478 + return ret ? -ENODEV : 0; 479 + } 480 + 481 + static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) 482 + { 483 + unsigned long mpidr = cpu_logical_map(smp_processor_id()); 484 + u64 typer; 485 + u32 aff; 486 + 487 + /* 488 + * Convert affinity to a 32bit value that can be matched to 489 + * GICR_TYPER bits [63:32]. 490 + */ 491 + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | 492 + MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 493 + MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 494 + MPIDR_AFFINITY_LEVEL(mpidr, 0)); 495 + 496 + typer = gic_read_typer(ptr + GICR_TYPER); 497 + if ((typer >> 32) == aff) { 498 + u64 offset = ptr - region->redist_base; 499 + gic_data_rdist_rd_base() = ptr; 500 + gic_data_rdist()->phys_base = region->phys_base + offset; 501 + 502 + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", 503 + smp_processor_id(), mpidr, 504 + (int)(region - gic_data.redist_regions), 505 + &gic_data_rdist()->phys_base); 506 + return 0; 507 + } 508 + 509 + /* Try next one */ 510 + return 1; 511 + } 512 + 513 + static int gic_populate_rdist(void) 514 + { 515 + if (gic_iterate_rdists(__gic_populate_rdist) == 0) 516 + return 0; 517 + 462 518 /* We couldn't even deal with ourselves... 
*/ 463 519 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", 464 - smp_processor_id(), mpidr); 520 + smp_processor_id(), 521 + (unsigned long)cpu_logical_map(smp_processor_id())); 465 522 return -ENODEV; 523 + } 524 + 525 + static int __gic_update_vlpi_properties(struct redist_region *region, 526 + void __iomem *ptr) 527 + { 528 + u64 typer = gic_read_typer(ptr + GICR_TYPER); 529 + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); 530 + gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); 531 + 532 + return 1; 533 + } 534 + 535 + static void gic_update_vlpi_properties(void) 536 + { 537 + gic_iterate_rdists(__gic_update_vlpi_properties); 538 + pr_info("%sVLPI support, %sdirect LPI support\n", 539 + !gic_data.rdists.has_vlpis ? "no " : "", 540 + !gic_data.rdists.has_direct_lpi ? "no " : ""); 466 541 } 467 542 468 543 static void gic_cpu_sys_reg_init(void) ··· 720 677 else 721 678 gic_dist_wait_for_rwp(); 722 679 680 + irq_data_update_effective_affinity(d, cpumask_of(cpu)); 681 + 723 682 return IRQ_SET_MASK_OK_DONE; 724 683 } 725 684 #else ··· 820 775 irq_domain_set_info(d, irq, hw, chip, d->host_data, 821 776 handle_fasteoi_irq, NULL, NULL); 822 777 irq_set_probe(irq); 778 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); 823 779 } 824 780 /* LPIs */ 825 781 if (hw >= 8192 && hw < GIC_ID_NR) { ··· 999 953 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, 1000 954 &gic_data); 1001 955 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); 956 + gic_data.rdists.has_vlpis = true; 957 + gic_data.rdists.has_direct_lpi = true; 1002 958 1003 959 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { 1004 960 err = -ENOMEM; ··· 1008 960 } 1009 961 1010 962 set_handle_irq(gic_handle_irq); 963 + 964 + gic_update_vlpi_properties(); 1011 965 1012 966 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) 1013 967 its_init(handle, &gic_data.rdists, gic_data.domain); ··· 1117 1067 if (WARN_ON(cpu == -1)) 1118 1068 continue; 1119 1069 1120 - pr_cont("%s[%d] ", cpu_node->full_name, cpu); 1070 + pr_cont("%pOF[%d] ", cpu_node, cpu); 1121 1071 1122 1072 cpumask_set_cpu(cpu, &part->mask); 1123 1073 } ··· 1172 1122 if (!ret) 1173 1123 gic_v3_kvm_info.vcpu = r; 1174 1124 1125 + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; 1175 1126 gic_set_kvm_info(&gic_v3_kvm_info); 1176 1127 } 1177 1128 ··· 1186 1135 1187 1136 dist_base = of_iomap(node, 0); 1188 1137 if (!dist_base) { 1189 - pr_err("%s: unable to map gic dist registers\n", 1190 - node->full_name); 1138 + pr_err("%pOF: unable to map gic dist registers\n", node); 1191 1139 return -ENXIO; 1192 1140 } 1193 1141 1194 1142 err = gic_validate_dist_version(dist_base); 1195 1143 if (err) { 1196 - pr_err("%s: no distributor detected, giving up\n", 1197 - node->full_name); 1144 + pr_err("%pOF: no distributor detected, giving up\n", node); 1198 1145 goto out_unmap_dist; 1199 1146 } 1200 1147 ··· 1212 1163 ret = of_address_to_resource(node, 1 + i, &res); 1213 1164 rdist_regs[i].redist_base = of_iomap(node, 1 + i); 1214 1165 if (ret || !rdist_regs[i].redist_base) { 1215 - pr_err("%s: couldn't map region %d\n", 1216 - node->full_name, i); 1166 + pr_err("%pOF: couldn't map region %d\n", node, i); 1217 1167 err = -ENODEV; 1218 1168 goto out_unmap_rdist; 1219 1169 } ··· 1466 1418 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; 1467 1419 } 1468 1420 1421 + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; 1469 1422 gic_set_kvm_info(&gic_v3_kvm_info); 1470 
1423 } 1471 1424
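
For reference, the redistributor match in __gic_populate_rdist() compares a packed 32-bit affinity value against GICR_TYPER[63:32]. A small stand-alone sketch of that packing, using an arbitrary example MPIDR:

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Pack the four MPIDR affinity levels the way __gic_populate_rdist()
   * does before comparing against GICR_TYPER[63:32]. Aff0..Aff2 live in
   * MPIDR bits [23:0], Aff3 in bits [39:32].
   */
  static uint32_t pack_affinity(uint64_t mpidr)
  {
          uint32_t aff0 = mpidr & 0xff;
          uint32_t aff1 = (mpidr >> 8) & 0xff;
          uint32_t aff2 = (mpidr >> 16) & 0xff;
          uint32_t aff3 = (mpidr >> 32) & 0xff;

          return aff3 << 24 | aff2 << 16 | aff1 << 8 | aff0;
  }

  int main(void)
  {
          uint64_t mpidr = 0x0000000100000203ULL;  /* example MPIDR */

          /* Prints 0x01000203 for the example value above */
          printf("GICR_TYPER affinity match value: 0x%08x\n",
                 pack_affinity(mpidr));
          return 0;
  }
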
+225
drivers/irqchip/irq-gic-v4.c
··· 1 + /* 2 + * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved. 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #include <linux/interrupt.h> 19 + #include <linux/irq.h> 20 + #include <linux/irqdomain.h> 21 + #include <linux/msi.h> 22 + #include <linux/sched.h> 23 + 24 + #include <linux/irqchip/arm-gic-v4.h> 25 + 26 + /* 27 + * WARNING: The blurb below assumes that you understand the 28 + * intricacies of GICv3, GICv4, and how a guest's view of a GICv3 gets 29 + * translated into GICv4 commands. So it effectively targets at most 30 + * two individuals. You know who you are. 31 + * 32 + * The core GICv4 code is designed to *avoid* exposing too much of the 33 + * core GIC code (that would in turn leak into the hypervisor code), 34 + * and instead provide a hypervisor agnostic interface to the HW (of 35 + * course, the astute reader will quickly realize that hypervisor 36 + * agnostic actually means KVM-specific - what were you thinking?). 37 + * 38 + * In order to achieve a modicum of isolation, we try to hide most of 39 + * the GICv4 "stuff" behind normal irqchip operations: 40 + * 41 + * - Any guest-visible VLPI is backed by a Linux interrupt (and a 42 + * physical LPI which gets unmapped when the guest maps the 43 + * VLPI). This allows the same DevID/EventID pair to be either 44 + * mapped to the LPI (host) or the VLPI (guest). Note that this is 45 + * exclusive, and you cannot have both. 46 + * 47 + * - Enabling/disabling a VLPI is done by issuing mask/unmask calls. 48 + * 49 + * - Guest INT/CLEAR commands are implemented through 50 + * irq_set_irqchip_state(). 51 + * 52 + * - The *bizarre* stuff (mapping/unmapping an interrupt to a VLPI, or 53 + * issuing an INV after changing a priority) gets shoved into the 54 + * irq_set_vcpu_affinity() method. While this is quite horrible 55 + * (let's face it, this is the irqchip version of an ioctl), it 56 + * confines the crap to a single location. And map/unmap really is 57 + * about setting the affinity of a VLPI to a vcpu, so only INV is 58 + * majorly out of place. So there. 59 + * 60 + * A number of commands are simply not provided by this interface, as 61 + * they do not make direct sense. For example, MAPD is purely local to 62 + * the virtual ITS (because it references a virtual device, and the 63 + * physical ITS is still very much in charge of the physical 64 + * device). Same goes for things like MAPC (the physical ITS deals 65 + * with the actual vPE affinity, and not the braindead concept of 66 + * collection). SYNC is not provided either, as each and every command 67 + * is followed by a VSYNC. This could be relaxed in the future, should 68 + * this be seen as a bottleneck (yes, this means *never*). 69 + * 70 + * But handling VLPIs is only one side of the job of the GICv4 71 + * code. 
The other (darker) side is to take care of the doorbell 72 + * interrupts which are delivered when a VLPI targeting a non-running 73 + * vcpu is being made pending. 74 + * 75 + * The choice made here is that each vcpu (VPE in old northern GICv4 76 + * dialect) gets a single doorbell LPI, no matter how many interrupts 77 + * are targeting it. This has a nice property, which is that the 78 + * interrupt becomes a handle for the VPE, and that the hypervisor 79 + * code can manipulate it through the normal interrupt API: 80 + * 81 + * - VMs (or rather the VM abstraction that matters to the GIC) 82 + * contain an irq domain where each interrupt maps to a VPE. In 83 + * turn, this domain sits on top of the normal LPI allocator, and a 84 + * specially crafted irq_chip implementation. 85 + * 86 + * - mask/unmask do what is expected on the doorbell interrupt. 87 + * 88 + * - irq_set_affinity is used to move a VPE from one redistributor to 89 + * another. 90 + * 91 + * - irq_set_vcpu_affinity once again gets hijacked for the purpose of 92 + * creating a new sub-API, namely scheduling/descheduling a VPE 93 + * (which involves programming GICR_V{PROP,PEND}BASER) and 94 + * performing INVALL operations. 95 + */ 96 + 97 + static struct irq_domain *gic_domain; 98 + static const struct irq_domain_ops *vpe_domain_ops; 99 + 100 + int its_alloc_vcpu_irqs(struct its_vm *vm) 101 + { 102 + int vpe_base_irq, i; 103 + 104 + vm->fwnode = irq_domain_alloc_named_id_fwnode("GICv4-vpe", 105 + task_pid_nr(current)); 106 + if (!vm->fwnode) 107 + goto err; 108 + 109 + vm->domain = irq_domain_create_hierarchy(gic_domain, 0, vm->nr_vpes, 110 + vm->fwnode, vpe_domain_ops, 111 + vm); 112 + if (!vm->domain) 113 + goto err; 114 + 115 + for (i = 0; i < vm->nr_vpes; i++) { 116 + vm->vpes[i]->its_vm = vm; 117 + vm->vpes[i]->idai = true; 118 + } 119 + 120 + vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes, 121 + NUMA_NO_NODE, vm, 122 + false, NULL); 123 + if (vpe_base_irq <= 0) 124 + goto err; 125 + 126 + for (i = 0; i < vm->nr_vpes; i++) 127 + vm->vpes[i]->irq = vpe_base_irq + i; 128 + 129 + return 0; 130 + 131 + err: 132 + if (vm->domain) 133 + irq_domain_remove(vm->domain); 134 + if (vm->fwnode) 135 + irq_domain_free_fwnode(vm->fwnode); 136 + 137 + return -ENOMEM; 138 + } 139 + 140 + void its_free_vcpu_irqs(struct its_vm *vm) 141 + { 142 + irq_domain_free_irqs(vm->vpes[0]->irq, vm->nr_vpes); 143 + irq_domain_remove(vm->domain); 144 + irq_domain_free_fwnode(vm->fwnode); 145 + } 146 + 147 + static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info) 148 + { 149 + return irq_set_vcpu_affinity(vpe->irq, info); 150 + } 151 + 152 + int its_schedule_vpe(struct its_vpe *vpe, bool on) 153 + { 154 + struct its_cmd_info info; 155 + 156 + WARN_ON(preemptible()); 157 + 158 + info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE; 159 + 160 + return its_send_vpe_cmd(vpe, &info); 161 + } 162 + 163 + int its_invall_vpe(struct its_vpe *vpe) 164 + { 165 + struct its_cmd_info info = { 166 + .cmd_type = INVALL_VPE, 167 + }; 168 + 169 + return its_send_vpe_cmd(vpe, &info); 170 + } 171 + 172 + int its_map_vlpi(int irq, struct its_vlpi_map *map) 173 + { 174 + struct its_cmd_info info = { 175 + .cmd_type = MAP_VLPI, 176 + .map = map, 177 + }; 178 + 179 + /* 180 + * The host will never see that interrupt firing again, so it 181 + * is vital that we don't do any lazy masking. 
182 + */ 183 + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); 184 + 185 + return irq_set_vcpu_affinity(irq, &info); 186 + } 187 + 188 + int its_get_vlpi(int irq, struct its_vlpi_map *map) 189 + { 190 + struct its_cmd_info info = { 191 + .cmd_type = GET_VLPI, 192 + .map = map, 193 + }; 194 + 195 + return irq_set_vcpu_affinity(irq, &info); 196 + } 197 + 198 + int its_unmap_vlpi(int irq) 199 + { 200 + irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY); 201 + return irq_set_vcpu_affinity(irq, NULL); 202 + } 203 + 204 + int its_prop_update_vlpi(int irq, u8 config, bool inv) 205 + { 206 + struct its_cmd_info info = { 207 + .cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI, 208 + .config = config, 209 + }; 210 + 211 + return irq_set_vcpu_affinity(irq, &info); 212 + } 213 + 214 + int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops) 215 + { 216 + if (domain) { 217 + pr_info("ITS: Enabling GICv4 support\n"); 218 + gic_domain = domain; 219 + vpe_domain_ops = ops; 220 + return 0; 221 + } 222 + 223 + pr_err("ITS: No GICv4 VPE domain allocated\n"); 224 + return -ENODEV; 225 + }
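
The comment block at the top of this file describes the hypervisor-facing surface of the new GICv4 layer. The sketch below only illustrates the intended call flow; the example_* wrappers are hypothetical, and a real consumer (KVM) would hook the same its_* helpers into its own vGIC state and vcpu load/put paths.

  #include <linux/irqchip/arm-gic-v4.h>

  /* Hypothetical wrappers showing the intended GICv4 call flow. */
  static int example_vm_init(struct its_vm *vm)
  {
          /* One doorbell LPI per VPE; also creates vm->domain */
          return its_alloc_vcpu_irqs(vm);
  }

  static int example_vcpu_load(struct its_vpe *vpe)
  {
          /* Called with preemption disabled, see its_schedule_vpe() */
          return its_schedule_vpe(vpe, true);
  }

  static int example_vcpu_put(struct its_vpe *vpe)
  {
          return its_schedule_vpe(vpe, false);
  }

  static int example_forward_irq(int host_irq, struct its_vlpi_map *map)
  {
          /* Turn a host LPI into a guest-visible VLPI */
          return its_map_vlpi(host_irq, map);
  }

  static int example_unforward_irq(int host_irq)
  {
          return its_unmap_vlpi(host_irq);
  }

  static void example_vm_destroy(struct its_vm *vm)
  {
          its_free_vcpu_irqs(vm);
  }
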
+4 -1
drivers/irqchip/irq-gic.c
··· 344 344 writel_relaxed(val | bit, reg); 345 345 gic_unlock_irqrestore(flags); 346 346 347 + irq_data_update_effective_affinity(d, cpumask_of(cpu)); 348 + 347 349 return IRQ_SET_MASK_OK_DONE; 348 350 } 349 351 #endif ··· 415 413 chained_irq_exit(chip, desc); 416 414 } 417 415 418 - static struct irq_chip gic_chip = { 416 + static const struct irq_chip gic_chip = { 419 417 .irq_mask = gic_mask_irq, 420 418 .irq_unmask = gic_unmask_irq, 421 419 .irq_eoi = gic_eoi_irq, ··· 971 969 irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data, 972 970 handle_fasteoi_irq, NULL, NULL); 973 971 irq_set_probe(irq); 972 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); 974 973 } 975 974 return 0; 976 975 }
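
Several drivers touched by this series (gic, gic-v3, hip04, metag-ext, mips-gic) adopt the same effective-affinity pattern: report the one CPU that was actually programmed, and mark the interrupt as single-target when it is mapped. A condensed sketch of that pattern follows; program_target() is a stand-in for the driver-specific register write and the map callback is abbreviated.

  #include <linux/irq.h>
  #include <linux/irqdesc.h>
  #include <linux/irqdomain.h>
  #include <linux/cpumask.h>

  /* Stand-in for the per-driver target register update */
  static void program_target(struct irq_data *d, unsigned int cpu)
  {
          /* write the per-interrupt routing register here */
  }

  static int example_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val, bool force)
  {
          unsigned int cpu = force ? cpumask_first(mask_val)
                                   : cpumask_any_and(mask_val, cpu_online_mask);

          if (cpu >= nr_cpu_ids)
                  return -EINVAL;

          program_target(d, cpu);
          /* Tell the core which single CPU will really receive this IRQ */
          irq_data_update_effective_affinity(d, cpumask_of(cpu));

          return IRQ_SET_MASK_OK_DONE;
  }

  static int example_map(struct irq_domain *d, unsigned int virq,
                         irq_hw_number_t hw)
  {
          /* ... chip/handler setup elided ... */
          irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
          return 0;
  }
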
+3
drivers/irqchip/irq-hip04.c
··· 165 165 writel_relaxed(val | bit, reg); 166 166 raw_spin_unlock(&irq_controller_lock); 167 167 168 + irq_data_update_effective_affinity(d, cpumask_of(cpu)); 169 + 168 170 return IRQ_SET_MASK_OK; 169 171 } 170 172 #endif ··· 314 312 irq_set_chip_and_handler(irq, &hip04_irq_chip, 315 313 handle_fasteoi_irq); 316 314 irq_set_probe(irq); 315 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); 317 316 } 318 317 irq_set_chip_data(irq, d->host_data); 319 318 return 0;
+2 -2
drivers/irqchip/irq-imx-gpcv2.c
··· 214 214 int i; 215 215 216 216 if (!parent) { 217 - pr_err("%s: no parent, giving up\n", node->full_name); 217 + pr_err("%pOF: no parent, giving up\n", node); 218 218 return -ENODEV; 219 219 } 220 220 221 221 parent_domain = irq_find_host(parent); 222 222 if (!parent_domain) { 223 - pr_err("%s: unable to get parent domain\n", node->full_name); 223 + pr_err("%pOF: unable to get parent domain\n", node); 224 224 return -ENXIO; 225 225 } 226 226
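
This driver and the ones below switch their diagnostics from node->full_name to the %pOF printk specifier, which prints the device-tree node path straight from the struct device_node pointer. A minimal sketch of the pattern; the message text is only an example:

  #include <linux/of.h>
  #include <linux/printk.h>

  static void example_report(struct device_node *np)
  {
          /* %pOF prints the full node path; no need to touch np->full_name */
          pr_err("%pOF: unable to map registers\n", np);
  }
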
+1 -1
drivers/irqchip/irq-lpc32xx.c
··· 191 191 192 192 irqc->base = of_iomap(node, 0); 193 193 if (!irqc->base) { 194 - pr_err("%s: unable to map registers\n", node->full_name); 194 + pr_err("%pOF: unable to map registers\n", node); 195 195 kfree(irqc); 196 196 return -EINVAL; 197 197 }
+222 -34
drivers/irqchip/irq-ls-scfg-msi.c
··· 17 17 #include <linux/irq.h> 18 18 #include <linux/irqchip/chained_irq.h> 19 19 #include <linux/irqdomain.h> 20 + #include <linux/of_irq.h> 20 21 #include <linux/of_pci.h> 21 22 #include <linux/of_platform.h> 22 23 #include <linux/spinlock.h> 23 24 24 - #define MSI_MAX_IRQS 32 25 - #define MSI_IBS_SHIFT 3 26 - #define MSIR 4 25 + #define MSI_IRQS_PER_MSIR 32 26 + #define MSI_MSIR_OFFSET 4 27 + 28 + #define MSI_LS1043V1_1_IRQS_PER_MSIR 8 29 + #define MSI_LS1043V1_1_MSIR_OFFSET 0x10 30 + 31 + struct ls_scfg_msi_cfg { 32 + u32 ibs_shift; /* Shift of interrupt bit select */ 33 + u32 msir_irqs; /* The irq number per MSIR */ 34 + u32 msir_base; /* The base address of MSIR */ 35 + }; 36 + 37 + struct ls_scfg_msir { 38 + struct ls_scfg_msi *msi_data; 39 + unsigned int index; 40 + unsigned int gic_irq; 41 + unsigned int bit_start; 42 + unsigned int bit_end; 43 + unsigned int srs; /* Shared interrupt register select */ 44 + void __iomem *reg; 45 + }; 27 46 28 47 struct ls_scfg_msi { 29 48 spinlock_t lock; ··· 51 32 struct irq_domain *msi_domain; 52 33 void __iomem *regs; 53 34 phys_addr_t msiir_addr; 54 - int irq; 55 - DECLARE_BITMAP(used, MSI_MAX_IRQS); 35 + struct ls_scfg_msi_cfg *cfg; 36 + u32 msir_num; 37 + struct ls_scfg_msir *msir; 38 + u32 irqs_num; 39 + unsigned long *used; 56 40 }; 57 41 58 42 static struct irq_chip ls_scfg_msi_irq_chip = { ··· 71 49 .chip = &ls_scfg_msi_irq_chip, 72 50 }; 73 51 52 + static int msi_affinity_flag = 1; 53 + 54 + static int __init early_parse_ls_scfg_msi(char *p) 55 + { 56 + if (p && strncmp(p, "no-affinity", 11) == 0) 57 + msi_affinity_flag = 0; 58 + else 59 + msi_affinity_flag = 1; 60 + 61 + return 0; 62 + } 63 + early_param("lsmsi", early_parse_ls_scfg_msi); 64 + 74 65 static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) 75 66 { 76 67 struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data); 77 68 78 69 msg->address_hi = upper_32_bits(msi_data->msiir_addr); 79 70 msg->address_lo = lower_32_bits(msi_data->msiir_addr); 80 - msg->data = data->hwirq << MSI_IBS_SHIFT; 71 + msg->data = data->hwirq; 72 + 73 + if (msi_affinity_flag) 74 + msg->data |= cpumask_first(data->common->affinity); 81 75 } 82 76 83 77 static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, 84 78 const struct cpumask *mask, bool force) 85 79 { 86 - return -EINVAL; 80 + struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data); 81 + u32 cpu; 82 + 83 + if (!msi_affinity_flag) 84 + return -EINVAL; 85 + 86 + if (!force) 87 + cpu = cpumask_any_and(mask, cpu_online_mask); 88 + else 89 + cpu = cpumask_first(mask); 90 + 91 + if (cpu >= msi_data->msir_num) 92 + return -EINVAL; 93 + 94 + if (msi_data->msir[cpu].gic_irq <= 0) { 95 + pr_warn("cannot bind the irq to cpu%d\n", cpu); 96 + return -EINVAL; 97 + } 98 + 99 + cpumask_copy(irq_data->common->affinity, mask); 100 + 101 + return IRQ_SET_MASK_OK; 87 102 } 88 103 89 104 static struct irq_chip ls_scfg_msi_parent_chip = { ··· 140 81 WARN_ON(nr_irqs != 1); 141 82 142 83 spin_lock(&msi_data->lock); 143 - pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS); 144 - if (pos < MSI_MAX_IRQS) 84 + pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num); 85 + if (pos < msi_data->irqs_num) 145 86 __set_bit(pos, msi_data->used); 146 87 else 147 88 err = -ENOSPC; ··· 165 106 int pos; 166 107 167 108 pos = d->hwirq; 168 - if (pos < 0 || pos >= MSI_MAX_IRQS) { 109 + if (pos < 0 || pos >= msi_data->irqs_num) { 169 110 pr_err("failed to teardown msi. 
Invalid hwirq %d\n", pos); 170 111 return; 171 112 } ··· 182 123 183 124 static void ls_scfg_msi_irq_handler(struct irq_desc *desc) 184 125 { 185 - struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc); 126 + struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc); 127 + struct ls_scfg_msi *msi_data = msir->msi_data; 186 128 unsigned long val; 187 - int pos, virq; 129 + int pos, size, virq, hwirq; 188 130 189 131 chained_irq_enter(irq_desc_get_chip(desc), desc); 190 132 191 - val = ioread32be(msi_data->regs + MSIR); 192 - for_each_set_bit(pos, &val, MSI_MAX_IRQS) { 193 - virq = irq_find_mapping(msi_data->parent, (31 - pos)); 133 + val = ioread32be(msir->reg); 134 + 135 + pos = msir->bit_start; 136 + size = msir->bit_end + 1; 137 + 138 + for_each_set_bit_from(pos, &val, size) { 139 + hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) | 140 + msir->srs; 141 + virq = irq_find_mapping(msi_data->parent, hwirq); 194 142 if (virq) 195 143 generic_handle_irq(virq); 196 144 } ··· 209 143 { 210 144 /* Initialize MSI domain parent */ 211 145 msi_data->parent = irq_domain_add_linear(NULL, 212 - MSI_MAX_IRQS, 146 + msi_data->irqs_num, 213 147 &ls_scfg_msi_domain_ops, 214 148 msi_data); 215 149 if (!msi_data->parent) { ··· 230 164 return 0; 231 165 } 232 166 167 + static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index) 168 + { 169 + struct ls_scfg_msir *msir; 170 + int virq, i, hwirq; 171 + 172 + virq = platform_get_irq(msi_data->pdev, index); 173 + if (virq <= 0) 174 + return -ENODEV; 175 + 176 + msir = &msi_data->msir[index]; 177 + msir->index = index; 178 + msir->msi_data = msi_data; 179 + msir->gic_irq = virq; 180 + msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index; 181 + 182 + if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) { 183 + msir->bit_start = 32 - ((msir->index + 1) * 184 + MSI_LS1043V1_1_IRQS_PER_MSIR); 185 + msir->bit_end = msir->bit_start + 186 + MSI_LS1043V1_1_IRQS_PER_MSIR - 1; 187 + } else { 188 + msir->bit_start = 0; 189 + msir->bit_end = msi_data->cfg->msir_irqs - 1; 190 + } 191 + 192 + irq_set_chained_handler_and_data(msir->gic_irq, 193 + ls_scfg_msi_irq_handler, 194 + msir); 195 + 196 + if (msi_affinity_flag) { 197 + /* Associate MSIR interrupt to the cpu */ 198 + irq_set_affinity(msir->gic_irq, get_cpu_mask(index)); 199 + msir->srs = 0; /* This value is determined by the CPU */ 200 + } else 201 + msir->srs = index; 202 + 203 + /* Release the hwirqs corresponding to this MSIR */ 204 + if (!msi_affinity_flag || msir->index == 0) { 205 + for (i = 0; i < msi_data->cfg->msir_irqs; i++) { 206 + hwirq = i << msi_data->cfg->ibs_shift | msir->index; 207 + bitmap_clear(msi_data->used, hwirq, 1); 208 + } 209 + } 210 + 211 + return 0; 212 + } 213 + 214 + static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir) 215 + { 216 + struct ls_scfg_msi *msi_data = msir->msi_data; 217 + int i, hwirq; 218 + 219 + if (msir->gic_irq > 0) 220 + irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL); 221 + 222 + for (i = 0; i < msi_data->cfg->msir_irqs; i++) { 223 + hwirq = i << msi_data->cfg->ibs_shift | msir->index; 224 + bitmap_set(msi_data->used, hwirq, 1); 225 + } 226 + 227 + return 0; 228 + } 229 + 230 + static struct ls_scfg_msi_cfg ls1021_msi_cfg = { 231 + .ibs_shift = 3, 232 + .msir_irqs = MSI_IRQS_PER_MSIR, 233 + .msir_base = MSI_MSIR_OFFSET, 234 + }; 235 + 236 + static struct ls_scfg_msi_cfg ls1046_msi_cfg = { 237 + .ibs_shift = 2, 238 + .msir_irqs = MSI_IRQS_PER_MSIR, 239 + .msir_base = MSI_MSIR_OFFSET, 240 + }; 
241 + 242 + static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = { 243 + .ibs_shift = 2, 244 + .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR, 245 + .msir_base = MSI_LS1043V1_1_MSIR_OFFSET, 246 + }; 247 + 248 + static const struct of_device_id ls_scfg_msi_id[] = { 249 + /* The following two misspelled compatibles are obsolete */ 250 + { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg}, 251 + { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg}, 252 + 253 + { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg }, 254 + { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg }, 255 + { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg }, 256 + { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg }, 257 + {}, 258 + }; 259 + MODULE_DEVICE_TABLE(of, ls_scfg_msi_id); 260 + 233 261 static int ls_scfg_msi_probe(struct platform_device *pdev) 234 262 { 263 + const struct of_device_id *match; 235 264 struct ls_scfg_msi *msi_data; 236 265 struct resource *res; 237 - int ret; 266 + int i, ret; 267 + 268 + match = of_match_device(ls_scfg_msi_id, &pdev->dev); 269 + if (!match) 270 + return -ENODEV; 238 271 239 272 msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL); 240 273 if (!msi_data) 241 274 return -ENOMEM; 275 + 276 + msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data; 242 277 243 278 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 244 279 msi_data->regs = devm_ioremap_resource(&pdev->dev, res); ··· 349 182 } 350 183 msi_data->msiir_addr = res->start; 351 184 352 - msi_data->irq = platform_get_irq(pdev, 0); 353 - if (msi_data->irq <= 0) { 354 - dev_err(&pdev->dev, "failed to get MSI irq\n"); 355 - return -ENODEV; 356 - } 357 - 358 185 msi_data->pdev = pdev; 359 186 spin_lock_init(&msi_data->lock); 187 + 188 + msi_data->irqs_num = MSI_IRQS_PER_MSIR * 189 + (1 << msi_data->cfg->ibs_shift); 190 + msi_data->used = devm_kcalloc(&pdev->dev, 191 + BITS_TO_LONGS(msi_data->irqs_num), 192 + sizeof(*msi_data->used), 193 + GFP_KERNEL); 194 + if (!msi_data->used) 195 + return -ENOMEM; 196 + /* 197 + * Reserve all the hwirqs 198 + * The available hwirqs will be released in ls1_msi_setup_hwirq() 199 + */ 200 + bitmap_set(msi_data->used, 0, msi_data->irqs_num); 201 + 202 + msi_data->msir_num = of_irq_count(pdev->dev.of_node); 203 + 204 + if (msi_affinity_flag) { 205 + u32 cpu_num; 206 + 207 + cpu_num = num_possible_cpus(); 208 + if (msi_data->msir_num >= cpu_num) 209 + msi_data->msir_num = cpu_num; 210 + else 211 + msi_affinity_flag = 0; 212 + } 213 + 214 + msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num, 215 + sizeof(*msi_data->msir), 216 + GFP_KERNEL); 217 + if (!msi_data->msir) 218 + return -ENOMEM; 219 + 220 + for (i = 0; i < msi_data->msir_num; i++) 221 + ls_scfg_msi_setup_hwirq(msi_data, i); 360 222 361 223 ret = ls_scfg_msi_domains_init(msi_data); 362 224 if (ret) 363 225 return ret; 364 - 365 - irq_set_chained_handler_and_data(msi_data->irq, 366 - ls_scfg_msi_irq_handler, 367 - msi_data); 368 226 369 227 platform_set_drvdata(pdev, msi_data); 370 228 ··· 399 207 static int ls_scfg_msi_remove(struct platform_device *pdev) 400 208 { 401 209 struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev); 210 + int i; 402 211 403 - irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL); 212 + for (i = 0; i < msi_data->msir_num; i++) 213 + ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]); 404 214 405 215 irq_domain_remove(msi_data->msi_domain); 406 216 irq_domain_remove(msi_data->parent); ··· 411 217 412 218 return 0; 413 219 } 414 - 
415 - static const struct of_device_id ls_scfg_msi_id[] = { 416 - { .compatible = "fsl,1s1021a-msi", }, 417 - { .compatible = "fsl,1s1043a-msi", }, 418 - {}, 419 - }; 420 220 421 221 static struct platform_driver ls_scfg_msi_driver = { 422 222 .driver = {
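
With this rework each MSI hwirq encodes an interrupt bit select and a shared-register select as (ibs << ibs_shift) | srs; when per-CPU affinity is enabled the SRS bits are left at zero and ls_scfg_msi_compose_msg() ORs the target CPU number into them instead. A stand-alone sketch of the arithmetic, using the LS1046A parameters from ls1046_msi_cfg (ibs_shift = 2, 32 interrupts per MSIR) as example values:

  #include <stdio.h>

  /* LS1046A example values from ls1046_msi_cfg */
  #define IBS_SHIFT       2
  #define IRQS_PER_MSIR   32

  int main(void)
  {
          unsigned int ibs = 17;  /* interrupt bit select within the MSIR */
          unsigned int srs = 1;   /* shared interrupt register select */

          unsigned int hwirq = (ibs << IBS_SHIFT) | srs;

          /* The chained handler reverses the encoding when an MSIR bit fires */
          unsigned int dec_srs = hwirq & ((1 << IBS_SHIFT) - 1);
          unsigned int dec_ibs = hwirq >> IBS_SHIFT;

          printf("hwirq=%u -> ibs=%u srs=%u (hwirq space: %u entries)\n",
                 hwirq, dec_ibs, dec_srs, IRQS_PER_MSIR << IBS_SHIFT);
          return 0;
  }
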
+4
drivers/irqchip/irq-metag-ext.c
··· 518 518 519 519 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr); 520 520 521 + irq_data_update_effective_affinity(data, cpumask_of(cpu)); 522 + 521 523 return 0; 522 524 } 523 525 #else ··· 580 578 else 581 579 irq_set_chip_and_handler(irq, &meta_intc_edge_chip, 582 580 handle_edge_irq); 581 + 582 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); 583 583 return 0; 584 584 } 585 585
+7 -3
drivers/irqchip/irq-mips-gic.c
··· 445 445 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); 446 446 cpumask_t tmp = CPU_MASK_NONE; 447 447 unsigned long flags; 448 - int i; 448 + int i, cpu; 449 449 450 450 cpumask_and(&tmp, cpumask, cpu_online_mask); 451 451 if (cpumask_empty(&tmp)) 452 452 return -EINVAL; 453 453 454 + cpu = cpumask_first(&tmp); 455 + 454 456 /* Assumption : cpumask refers to a single CPU */ 455 457 spin_lock_irqsave(&gic_lock, flags); 456 458 457 459 /* Re-route this IRQ */ 458 - gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp))); 460 + gic_map_to_vpe(irq, mips_cm_vp_id(cpu)); 459 461 460 462 /* Update the pcpu_masks */ 461 463 for (i = 0; i < min(gic_vpes, NR_CPUS); i++) 462 464 clear_bit(irq, pcpu_masks[i].pcpu_mask); 463 - set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask); 465 + set_bit(irq, pcpu_masks[cpu].pcpu_mask); 464 466 465 467 cpumask_copy(irq_data_get_affinity_mask(d), cpumask); 468 + irq_data_update_effective_affinity(d, cpumask_of(cpu)); 466 469 spin_unlock_irqrestore(&gic_lock, flags); 467 470 468 471 return IRQ_SET_MASK_OK_NOCOPY; ··· 719 716 if (err) 720 717 return err; 721 718 719 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq))); 722 720 return gic_shared_irq_domain_map(d, virq, hwirq, 0); 723 721 } 724 722
+2 -2
drivers/irqchip/irq-mmp.c
··· 181 181 .xlate = mmp_irq_domain_xlate, 182 182 }; 183 183 184 - static struct mmp_intc_conf mmp_conf = { 184 + static const struct mmp_intc_conf mmp_conf = { 185 185 .conf_enable = 0x51, 186 186 .conf_disable = 0x0, 187 187 .conf_mask = 0x7f, 188 188 }; 189 189 190 - static struct mmp_intc_conf mmp2_conf = { 190 + static const struct mmp_intc_conf mmp2_conf = { 191 191 .conf_enable = 0x20, 192 192 .conf_disable = 0x0, 193 193 .conf_mask = 0x7f,
+1 -2
drivers/irqchip/irq-mtk-sysirq.c
··· 178 178 chip_data->intpol_words[i] = size / 4; 179 179 chip_data->intpol_bases[i] = of_iomap(node, i); 180 180 if (ret || !chip_data->intpol_bases[i]) { 181 - pr_err("%s: couldn't map region %d\n", 182 - node->full_name, i); 181 + pr_err("%pOF: couldn't map region %d\n", node, i); 183 182 ret = -ENODEV; 184 183 goto out_free_intpol; 185 184 }
+2 -2
drivers/irqchip/irq-mxs.c
··· 179 179 &icoll_irq_domain_ops, NULL); 180 180 181 181 if (!icoll_domain) 182 - panic("%s: unable to create irq domain", np->full_name); 182 + panic("%pOF: unable to create irq domain", np); 183 183 } 184 184 185 185 static void __iomem * __init icoll_init_iobase(struct device_node *np) ··· 188 188 189 189 icoll_base = of_io_request_and_map(np, 0, np->name); 190 190 if (IS_ERR(icoll_base)) 191 - panic("%s: unable to map resource", np->full_name); 191 + panic("%pOF: unable to map resource", np); 192 192 return icoll_base; 193 193 } 194 194
+4 -4
drivers/irqchip/irq-stm32-exti.c
··· 140 140 141 141 base = of_iomap(node, 0); 142 142 if (!base) { 143 - pr_err("%s: Unable to map registers\n", node->full_name); 143 + pr_err("%pOF: Unable to map registers\n", node); 144 144 return -ENOMEM; 145 145 } 146 146 ··· 149 149 nr_exti = fls(readl_relaxed(base + EXTI_RTSR)); 150 150 writel_relaxed(0, base + EXTI_RTSR); 151 151 152 - pr_info("%s: %d External IRQs detected\n", node->full_name, nr_exti); 152 + pr_info("%pOF: %d External IRQs detected\n", node, nr_exti); 153 153 154 154 domain = irq_domain_add_linear(node, nr_exti, 155 155 &irq_exti_domain_ops, NULL); ··· 163 163 ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti", 164 164 handle_edge_irq, clr, 0, 0); 165 165 if (ret) { 166 - pr_err("%s: Could not allocate generic interrupt chip.\n", 167 - node->full_name); 166 + pr_err("%pOF: Could not allocate generic interrupt chip.\n", 167 + node); 168 168 goto out_free_domain; 169 169 } 170 170
+3 -3
drivers/irqchip/irq-sun4i.c
··· 97 97 { 98 98 sun4i_irq_base = of_iomap(node, 0); 99 99 if (!sun4i_irq_base) 100 - panic("%s: unable to map IC registers\n", 101 - node->full_name); 100 + panic("%pOF: unable to map IC registers\n", 101 + node); 102 102 103 103 /* Disable all interrupts */ 104 104 writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0)); ··· 124 124 sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32, 125 125 &sun4i_irq_ops, NULL); 126 126 if (!sun4i_irq_domain) 127 - panic("%s: unable to create IRQ domain\n", node->full_name); 127 + panic("%pOF: unable to create IRQ domain\n", node); 128 128 129 129 set_handle_irq(sun4i_handle_irq); 130 130
+8 -8
drivers/irqchip/irq-tegra.c
··· 291 291 int err; 292 292 293 293 if (!parent) { 294 - pr_err("%s: no parent, giving up\n", node->full_name); 294 + pr_err("%pOF: no parent, giving up\n", node); 295 295 return -ENODEV; 296 296 } 297 297 298 298 parent_domain = irq_find_host(parent); 299 299 if (!parent_domain) { 300 - pr_err("%s: unable to obtain parent domain\n", node->full_name); 300 + pr_err("%pOF: unable to obtain parent domain\n", node); 301 301 return -ENXIO; 302 302 } 303 303 ··· 329 329 } 330 330 331 331 if (!num_ictlrs) { 332 - pr_err("%s: no valid regions, giving up\n", node->full_name); 332 + pr_err("%pOF: no valid regions, giving up\n", node); 333 333 err = -ENOMEM; 334 334 goto out_free; 335 335 } 336 336 337 337 WARN(num_ictlrs != soc->num_ictlrs, 338 - "%s: Found %u interrupt controllers in DT; expected %u.\n", 339 - node->full_name, num_ictlrs, soc->num_ictlrs); 338 + "%pOF: Found %u interrupt controllers in DT; expected %u.\n", 339 + node, num_ictlrs, soc->num_ictlrs); 340 340 341 341 342 342 domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, 343 343 node, &tegra_ictlr_domain_ops, 344 344 lic); 345 345 if (!domain) { 346 - pr_err("%s: failed to allocated domain\n", node->full_name); 346 + pr_err("%pOF: failed to allocated domain\n", node); 347 347 err = -ENOMEM; 348 348 goto out_unmap; 349 349 } 350 350 351 351 tegra_ictlr_syscore_init(); 352 352 353 - pr_info("%s: %d interrupts forwarded to %s\n", 354 - node->full_name, num_ictlrs * 32, parent->full_name); 353 + pr_info("%pOF: %d interrupts forwarded to %pOF\n", 354 + node, num_ictlrs * 32, parent); 355 355 356 356 return 0; 357 357
+261
drivers/irqchip/irq-uniphier-aidet.c
··· 1 + /* 2 + * Driver for UniPhier AIDET (ARM Interrupt Detector) 3 + * 4 + * Copyright (C) 2017 Socionext Inc. 5 + * Author: Masahiro Yamada <yamada.masahiro@socionext.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + */ 16 + 17 + #include <linux/bitops.h> 18 + #include <linux/init.h> 19 + #include <linux/irq.h> 20 + #include <linux/irqdomain.h> 21 + #include <linux/kernel.h> 22 + #include <linux/of.h> 23 + #include <linux/of_device.h> 24 + #include <linux/of_irq.h> 25 + #include <linux/platform_device.h> 26 + #include <linux/spinlock.h> 27 + 28 + #define UNIPHIER_AIDET_NR_IRQS 256 29 + 30 + #define UNIPHIER_AIDET_DETCONF 0x04 /* inverter register base */ 31 + 32 + struct uniphier_aidet_priv { 33 + struct irq_domain *domain; 34 + void __iomem *reg_base; 35 + spinlock_t lock; 36 + u32 saved_vals[UNIPHIER_AIDET_NR_IRQS / 32]; 37 + }; 38 + 39 + static void uniphier_aidet_reg_update(struct uniphier_aidet_priv *priv, 40 + unsigned int reg, u32 mask, u32 val) 41 + { 42 + unsigned long flags; 43 + u32 tmp; 44 + 45 + spin_lock_irqsave(&priv->lock, flags); 46 + tmp = readl_relaxed(priv->reg_base + reg); 47 + tmp &= ~mask; 48 + tmp |= mask & val; 49 + writel_relaxed(tmp, priv->reg_base + reg); 50 + spin_unlock_irqrestore(&priv->lock, flags); 51 + } 52 + 53 + static void uniphier_aidet_detconf_update(struct uniphier_aidet_priv *priv, 54 + unsigned long index, unsigned int val) 55 + { 56 + unsigned int reg; 57 + u32 mask; 58 + 59 + reg = UNIPHIER_AIDET_DETCONF + index / 32 * 4; 60 + mask = BIT(index % 32); 61 + 62 + uniphier_aidet_reg_update(priv, reg, mask, val ? 
mask : 0); 63 + } 64 + 65 + static int uniphier_aidet_irq_set_type(struct irq_data *data, unsigned int type) 66 + { 67 + struct uniphier_aidet_priv *priv = data->chip_data; 68 + unsigned int val; 69 + 70 + /* enable inverter for active low triggers */ 71 + switch (type) { 72 + case IRQ_TYPE_EDGE_RISING: 73 + case IRQ_TYPE_LEVEL_HIGH: 74 + val = 0; 75 + break; 76 + case IRQ_TYPE_EDGE_FALLING: 77 + val = 1; 78 + type = IRQ_TYPE_EDGE_RISING; 79 + break; 80 + case IRQ_TYPE_LEVEL_LOW: 81 + val = 1; 82 + type = IRQ_TYPE_LEVEL_HIGH; 83 + break; 84 + default: 85 + return -EINVAL; 86 + } 87 + 88 + uniphier_aidet_detconf_update(priv, data->hwirq, val); 89 + 90 + return irq_chip_set_type_parent(data, type); 91 + } 92 + 93 + static struct irq_chip uniphier_aidet_irq_chip = { 94 + .name = "AIDET", 95 + .irq_mask = irq_chip_mask_parent, 96 + .irq_unmask = irq_chip_unmask_parent, 97 + .irq_eoi = irq_chip_eoi_parent, 98 + .irq_set_affinity = irq_chip_set_affinity_parent, 99 + .irq_set_type = uniphier_aidet_irq_set_type, 100 + }; 101 + 102 + static int uniphier_aidet_domain_translate(struct irq_domain *domain, 103 + struct irq_fwspec *fwspec, 104 + unsigned long *out_hwirq, 105 + unsigned int *out_type) 106 + { 107 + if (WARN_ON(fwspec->param_count < 2)) 108 + return -EINVAL; 109 + 110 + *out_hwirq = fwspec->param[0]; 111 + *out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; 112 + 113 + return 0; 114 + } 115 + 116 + static int uniphier_aidet_domain_alloc(struct irq_domain *domain, 117 + unsigned int virq, unsigned int nr_irqs, 118 + void *arg) 119 + { 120 + struct irq_fwspec parent_fwspec; 121 + irq_hw_number_t hwirq; 122 + unsigned int type; 123 + int ret; 124 + 125 + if (nr_irqs != 1) 126 + return -EINVAL; 127 + 128 + ret = uniphier_aidet_domain_translate(domain, arg, &hwirq, &type); 129 + if (ret) 130 + return ret; 131 + 132 + switch (type) { 133 + case IRQ_TYPE_EDGE_RISING: 134 + case IRQ_TYPE_LEVEL_HIGH: 135 + break; 136 + case IRQ_TYPE_EDGE_FALLING: 137 + type = IRQ_TYPE_EDGE_RISING; 138 + break; 139 + case IRQ_TYPE_LEVEL_LOW: 140 + type = IRQ_TYPE_LEVEL_HIGH; 141 + break; 142 + default: 143 + return -EINVAL; 144 + } 145 + 146 + if (hwirq >= UNIPHIER_AIDET_NR_IRQS) 147 + return -ENXIO; 148 + 149 + ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, 150 + &uniphier_aidet_irq_chip, 151 + domain->host_data); 152 + if (ret) 153 + return ret; 154 + 155 + /* parent is GIC */ 156 + parent_fwspec.fwnode = domain->parent->fwnode; 157 + parent_fwspec.param_count = 3; 158 + parent_fwspec.param[0] = 0; /* SPI */ 159 + parent_fwspec.param[1] = hwirq; 160 + parent_fwspec.param[2] = type; 161 + 162 + return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec); 163 + } 164 + 165 + static const struct irq_domain_ops uniphier_aidet_domain_ops = { 166 + .alloc = uniphier_aidet_domain_alloc, 167 + .free = irq_domain_free_irqs_common, 168 + .translate = uniphier_aidet_domain_translate, 169 + }; 170 + 171 + static int uniphier_aidet_probe(struct platform_device *pdev) 172 + { 173 + struct device *dev = &pdev->dev; 174 + struct device_node *parent_np; 175 + struct irq_domain *parent_domain; 176 + struct uniphier_aidet_priv *priv; 177 + struct resource *res; 178 + 179 + parent_np = of_irq_find_parent(dev->of_node); 180 + if (!parent_np) 181 + return -ENXIO; 182 + 183 + parent_domain = irq_find_host(parent_np); 184 + of_node_put(parent_np); 185 + if (!parent_domain) 186 + return -EPROBE_DEFER; 187 + 188 + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); 189 + if (!priv) 190 + return -ENOMEM; 191 + 192 + res 
= platform_get_resource(pdev, IORESOURCE_MEM, 0); 193 + priv->reg_base = devm_ioremap_resource(dev, res); 194 + if (IS_ERR(priv->reg_base)) 195 + return PTR_ERR(priv->reg_base); 196 + 197 + spin_lock_init(&priv->lock); 198 + 199 + priv->domain = irq_domain_create_hierarchy( 200 + parent_domain, 0, 201 + UNIPHIER_AIDET_NR_IRQS, 202 + of_node_to_fwnode(dev->of_node), 203 + &uniphier_aidet_domain_ops, priv); 204 + if (!priv->domain) 205 + return -ENOMEM; 206 + 207 + platform_set_drvdata(pdev, priv); 208 + 209 + return 0; 210 + } 211 + 212 + static int __maybe_unused uniphier_aidet_suspend(struct device *dev) 213 + { 214 + struct uniphier_aidet_priv *priv = dev_get_drvdata(dev); 215 + int i; 216 + 217 + for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++) 218 + priv->saved_vals[i] = readl_relaxed( 219 + priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4); 220 + 221 + return 0; 222 + } 223 + 224 + static int __maybe_unused uniphier_aidet_resume(struct device *dev) 225 + { 226 + struct uniphier_aidet_priv *priv = dev_get_drvdata(dev); 227 + int i; 228 + 229 + for (i = 0; i < ARRAY_SIZE(priv->saved_vals); i++) 230 + writel_relaxed(priv->saved_vals[i], 231 + priv->reg_base + UNIPHIER_AIDET_DETCONF + i * 4); 232 + 233 + return 0; 234 + } 235 + 236 + static const struct dev_pm_ops uniphier_aidet_pm_ops = { 237 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(uniphier_aidet_suspend, 238 + uniphier_aidet_resume) 239 + }; 240 + 241 + static const struct of_device_id uniphier_aidet_match[] = { 242 + { .compatible = "socionext,uniphier-ld4-aidet" }, 243 + { .compatible = "socionext,uniphier-pro4-aidet" }, 244 + { .compatible = "socionext,uniphier-sld8-aidet" }, 245 + { .compatible = "socionext,uniphier-pro5-aidet" }, 246 + { .compatible = "socionext,uniphier-pxs2-aidet" }, 247 + { .compatible = "socionext,uniphier-ld11-aidet" }, 248 + { .compatible = "socionext,uniphier-ld20-aidet" }, 249 + { .compatible = "socionext,uniphier-pxs3-aidet" }, 250 + { /* sentinel */ } 251 + }; 252 + 253 + static struct platform_driver uniphier_aidet_driver = { 254 + .probe = uniphier_aidet_probe, 255 + .driver = { 256 + .name = "uniphier-aidet", 257 + .of_match_table = uniphier_aidet_match, 258 + .pm = &uniphier_aidet_pm_ops, 259 + }, 260 + }; 261 + builtin_platform_driver(uniphier_aidet_driver);
+2 -2
drivers/irqchip/irq-xilinx-intc.c
··· 186 186 if (irqc->intr_mask >> nr_irq) 187 187 pr_warn("irq-xilinx: mismatch in kind-of-intr param\n"); 188 188 189 - pr_info("irq-xilinx: %s: num_irq=%d, edge=0x%x\n", 190 - intc->full_name, nr_irq, irqc->intr_mask); 189 + pr_info("irq-xilinx: %pOF: num_irq=%d, edge=0x%x\n", 190 + intc, nr_irq, irqc->intr_mask); 191 191 192 192 193 193 /*
+5 -1
drivers/irqchip/irq-xtensa-mx.c
··· 32 32 irq_set_status_flags(irq, IRQ_LEVEL); 33 33 return 0; 34 34 } 35 + irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); 35 36 return xtensa_irq_map(d, irq, hw); 36 37 } 37 38 ··· 122 121 static int xtensa_mx_irq_set_affinity(struct irq_data *d, 123 122 const struct cpumask *dest, bool force) 124 123 { 125 - unsigned mask = 1u << cpumask_any_and(dest, cpu_online_mask); 124 + int cpu = cpumask_any_and(dest, cpu_online_mask); 125 + unsigned mask = 1u << cpu; 126 126 127 127 set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE)); 128 + irq_data_update_effective_affinity(d, cpumask_of(cpu)); 129 + 128 130 return 0; 129 131 130 132 }
+6 -1
include/linux/irq.h
··· 568 568 extern int irq_chip_pm_get(struct irq_data *data); 569 569 extern int irq_chip_pm_put(struct irq_data *data); 570 570 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 571 + extern void handle_fasteoi_ack_irq(struct irq_desc *desc); 572 + extern void handle_fasteoi_mask_irq(struct irq_desc *desc); 571 573 extern void irq_chip_enable_parent(struct irq_data *data); 572 574 extern void irq_chip_disable_parent(struct irq_data *data); 573 575 extern void irq_chip_ack_parent(struct irq_data *data); ··· 783 781 static inline 784 782 struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) 785 783 { 786 - return d->common->effective_affinity; 784 + if (!cpumask_empty(d->common->effective_affinity)) 785 + return d->common->effective_affinity; 786 + 787 + return d->common->affinity; 787 788 } 788 789 static inline void irq_data_update_effective_affinity(struct irq_data *d, 789 790 const struct cpumask *m)
+44
include/linux/irq_sim.h
··· 1 + #ifndef _LINUX_IRQ_SIM_H 2 + #define _LINUX_IRQ_SIM_H 3 + /* 4 + * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl> 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms of the GNU General Public License as published by the 8 + * Free Software Foundation; either version 2 of the License, or (at your 9 + * option) any later version. 10 + */ 11 + 12 + #include <linux/irq_work.h> 13 + #include <linux/device.h> 14 + 15 + /* 16 + * Provides a framework for allocating simulated interrupts which can be 17 + * requested like normal irqs and enqueued from process context. 18 + */ 19 + 20 + struct irq_sim_work_ctx { 21 + struct irq_work work; 22 + int irq; 23 + }; 24 + 25 + struct irq_sim_irq_ctx { 26 + int irqnum; 27 + bool enabled; 28 + }; 29 + 30 + struct irq_sim { 31 + struct irq_sim_work_ctx work_ctx; 32 + int irq_base; 33 + unsigned int irq_count; 34 + struct irq_sim_irq_ctx *irqs; 35 + }; 36 + 37 + int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs); 38 + int devm_irq_sim_init(struct device *dev, struct irq_sim *sim, 39 + unsigned int num_irqs); 40 + void irq_sim_fini(struct irq_sim *sim); 41 + void irq_sim_fire(struct irq_sim *sim, unsigned int offset); 42 + int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset); 43 + 44 + #endif /* _LINUX_IRQ_SIM_H */
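The header above is the whole public surface of the interrupt simulator. A hypothetical consumer (the demo_* names are illustrative, not part of this series) would allocate a block of dummy lines, request one of them like any other interrupt and then fire it from process context:

        static struct irq_sim sim;

        static irqreturn_t demo_handler(int irq, void *data)
        {
                return IRQ_HANDLED;
        }

        static int demo_setup(struct device *dev)
        {
                int ret, irq;

                ret = devm_irq_sim_init(dev, &sim, 4); /* 4 simulated lines */
                if (ret)
                        return ret;

                irq = irq_sim_irqnum(&sim, 0);  /* Linux irq number of line 0 */
                ret = devm_request_irq(dev, irq, demo_handler, 0, "demo", NULL);
                if (ret)
                        return ret;

                irq_sim_fire(&sim, 0);          /* runs demo_handler via irq_work */
                return 0;
        }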
+2
include/linux/irqchip/arm-gic-common.h
··· 27 27 unsigned int maint_irq; 28 28 /* Virtual control interface */ 29 29 struct resource vctrl; 30 + /* vlpi support */ 31 + bool has_v4; 30 32 }; 31 33 32 34 const struct gic_kvm_info *gic_get_kvm_info(void);
+84
include/linux/irqchip/arm-gic-v3.h
··· 204 204 205 205 #define GICR_TYPER_PLPIS (1U << 0) 206 206 #define GICR_TYPER_VLPIS (1U << 1) 207 + #define GICR_TYPER_DirectLPIS (1U << 3) 207 208 #define GICR_TYPER_LAST (1U << 4) 208 209 209 210 #define GIC_V3_REDIST_SIZE 0x20000 210 211 211 212 #define LPI_PROP_GROUP1 (1 << 1) 212 213 #define LPI_PROP_ENABLED (1 << 0) 214 + 215 + /* 216 + * Re-Distributor registers, offsets from VLPI_base 217 + */ 218 + #define GICR_VPROPBASER 0x0070 219 + 220 + #define GICR_VPROPBASER_IDBITS_MASK 0x1f 221 + 222 + #define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) 223 + #define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) 224 + #define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) 225 + 226 + #define GICR_VPROPBASER_SHAREABILITY_MASK \ 227 + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) 228 + #define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ 229 + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) 230 + #define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ 231 + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) 232 + #define GICR_VPROPBASER_CACHEABILITY_MASK \ 233 + GICR_VPROPBASER_INNER_CACHEABILITY_MASK 234 + 235 + #define GICR_VPROPBASER_InnerShareable \ 236 + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) 237 + 238 + #define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) 239 + #define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) 240 + #define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) 241 + #define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) 242 + #define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) 243 + #define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) 244 + #define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) 245 + #define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) 246 + 247 + #define GICR_VPENDBASER 0x0078 248 + 249 + #define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) 250 + #define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) 251 + #define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) 252 + #define GICR_VPENDBASER_SHAREABILITY_MASK \ 253 + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) 254 + #define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ 255 + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) 256 + #define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ 257 + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) 258 + #define GICR_VPENDBASER_CACHEABILITY_MASK \ 259 + GICR_VPENDBASER_INNER_CACHEABILITY_MASK 260 + 261 + #define GICR_VPENDBASER_NonShareable \ 262 + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) 263 + 264 + #define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) 265 + #define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) 266 + #define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) 267 + #define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) 268 + #define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) 269 + #define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) 270 + #define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) 271 + #define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) 272 + 273 + #define GICR_VPENDBASER_Dirty (1ULL << 60) 274 + #define GICR_VPENDBASER_PendingLast (1ULL << 61) 
275 + #define GICR_VPENDBASER_IDAI (1ULL << 62) 276 + #define GICR_VPENDBASER_Valid (1ULL << 63) 213 277 214 278 /* 215 279 * ITS registers, offsets from ITS_base ··· 298 234 #define GITS_TRANSLATER 0x10040 299 235 300 236 #define GITS_CTLR_ENABLE (1U << 0) 237 + #define GITS_CTLR_ImDe (1U << 1) 238 + #define GITS_CTLR_ITS_NUMBER_SHIFT 4 239 + #define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) 301 240 #define GITS_CTLR_QUIESCENT (1U << 31) 302 241 303 242 #define GITS_TYPER_PLPIS (1UL << 0) 243 + #define GITS_TYPER_VLPIS (1UL << 1) 304 244 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 245 + #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) 305 246 #define GITS_TYPER_IDBITS_SHIFT 8 306 247 #define GITS_TYPER_DEVBITS_SHIFT 13 307 248 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) 308 249 #define GITS_TYPER_PTA (1UL << 19) 309 250 #define GITS_TYPER_HWCOLLCNT_SHIFT 24 251 + #define GITS_TYPER_VMOVP (1ULL << 37) 310 252 311 253 #define GITS_IIDR_REV_SHIFT 12 312 254 #define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) ··· 410 340 #define GITS_CMD_INT 0x03 411 341 #define GITS_CMD_CLEAR 0x04 412 342 #define GITS_CMD_SYNC 0x05 343 + 344 + /* 345 + * GICv4 ITS specific commands 346 + */ 347 + #define GITS_CMD_GICv4(x) ((x) | 0x20) 348 + #define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) 349 + #define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) 350 + #define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) 351 + #define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) 352 + #define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) 353 + /* VMOVP is the odd one, as it doesn't have a physical counterpart */ 354 + #define GITS_CMD_VMOVP GITS_CMD_GICv4(2) 413 355 414 356 /* 415 357 * ITS error numbers ··· 569 487 struct page *prop_page; 570 488 int id_bits; 571 489 u64 flags; 490 + bool has_vlpis; 491 + bool has_direct_lpi; 572 492 }; 573 493 574 494 struct irq_domain;
+105
include/linux/irqchip/arm-gic-v4.h
··· 1 + /* 2 + * Copyright (C) 2016,2017 ARM Limited, All Rights Reserved. 3 + * Author: Marc Zyngier <marc.zyngier@arm.com> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 + */ 17 + 18 + #ifndef __LINUX_IRQCHIP_ARM_GIC_V4_H 19 + #define __LINUX_IRQCHIP_ARM_GIC_V4_H 20 + 21 + struct its_vpe; 22 + 23 + /* Embedded in kvm.arch */ 24 + struct its_vm { 25 + struct fwnode_handle *fwnode; 26 + struct irq_domain *domain; 27 + struct page *vprop_page; 28 + struct its_vpe **vpes; 29 + int nr_vpes; 30 + irq_hw_number_t db_lpi_base; 31 + unsigned long *db_bitmap; 32 + int nr_db_lpis; 33 + }; 34 + 35 + /* Embedded in kvm_vcpu.arch */ 36 + struct its_vpe { 37 + struct page *vpt_page; 38 + struct its_vm *its_vm; 39 + /* Doorbell interrupt */ 40 + int irq; 41 + irq_hw_number_t vpe_db_lpi; 42 + /* VPE proxy mapping */ 43 + int vpe_proxy_event; 44 + /* 45 + * This collection ID is used to indirect the target 46 + * redistributor for this VPE. The ID itself isn't involved in 47 + * programming of the ITS. 48 + */ 49 + u16 col_idx; 50 + /* Unique (system-wide) VPE identifier */ 51 + u16 vpe_id; 52 + /* Implementation Defined Area Invalid */ 53 + bool idai; 54 + /* Pending VLPIs on schedule out? */ 55 + bool pending_last; 56 + }; 57 + 58 + /* 59 + * struct its_vlpi_map: structure describing the mapping of a 60 + * VLPI. Only to be interpreted in the context of a physical interrupt 61 + * it complements. To be used as the vcpu_info passed to 62 + * irq_set_vcpu_affinity(). 63 + * 64 + * @vm: Pointer to the GICv4 notion of a VM 65 + * @vpe: Pointer to the GICv4 notion of a virtual CPU (VPE) 66 + * @vintid: Virtual LPI number 67 + * @db_enabled: Is the VPE doorbell to be generated? 68 + */ 69 + struct its_vlpi_map { 70 + struct its_vm *vm; 71 + struct its_vpe *vpe; 72 + u32 vintid; 73 + bool db_enabled; 74 + }; 75 + 76 + enum its_vcpu_info_cmd_type { 77 + MAP_VLPI, 78 + GET_VLPI, 79 + PROP_UPDATE_VLPI, 80 + PROP_UPDATE_AND_INV_VLPI, 81 + SCHEDULE_VPE, 82 + DESCHEDULE_VPE, 83 + INVALL_VPE, 84 + }; 85 + 86 + struct its_cmd_info { 87 + enum its_vcpu_info_cmd_type cmd_type; 88 + union { 89 + struct its_vlpi_map *map; 90 + u8 config; 91 + }; 92 + }; 93 + 94 + int its_alloc_vcpu_irqs(struct its_vm *vm); 95 + void its_free_vcpu_irqs(struct its_vm *vm); 96 + int its_schedule_vpe(struct its_vpe *vpe, bool on); 97 + int its_invall_vpe(struct its_vpe *vpe); 98 + int its_map_vlpi(int irq, struct its_vlpi_map *map); 99 + int its_get_vlpi(int irq, struct its_vlpi_map *map); 100 + int its_unmap_vlpi(int irq); 101 + int its_prop_update_vlpi(int irq, u8 config, bool inv); 102 + 103 + int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops); 104 + 105 + #endif
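These entry points are only meaningful to a hypervisor that owns an its_vm. A rough sketch of the intended flow, assuming a guest described by an its_vm vm that has already been populated with its VPEs; host_irq and the VLPI number are made up for illustration:

        struct its_vlpi_map map = {
                .vm         = &vm,
                .vpe        = vm.vpes[0],       /* target virtual CPU */
                .vintid     = 8192,             /* LPI number the guest sees */
                .db_enabled = true,             /* doorbell while not resident */
        };
        int ret;

        ret = its_alloc_vcpu_irqs(&vm);         /* doorbell LPIs for all VPEs */
        if (ret)
                return ret;

        ret = its_map_vlpi(host_irq, &map);     /* forward host_irq as a VLPI */
        if (ret)
                return ret;

        ret = its_schedule_vpe(vm.vpes[0], true);       /* VPE becomes resident */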
+3
include/linux/irqdomain.h
··· 460 460 extern void irq_domain_free_irqs_top(struct irq_domain *domain, 461 461 unsigned int virq, unsigned int nr_irqs); 462 462 463 + extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg); 464 + extern int irq_domain_pop_irq(struct irq_domain *domain, int virq); 465 + 463 466 extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, 464 467 unsigned int irq_base, 465 468 unsigned int nr_irqs, void *arg);
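irq_domain_push_irq() and irq_domain_pop_irq() let a driver stack its own domain on top of a virq that was already allocated elsewhere; the implementation and kernel-doc follow in kernel/irq/irqdomain.c below. A hedged usage sketch, where my_domain, my_alloc_arg and my_handler are placeholders rather than anything defined by this series:

        int virq = pci_irq_vector(pdev, 0);     /* pre-allocated, e.g. MSI-X */
        int ret;

        ret = irq_domain_push_irq(my_domain, virq, &my_alloc_arg);
        if (ret)
                return ret;

        ret = request_irq(virq, my_handler, 0, "my-dev", dev);
        if (ret)
                return ret;

        /* ... use the interrupt ... */

        free_irq(virq, dev);
        irq_domain_pop_irq(my_domain, virq);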
+9
kernel/irq/Kconfig
··· 63 63 config IRQ_DOMAIN 64 64 bool 65 65 66 + # Support for simulated interrupts 67 + config IRQ_SIM 68 + bool 69 + select IRQ_WORK 70 + 66 71 # Support for hierarchical irq domains 67 72 config IRQ_DOMAIN_HIERARCHY 68 73 bool 69 74 select IRQ_DOMAIN 75 + 76 + # Support for hierarchical fasteoi+edge and fasteoi+level handlers 77 + config IRQ_FASTEOI_HIERARCHY_HANDLERS 78 + bool 70 79 71 80 # Generic IRQ IPI support 72 81 config GENERIC_IRQ_IPI
+1
kernel/irq/Makefile
··· 4 4 obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o 5 5 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o 6 6 obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o 7 + obj-$(CONFIG_IRQ_SIM) += irq_sim.o 7 8 obj-$(CONFIG_PROC_FS) += proc.o 8 9 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 9 10 obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
+109
kernel/irq/chip.c
··· 1098 1098 } 1099 1099 1100 1100 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1101 + 1102 + #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS 1103 + /** 1104 + * handle_fasteoi_ack_irq - irq handler for edge hierarchy 1105 + * stacked on transparent controllers 1106 + * 1107 + * @desc: the interrupt description structure for this irq 1108 + * 1109 + * Like handle_fasteoi_irq(), but for use with hierarchy where 1110 + * the irq_chip also needs to have its ->irq_ack() function 1111 + * called. 1112 + */ 1113 + void handle_fasteoi_ack_irq(struct irq_desc *desc) 1114 + { 1115 + struct irq_chip *chip = desc->irq_data.chip; 1116 + 1117 + raw_spin_lock(&desc->lock); 1118 + 1119 + if (!irq_may_run(desc)) 1120 + goto out; 1121 + 1122 + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 1123 + 1124 + /* 1125 + * If its disabled or no action available 1126 + * then mask it and get out of here: 1127 + */ 1128 + if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 1129 + desc->istate |= IRQS_PENDING; 1130 + mask_irq(desc); 1131 + goto out; 1132 + } 1133 + 1134 + kstat_incr_irqs_this_cpu(desc); 1135 + if (desc->istate & IRQS_ONESHOT) 1136 + mask_irq(desc); 1137 + 1138 + /* Start handling the irq */ 1139 + desc->irq_data.chip->irq_ack(&desc->irq_data); 1140 + 1141 + preflow_handler(desc); 1142 + handle_irq_event(desc); 1143 + 1144 + cond_unmask_eoi_irq(desc, chip); 1145 + 1146 + raw_spin_unlock(&desc->lock); 1147 + return; 1148 + out: 1149 + if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 1150 + chip->irq_eoi(&desc->irq_data); 1151 + raw_spin_unlock(&desc->lock); 1152 + } 1153 + EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); 1154 + 1155 + /** 1156 + * handle_fasteoi_mask_irq - irq handler for level hierarchy 1157 + * stacked on transparent controllers 1158 + * 1159 + * @desc: the interrupt description structure for this irq 1160 + * 1161 + * Like handle_fasteoi_irq(), but for use with hierarchy where 1162 + * the irq_chip also needs to have its ->irq_mask_ack() function 1163 + * called. 
1164 + */ 1165 + void handle_fasteoi_mask_irq(struct irq_desc *desc) 1166 + { 1167 + struct irq_chip *chip = desc->irq_data.chip; 1168 + 1169 + raw_spin_lock(&desc->lock); 1170 + mask_ack_irq(desc); 1171 + 1172 + if (!irq_may_run(desc)) 1173 + goto out; 1174 + 1175 + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 1176 + 1177 + /* 1178 + * If its disabled or no action available 1179 + * then mask it and get out of here: 1180 + */ 1181 + if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 1182 + desc->istate |= IRQS_PENDING; 1183 + mask_irq(desc); 1184 + goto out; 1185 + } 1186 + 1187 + kstat_incr_irqs_this_cpu(desc); 1188 + if (desc->istate & IRQS_ONESHOT) 1189 + mask_irq(desc); 1190 + 1191 + preflow_handler(desc); 1192 + handle_irq_event(desc); 1193 + 1194 + cond_unmask_eoi_irq(desc, chip); 1195 + 1196 + raw_spin_unlock(&desc->lock); 1197 + return; 1198 + out: 1199 + if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 1200 + chip->irq_eoi(&desc->irq_data); 1201 + raw_spin_unlock(&desc->lock); 1202 + } 1203 + EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); 1204 + 1205 + #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */ 1206 + 1101 1207 /** 1102 1208 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if 1103 1209 * NULL) ··· 1217 1111 else 1218 1112 data->chip->irq_unmask(data); 1219 1113 } 1114 + EXPORT_SYMBOL_GPL(irq_chip_enable_parent); 1220 1115 1221 1116 /** 1222 1117 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if ··· 1232 1125 else 1233 1126 data->chip->irq_mask(data); 1234 1127 } 1128 + EXPORT_SYMBOL_GPL(irq_chip_disable_parent); 1235 1129 1236 1130 /** 1237 1131 * irq_chip_ack_parent - Acknowledge the parent interrupt ··· 1295 1187 1296 1188 return -ENOSYS; 1297 1189 } 1190 + EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent); 1298 1191 1299 1192 /** 1300 1193 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
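handle_fasteoi_ack_irq() and handle_fasteoi_mask_irq() exist so that a child irqchip stacked on a transparent fasteoi parent (typically a GIC) can still have its own ->irq_ack() or ->irq_mask_ack() invoked. A sketch of how a stacked edge-type chip might wire this up from its domain's ->alloc() callback; my_edge_chip, my_hw_ack and chip_data are assumptions, the core calls are the ones added above, and the driver would additionally need to select IRQ_FASTEOI_HIERARCHY_HANDLERS:

        static struct irq_chip my_edge_chip = {
                .name             = "my-edge",
                .irq_ack          = my_hw_ack,          /* hypothetical HW ack */
                .irq_mask         = irq_chip_mask_parent,
                .irq_unmask       = irq_chip_unmask_parent,
                .irq_eoi          = irq_chip_eoi_parent,
                .irq_set_affinity = irq_chip_set_affinity_parent,
        };

        /* in the child domain's ->alloc() implementation: */
        irq_domain_set_info(domain, virq, hwirq, &my_edge_chip, chip_data,
                            handle_fasteoi_ack_irq, NULL, NULL);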
+49 -1
kernel/irq/debugfs.c
··· 5 5 */ 6 6 #include <linux/irqdomain.h> 7 7 #include <linux/irq.h> 8 + #include <linux/uaccess.h> 8 9 9 10 #include "internals.h" 10 11 ··· 172 171 return single_open(file, irq_debug_show, inode->i_private); 173 172 } 174 173 174 + static ssize_t irq_debug_write(struct file *file, const char __user *user_buf, 175 + size_t count, loff_t *ppos) 176 + { 177 + struct irq_desc *desc = file_inode(file)->i_private; 178 + char buf[8] = { 0, }; 179 + size_t size; 180 + 181 + size = min(sizeof(buf) - 1, count); 182 + if (copy_from_user(buf, user_buf, size)) 183 + return -EFAULT; 184 + 185 + if (!strncmp(buf, "trigger", size)) { 186 + unsigned long flags; 187 + int err; 188 + 189 + /* Try the HW interface first */ 190 + err = irq_set_irqchip_state(irq_desc_get_irq(desc), 191 + IRQCHIP_STATE_PENDING, true); 192 + if (!err) 193 + return count; 194 + 195 + /* 196 + * Otherwise, try to inject via the resend interface, 197 + * which may or may not succeed. 198 + */ 199 + chip_bus_lock(desc); 200 + raw_spin_lock_irqsave(&desc->lock, flags); 201 + 202 + if (irq_settings_is_level(desc)) { 203 + /* Can't do level, sorry */ 204 + err = -EINVAL; 205 + } else { 206 + desc->istate |= IRQS_PENDING; 207 + check_irq_resend(desc); 208 + err = 0; 209 + } 210 + 211 + raw_spin_unlock_irqrestore(&desc->lock, flags); 212 + chip_bus_sync_unlock(desc); 213 + 214 + return err ? err : count; 215 + } 216 + 217 + return count; 218 + } 219 + 175 220 static const struct file_operations dfs_irq_ops = { 176 221 .open = irq_debug_open, 222 + .write = irq_debug_write, 177 223 .read = seq_read, 178 224 .llseek = seq_lseek, 179 225 .release = single_release, ··· 234 186 return; 235 187 236 188 sprintf(name, "%d", irq); 237 - desc->debugfs_file = debugfs_create_file(name, 0444, irq_dir, desc, 189 + desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc, 238 190 &dfs_irq_ops); 239 191 } 240 192
+1 -1
kernel/irq/internals.h
··· 151 151 #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) 152 152 153 153 #define for_each_action_of_desc(desc, act) \ 154 - for (act = desc->act; act; act = act->next) 154 + for (act = desc->action; act; act = act->next) 155 155 156 156 struct irq_desc * 157 157 __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
+164
kernel/irq/irq_sim.c
··· 1 + /* 2 + * Copyright (C) 2017 Bartosz Golaszewski <brgl@bgdev.pl> 3 + * 4 + * This program is free software; you can redistribute it and/or modify it 5 + * under the terms of the GNU General Public License as published by the 6 + * Free Software Foundation; either version 2 of the License, or (at your 7 + * option) any later version. 8 + */ 9 + 10 + #include <linux/irq_sim.h> 11 + #include <linux/irq.h> 12 + 13 + struct irq_sim_devres { 14 + struct irq_sim *sim; 15 + }; 16 + 17 + static void irq_sim_irqmask(struct irq_data *data) 18 + { 19 + struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data); 20 + 21 + irq_ctx->enabled = false; 22 + } 23 + 24 + static void irq_sim_irqunmask(struct irq_data *data) 25 + { 26 + struct irq_sim_irq_ctx *irq_ctx = irq_data_get_irq_chip_data(data); 27 + 28 + irq_ctx->enabled = true; 29 + } 30 + 31 + static struct irq_chip irq_sim_irqchip = { 32 + .name = "irq_sim", 33 + .irq_mask = irq_sim_irqmask, 34 + .irq_unmask = irq_sim_irqunmask, 35 + }; 36 + 37 + static void irq_sim_handle_irq(struct irq_work *work) 38 + { 39 + struct irq_sim_work_ctx *work_ctx; 40 + 41 + work_ctx = container_of(work, struct irq_sim_work_ctx, work); 42 + handle_simple_irq(irq_to_desc(work_ctx->irq)); 43 + } 44 + 45 + /** 46 + * irq_sim_init - Initialize the interrupt simulator: allocate a range of 47 + * dummy interrupts. 48 + * 49 + * @sim: The interrupt simulator object to initialize. 50 + * @num_irqs: Number of interrupts to allocate 51 + * 52 + * Returns 0 on success and a negative error number on failure. 53 + */ 54 + int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs) 55 + { 56 + int i; 57 + 58 + sim->irqs = kmalloc_array(num_irqs, sizeof(*sim->irqs), GFP_KERNEL); 59 + if (!sim->irqs) 60 + return -ENOMEM; 61 + 62 + sim->irq_base = irq_alloc_descs(-1, 0, num_irqs, 0); 63 + if (sim->irq_base < 0) { 64 + kfree(sim->irqs); 65 + return sim->irq_base; 66 + } 67 + 68 + for (i = 0; i < num_irqs; i++) { 69 + sim->irqs[i].irqnum = sim->irq_base + i; 70 + sim->irqs[i].enabled = false; 71 + irq_set_chip(sim->irq_base + i, &irq_sim_irqchip); 72 + irq_set_chip_data(sim->irq_base + i, &sim->irqs[i]); 73 + irq_set_handler(sim->irq_base + i, &handle_simple_irq); 74 + irq_modify_status(sim->irq_base + i, 75 + IRQ_NOREQUEST | IRQ_NOAUTOEN, IRQ_NOPROBE); 76 + } 77 + 78 + init_irq_work(&sim->work_ctx.work, irq_sim_handle_irq); 79 + sim->irq_count = num_irqs; 80 + 81 + return 0; 82 + } 83 + EXPORT_SYMBOL_GPL(irq_sim_init); 84 + 85 + /** 86 + * irq_sim_fini - Deinitialize the interrupt simulator: free the interrupt 87 + * descriptors and allocated memory. 88 + * 89 + * @sim: The interrupt simulator to tear down. 90 + */ 91 + void irq_sim_fini(struct irq_sim *sim) 92 + { 93 + irq_work_sync(&sim->work_ctx.work); 94 + irq_free_descs(sim->irq_base, sim->irq_count); 95 + kfree(sim->irqs); 96 + } 97 + EXPORT_SYMBOL_GPL(irq_sim_fini); 98 + 99 + static void devm_irq_sim_release(struct device *dev, void *res) 100 + { 101 + struct irq_sim_devres *this = res; 102 + 103 + irq_sim_fini(this->sim); 104 + } 105 + 106 + /** 107 + * irq_sim_init - Initialize the interrupt simulator for a managed device. 108 + * 109 + * @dev: Device to initialize the simulator object for. 110 + * @sim: The interrupt simulator object to initialize. 111 + * @num_irqs: Number of interrupts to allocate 112 + * 113 + * Returns 0 on success and a negative error number on failure. 
114 + */ 115 + int devm_irq_sim_init(struct device *dev, struct irq_sim *sim, 116 + unsigned int num_irqs) 117 + { 118 + struct irq_sim_devres *dr; 119 + int rv; 120 + 121 + dr = devres_alloc(devm_irq_sim_release, sizeof(*dr), GFP_KERNEL); 122 + if (!dr) 123 + return -ENOMEM; 124 + 125 + rv = irq_sim_init(sim, num_irqs); 126 + if (rv) { 127 + devres_free(dr); 128 + return rv; 129 + } 130 + 131 + dr->sim = sim; 132 + devres_add(dev, dr); 133 + 134 + return 0; 135 + } 136 + EXPORT_SYMBOL_GPL(devm_irq_sim_init); 137 + 138 + /** 139 + * irq_sim_fire - Enqueue an interrupt. 140 + * 141 + * @sim: The interrupt simulator object. 142 + * @offset: Offset of the simulated interrupt which should be fired. 143 + */ 144 + void irq_sim_fire(struct irq_sim *sim, unsigned int offset) 145 + { 146 + if (sim->irqs[offset].enabled) { 147 + sim->work_ctx.irq = irq_sim_irqnum(sim, offset); 148 + irq_work_queue(&sim->work_ctx.work); 149 + } 150 + } 151 + EXPORT_SYMBOL_GPL(irq_sim_fire); 152 + 153 + /** 154 + * irq_sim_irqnum - Get the allocated number of a dummy interrupt. 155 + * 156 + * @sim: The interrupt simulator object. 157 + * @offset: Offset of the simulated interrupt for which to retrieve 158 + * the number. 159 + */ 160 + int irq_sim_irqnum(struct irq_sim *sim, unsigned int offset) 161 + { 162 + return sim->irqs[offset].irqnum; 163 + } 164 + EXPORT_SYMBOL_GPL(irq_sim_irqnum);
+200 -30
kernel/irq/irqdomain.c
··· 455 455 } 456 456 EXPORT_SYMBOL_GPL(irq_set_default_host); 457 457 458 + static void irq_domain_clear_mapping(struct irq_domain *domain, 459 + irq_hw_number_t hwirq) 460 + { 461 + if (hwirq < domain->revmap_size) { 462 + domain->linear_revmap[hwirq] = 0; 463 + } else { 464 + mutex_lock(&revmap_trees_mutex); 465 + radix_tree_delete(&domain->revmap_tree, hwirq); 466 + mutex_unlock(&revmap_trees_mutex); 467 + } 468 + } 469 + 470 + static void irq_domain_set_mapping(struct irq_domain *domain, 471 + irq_hw_number_t hwirq, 472 + struct irq_data *irq_data) 473 + { 474 + if (hwirq < domain->revmap_size) { 475 + domain->linear_revmap[hwirq] = irq_data->irq; 476 + } else { 477 + mutex_lock(&revmap_trees_mutex); 478 + radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); 479 + mutex_unlock(&revmap_trees_mutex); 480 + } 481 + } 482 + 458 483 void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) 459 484 { 460 485 struct irq_data *irq_data = irq_get_irq_data(irq); ··· 508 483 domain->mapcount--; 509 484 510 485 /* Clear reverse map for this hwirq */ 511 - if (hwirq < domain->revmap_size) { 512 - domain->linear_revmap[hwirq] = 0; 513 - } else { 514 - mutex_lock(&revmap_trees_mutex); 515 - radix_tree_delete(&domain->revmap_tree, hwirq); 516 - mutex_unlock(&revmap_trees_mutex); 517 - } 486 + irq_domain_clear_mapping(domain, hwirq); 518 487 } 519 488 520 489 int irq_domain_associate(struct irq_domain *domain, unsigned int virq, ··· 552 533 } 553 534 554 535 domain->mapcount++; 555 - if (hwirq < domain->revmap_size) { 556 - domain->linear_revmap[hwirq] = virq; 557 - } else { 558 - mutex_lock(&revmap_trees_mutex); 559 - radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); 560 - mutex_unlock(&revmap_trees_mutex); 561 - } 536 + irq_domain_set_mapping(domain, hwirq, irq_data); 562 537 mutex_unlock(&irq_domain_mutex); 563 538 564 539 irq_clear_status_flags(virq, IRQ_NOREQUEST); ··· 1151 1138 1152 1139 for (data = irq_get_irq_data(virq); data; data = data->parent_data) { 1153 1140 struct irq_domain *domain = data->domain; 1154 - irq_hw_number_t hwirq = data->hwirq; 1155 1141 1156 1142 domain->mapcount++; 1157 - if (hwirq < domain->revmap_size) { 1158 - domain->linear_revmap[hwirq] = virq; 1159 - } else { 1160 - mutex_lock(&revmap_trees_mutex); 1161 - radix_tree_insert(&domain->revmap_tree, hwirq, data); 1162 - mutex_unlock(&revmap_trees_mutex); 1163 - } 1143 + irq_domain_set_mapping(domain, data->hwirq, data); 1164 1144 1165 1145 /* If not already assigned, give the domain the chip's name */ 1166 1146 if (!domain->name && data->chip) ··· 1177 1171 irq_hw_number_t hwirq = data->hwirq; 1178 1172 1179 1173 domain->mapcount--; 1180 - if (hwirq < domain->revmap_size) { 1181 - domain->linear_revmap[hwirq] = 0; 1182 - } else { 1183 - mutex_lock(&revmap_trees_mutex); 1184 - radix_tree_delete(&domain->revmap_tree, hwirq); 1185 - mutex_unlock(&revmap_trees_mutex); 1186 - } 1174 + irq_domain_clear_mapping(domain, hwirq); 1187 1175 } 1188 1176 } 1189 1177 ··· 1362 1362 unsigned int irq_base, 1363 1363 unsigned int nr_irqs) 1364 1364 { 1365 - domain->ops->free(domain, irq_base, nr_irqs); 1365 + if (domain->ops->free) 1366 + domain->ops->free(domain, irq_base, nr_irqs); 1366 1367 } 1367 1368 1368 1369 int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain, ··· 1448 1447 irq_free_descs(virq, nr_irqs); 1449 1448 return ret; 1450 1449 } 1450 + 1451 + /* The irq_data was moved, fix the revmap to refer to the new location */ 1452 + static void irq_domain_fix_revmap(struct irq_data *d) 1453 + { 
1454 + void **slot; 1455 + 1456 + if (d->hwirq < d->domain->revmap_size) 1457 + return; /* Not using radix tree. */ 1458 + 1459 + /* Fix up the revmap. */ 1460 + mutex_lock(&revmap_trees_mutex); 1461 + slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq); 1462 + if (slot) 1463 + radix_tree_replace_slot(&d->domain->revmap_tree, slot, d); 1464 + mutex_unlock(&revmap_trees_mutex); 1465 + } 1466 + 1467 + /** 1468 + * irq_domain_push_irq() - Push a domain in to the top of a hierarchy. 1469 + * @domain: Domain to push. 1470 + * @virq: Irq to push the domain in to. 1471 + * @arg: Passed to the irq_domain_ops alloc() function. 1472 + * 1473 + * For an already existing irqdomain hierarchy, as might be obtained 1474 + * via a call to pci_enable_msix(), add an additional domain to the 1475 + * head of the processing chain. Must be called before request_irq() 1476 + * has been called. 1477 + */ 1478 + int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg) 1479 + { 1480 + struct irq_data *child_irq_data; 1481 + struct irq_data *root_irq_data = irq_get_irq_data(virq); 1482 + struct irq_desc *desc; 1483 + int rv = 0; 1484 + 1485 + /* 1486 + * Check that no action has been set, which indicates the virq 1487 + * is in a state where this function doesn't have to deal with 1488 + * races between interrupt handling and maintaining the 1489 + * hierarchy. This will catch gross misuse. Attempting to 1490 + * make the check race free would require holding locks across 1491 + * calls to struct irq_domain_ops->alloc(), which could lead 1492 + * to deadlock, so we just do a simple check before starting. 1493 + */ 1494 + desc = irq_to_desc(virq); 1495 + if (!desc) 1496 + return -EINVAL; 1497 + if (WARN_ON(desc->action)) 1498 + return -EBUSY; 1499 + 1500 + if (domain == NULL) 1501 + return -EINVAL; 1502 + 1503 + if (WARN_ON(!irq_domain_is_hierarchy(domain))) 1504 + return -EINVAL; 1505 + 1506 + if (!root_irq_data) 1507 + return -EINVAL; 1508 + 1509 + if (domain->parent != root_irq_data->domain) 1510 + return -EINVAL; 1511 + 1512 + child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL, 1513 + irq_data_get_node(root_irq_data)); 1514 + if (!child_irq_data) 1515 + return -ENOMEM; 1516 + 1517 + mutex_lock(&irq_domain_mutex); 1518 + 1519 + /* Copy the original irq_data. */ 1520 + *child_irq_data = *root_irq_data; 1521 + 1522 + /* 1523 + * Overwrite the root_irq_data, which is embedded in struct 1524 + * irq_desc, with values for this domain. 1525 + */ 1526 + root_irq_data->parent_data = child_irq_data; 1527 + root_irq_data->domain = domain; 1528 + root_irq_data->mask = 0; 1529 + root_irq_data->hwirq = 0; 1530 + root_irq_data->chip = NULL; 1531 + root_irq_data->chip_data = NULL; 1532 + 1533 + /* May (probably does) set hwirq, chip, etc. */ 1534 + rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg); 1535 + if (rv) { 1536 + /* Restore the original irq_data. */ 1537 + *root_irq_data = *child_irq_data; 1538 + goto error; 1539 + } 1540 + 1541 + irq_domain_fix_revmap(child_irq_data); 1542 + irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data); 1543 + 1544 + error: 1545 + mutex_unlock(&irq_domain_mutex); 1546 + 1547 + return rv; 1548 + } 1549 + EXPORT_SYMBOL_GPL(irq_domain_push_irq); 1550 + 1551 + /** 1552 + * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy. 1553 + * @domain: Domain to remove. 1554 + * @virq: Irq to remove the domain from. 1555 + * 1556 + * Undo the effects of a call to irq_domain_push_irq(). 
Must be 1557 + * called either before request_irq() or after free_irq(). 1558 + */ 1559 + int irq_domain_pop_irq(struct irq_domain *domain, int virq) 1560 + { 1561 + struct irq_data *root_irq_data = irq_get_irq_data(virq); 1562 + struct irq_data *child_irq_data; 1563 + struct irq_data *tmp_irq_data; 1564 + struct irq_desc *desc; 1565 + 1566 + /* 1567 + * Check that no action is set, which indicates the virq is in 1568 + * a state where this function doesn't have to deal with races 1569 + * between interrupt handling and maintaining the hierarchy. 1570 + * This will catch gross misuse. Attempting to make the check 1571 + * race free would require holding locks across calls to 1572 + * struct irq_domain_ops->free(), which could lead to 1573 + * deadlock, so we just do a simple check before starting. 1574 + */ 1575 + desc = irq_to_desc(virq); 1576 + if (!desc) 1577 + return -EINVAL; 1578 + if (WARN_ON(desc->action)) 1579 + return -EBUSY; 1580 + 1581 + if (domain == NULL) 1582 + return -EINVAL; 1583 + 1584 + if (!root_irq_data) 1585 + return -EINVAL; 1586 + 1587 + tmp_irq_data = irq_domain_get_irq_data(domain, virq); 1588 + 1589 + /* We can only "pop" if this domain is at the top of the list */ 1590 + if (WARN_ON(root_irq_data != tmp_irq_data)) 1591 + return -EINVAL; 1592 + 1593 + if (WARN_ON(root_irq_data->domain != domain)) 1594 + return -EINVAL; 1595 + 1596 + child_irq_data = root_irq_data->parent_data; 1597 + if (WARN_ON(!child_irq_data)) 1598 + return -EINVAL; 1599 + 1600 + mutex_lock(&irq_domain_mutex); 1601 + 1602 + root_irq_data->parent_data = NULL; 1603 + 1604 + irq_domain_clear_mapping(domain, root_irq_data->hwirq); 1605 + irq_domain_free_irqs_hierarchy(domain, virq, 1); 1606 + 1607 + /* Restore the original irq_data. */ 1608 + *root_irq_data = *child_irq_data; 1609 + 1610 + irq_domain_fix_revmap(root_irq_data); 1611 + 1612 + mutex_unlock(&irq_domain_mutex); 1613 + 1614 + kfree(child_irq_data); 1615 + 1616 + return 0; 1617 + } 1618 + EXPORT_SYMBOL_GPL(irq_domain_pop_irq); 1451 1619 1452 1620 /** 1453 1621 * irq_domain_free_irqs - Free IRQ number and associated data structures
+12 -2
kernel/irq/manage.c
··· 400 400 return -EINVAL; 401 401 402 402 data = irq_desc_get_irq_data(desc); 403 - chip = irq_data_get_irq_chip(data); 404 - if (chip && chip->irq_set_vcpu_affinity) 403 + do { 404 + chip = irq_data_get_irq_chip(data); 405 + if (chip && chip->irq_set_vcpu_affinity) 406 + break; 407 + #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 408 + data = data->parent_data; 409 + #else 410 + data = NULL; 411 + #endif 412 + } while (data); 413 + 414 + if (data) 405 415 ret = chip->irq_set_vcpu_affinity(data, vcpu_info); 406 416 irq_put_desc_unlock(desc, flags); 407 417
+4 -4
kernel/irq/proc.c
··· 61 61 case EFFECTIVE: 62 62 case EFFECTIVE_LIST: 63 63 #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK 64 - mask = desc->irq_common_data.effective_affinity; 64 + mask = irq_data_get_effective_affinity_mask(&desc->irq_data); 65 65 break; 66 - #else 67 - return -EINVAL; 68 66 #endif 69 - }; 67 + default: 68 + return -EINVAL; 69 + } 70 70 71 71 switch (type) { 72 72 case AFFINITY_LIST: