Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
"The irq department delivers:

- new core infrastructure to allow better management of multi-queue
devices (interrupt spreading, node aware descriptor allocation ...)

- a new interrupt flow handler to support the new fangled Intel VMD
devices.

- yet another new interrupt controller driver.

- a series of fixes which addresses sparse warnings, missing
includes, missing static declarations etc from Ben Dooks.

- a fix for the error handling in the hierarchical domain allocation
code.

- the usual pile of small updates to core and driver code"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
genirq: Fix missing irq allocation affinity hint
irqdomain: Fix irq_domain_alloc_irqs_recursive() error handling
irq/Documentation: Correct result of echoing 5 to smp_affinity
MAINTAINERS: Remove Jiang Liu from irq domains
genirq/msi: Fix broken debug output
genirq: Add a helper to spread an affinity mask for MSI/MSI-X vectors
genirq/msi: Make use of affinity aware allocations
genirq: Use affinity hint in irqdesc allocation
genirq: Add affinity hint to irq allocation
genirq: Introduce IRQD_AFFINITY_MANAGED flag
genirq/msi: Remove unused MSI_FLAG_IDENTITY_MAP
irqchip/s3c24xx: Fixup IO accessors for big endian
irqchip/exynos-combiner: Fix usage of __raw IO
irqdomain: Fix disposal of mappings for interrupt hierarchies
irqchip/aspeed-vic: Add irq controller for Aspeed
doc/devicetree: Add Aspeed VIC bindings
x86/PCI/VMD: Use untracked irq handler
genirq: Add untracked irq handler
irqchip/mips-gic: Populate irq_domain names
irqchip/gicv3-its: Implement two-level(indirect) device table support
...

+1269 -324
+2 -1
Documentation/devicetree/bindings/interrupt-controller/arm,gic.txt
··· 21 21 "arm,pl390" 22 22 "arm,tc11mp-gic" 23 23 "brcm,brahma-b15-gic" 24 + "nvidia,tegra210-agic" 24 25 "qcom,msm-8660-qgic" 25 26 "qcom,msm-qgic2" 26 27 - interrupt-controller : Identifies the node as an interrupt controller ··· 69 68 "ic_clk" (for "arm,arm11mp-gic") 70 69 "PERIPHCLKEN" (for "arm,cortex-a15-gic") 71 70 "PERIPHCLK", "PERIPHCLKEN" (for "arm,cortex-a9-gic") 72 - "clk" (for "arm,gic-400") 71 + "clk" (for "arm,gic-400" and "nvidia,tegra210-agic") 73 72 "gclk" (for "arm,pl390") 74 73 75 74 - power-domains : A phandle and PM domain specifier as defined by bindings of
+22
Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-vic.txt
··· 1 + Aspeed Vectored Interrupt Controller 2 + 3 + These bindings are for the Aspeed AST2400 interrupt controller register layout. 4 + The SoC has a legacy register layout, but this driver does not support that 5 + mode of operation. 6 + 7 + Required properties: 8 + 9 + - compatible : should be "aspeed,ast2400-vic". 10 + 11 + - interrupt-controller : Identifies the node as an interrupt controller 12 + - #interrupt-cells : Specifies the number of cells needed to encode an 13 + interrupt source. The value shall be 1. 14 + 15 + Example: 16 + 17 + vic: interrupt-controller@1e6c0080 { 18 + compatible = "aspeed,ast2400-vic"; 19 + interrupt-controller; 20 + #interrupt-cells = <1>; 21 + reg = <0x1e6c0080 0x80>; 22 + };
+1 -1
Documentation/filesystems/proc.txt
··· 725 725 > echo 1 > /proc/irq/10/smp_affinity 726 726 727 727 This means that only the first CPU will handle the IRQ, but you can also echo 728 - 5 which means that only the first and fourth CPU can handle the IRQ. 728 + 5 which means that only the first and third CPU can handle the IRQ. 729 729 730 730 The contents of each smp_affinity file is the same by default: 731 731
-1
MAINTAINERS
··· 6235 6235 F: drivers/irqchip/ 6236 6236 6237 6237 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 6238 - M: Jiang Liu <jiang.liu@linux.intel.com> 6239 6238 M: Marc Zyngier <marc.zyngier@arm.com> 6240 6239 S: Maintained 6241 6240 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
+1 -1
arch/sparc/kernel/irq_64.c
··· 242 242 { 243 243 int irq; 244 244 245 - irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL); 245 + irq = __irq_alloc_descs(-1, 1, 1, numa_node_id(), NULL, NULL); 246 246 if (irq <= 0) 247 247 goto out; 248 248
+3 -2
arch/x86/kernel/apic/io_apic.c
··· 981 981 982 982 return __irq_domain_alloc_irqs(domain, irq, 1, 983 983 ioapic_alloc_attr_node(info), 984 - info, legacy); 984 + info, legacy, NULL); 985 985 } 986 986 987 987 /* ··· 1014 1014 info->ioapic_pin)) 1015 1015 return -ENOMEM; 1016 1016 } else { 1017 - irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true); 1017 + irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true, 1018 + NULL); 1018 1019 if (irq >= 0) { 1019 1020 irq_data = irq_domain_get_irq_data(domain, irq); 1020 1021 data = irq_data->chip_data;
+1 -1
arch/x86/pci/vmd.c
··· 195 195 vmdirq->virq = virq; 196 196 197 197 irq_domain_set_info(domain, virq, vmdirq->irq->vmd_vector, info->chip, 198 - vmdirq, handle_simple_irq, vmd, NULL); 198 + vmdirq, handle_untracked_irq, vmd, NULL); 199 199 return 0; 200 200 } 201 201
+6
drivers/irqchip/Kconfig
··· 8 8 select IRQ_DOMAIN_HIERARCHY 9 9 select MULTI_IRQ_HANDLER 10 10 11 + config ARM_GIC_PM 12 + bool 13 + depends on PM 14 + select ARM_GIC 15 + select PM_CLK 16 + 11 17 config ARM_GIC_MAX_NR 12 18 int 13 19 default 2 if ARCH_REALVIEW
+2
drivers/irqchip/Makefile
··· 24 24 obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o 25 25 obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o 26 26 obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o 27 + obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o 27 28 obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o 28 29 obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o 29 30 obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o ··· 70 69 obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o 71 70 obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o 72 71 obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o 72 + obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o
+7 -7
drivers/irqchip/exynos-combiner.c
··· 55 55 { 56 56 u32 mask = 1 << (data->hwirq % 32); 57 57 58 - __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR); 58 + writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR); 59 59 } 60 60 61 61 static void combiner_unmask_irq(struct irq_data *data) 62 62 { 63 63 u32 mask = 1 << (data->hwirq % 32); 64 64 65 - __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); 65 + writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET); 66 66 } 67 67 68 68 static void combiner_handle_cascade_irq(struct irq_desc *desc) ··· 75 75 chained_irq_enter(chip, desc); 76 76 77 77 spin_lock(&irq_controller_lock); 78 - status = __raw_readl(chip_data->base + COMBINER_INT_STATUS); 78 + status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS); 79 79 spin_unlock(&irq_controller_lock); 80 80 status &= chip_data->irq_mask; 81 81 ··· 135 135 combiner_data->parent_irq = irq; 136 136 137 137 /* Disable all interrupts */ 138 - __raw_writel(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR); 138 + writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR); 139 139 } 140 140 141 141 static int combiner_irq_domain_xlate(struct irq_domain *d, ··· 218 218 219 219 for (i = 0; i < max_nr; i++) 220 220 combiner_data[i].pm_save = 221 - __raw_readl(combiner_data[i].base + COMBINER_ENABLE_SET); 221 + readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET); 222 222 223 223 return 0; 224 224 } ··· 235 235 int i; 236 236 237 237 for (i = 0; i < max_nr; i++) { 238 - __raw_writel(combiner_data[i].irq_mask, 238 + writel_relaxed(combiner_data[i].irq_mask, 239 239 combiner_data[i].base + COMBINER_ENABLE_CLEAR); 240 - __raw_writel(combiner_data[i].pm_save, 240 + writel_relaxed(combiner_data[i].pm_save, 241 241 combiner_data[i].base + COMBINER_ENABLE_SET); 242 242 } 243 243 }
+1 -1
drivers/irqchip/irq-armada-370-xp.c
··· 541 541 writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); 542 542 } 543 543 544 - struct syscore_ops armada_370_xp_mpic_syscore_ops = { 544 + static struct syscore_ops armada_370_xp_mpic_syscore_ops = { 545 545 .suspend = armada_370_xp_mpic_suspend, 546 546 .resume = armada_370_xp_mpic_resume, 547 547 };
+230
drivers/irqchip/irq-aspeed-vic.c
··· 1 + /* 2 + * Copyright (C) 2015 - Ben Herrenschmidt, IBM Corp. 3 + * 4 + * Driver for Aspeed "new" VIC as found in SoC generation 3 and later 5 + * 6 + * Based on irq-vic.c: 7 + * 8 + * Copyright (C) 1999 - 2003 ARM Limited 9 + * Copyright (C) 2000 Deep Blue Solutions Ltd 10 + * 11 + * This program is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License as published by 13 + * the Free Software Foundation; either version 2 of the License, or 14 + * (at your option) any later version. 15 + * 16 + * This program is distributed in the hope that it will be useful, 17 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 + * GNU General Public License for more details. 20 + * 21 + */ 22 + 23 + #include <linux/export.h> 24 + #include <linux/init.h> 25 + #include <linux/list.h> 26 + #include <linux/io.h> 27 + #include <linux/irq.h> 28 + #include <linux/irqchip.h> 29 + #include <linux/irqchip/chained_irq.h> 30 + #include <linux/irqdomain.h> 31 + #include <linux/of.h> 32 + #include <linux/of_address.h> 33 + #include <linux/of_irq.h> 34 + #include <linux/syscore_ops.h> 35 + #include <linux/device.h> 36 + #include <linux/slab.h> 37 + 38 + #include <asm/exception.h> 39 + #include <asm/irq.h> 40 + 41 + /* These definitions correspond to the "new mapping" of the 42 + * register set that interleaves "high" and "low". 
The offsets 43 + * below are for the "low" register, add 4 to get to the high one 44 + */ 45 + #define AVIC_IRQ_STATUS 0x00 46 + #define AVIC_FIQ_STATUS 0x08 47 + #define AVIC_RAW_STATUS 0x10 48 + #define AVIC_INT_SELECT 0x18 49 + #define AVIC_INT_ENABLE 0x20 50 + #define AVIC_INT_ENABLE_CLR 0x28 51 + #define AVIC_INT_TRIGGER 0x30 52 + #define AVIC_INT_TRIGGER_CLR 0x38 53 + #define AVIC_INT_SENSE 0x40 54 + #define AVIC_INT_DUAL_EDGE 0x48 55 + #define AVIC_INT_EVENT 0x50 56 + #define AVIC_EDGE_CLR 0x58 57 + #define AVIC_EDGE_STATUS 0x60 58 + 59 + #define NUM_IRQS 64 60 + 61 + struct aspeed_vic { 62 + void __iomem *base; 63 + u32 edge_sources[2]; 64 + struct irq_domain *dom; 65 + }; 66 + static struct aspeed_vic *system_avic; 67 + 68 + static void vic_init_hw(struct aspeed_vic *vic) 69 + { 70 + u32 sense; 71 + 72 + /* Disable all interrupts */ 73 + writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR); 74 + writel(0xffffffff, vic->base + AVIC_INT_ENABLE_CLR + 4); 75 + 76 + /* Make sure no soft trigger is on */ 77 + writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR); 78 + writel(0xffffffff, vic->base + AVIC_INT_TRIGGER_CLR + 4); 79 + 80 + /* Set everything to be IRQ */ 81 + writel(0, vic->base + AVIC_INT_SELECT); 82 + writel(0, vic->base + AVIC_INT_SELECT + 4); 83 + 84 + /* Some interrupts have a programable high/low level trigger 85 + * (4 GPIO direct inputs), for now we assume this was configured 86 + * by firmware. We read which ones are edge now. 
87 + */ 88 + sense = readl(vic->base + AVIC_INT_SENSE); 89 + vic->edge_sources[0] = ~sense; 90 + sense = readl(vic->base + AVIC_INT_SENSE + 4); 91 + vic->edge_sources[1] = ~sense; 92 + 93 + /* Clear edge detection latches */ 94 + writel(0xffffffff, vic->base + AVIC_EDGE_CLR); 95 + writel(0xffffffff, vic->base + AVIC_EDGE_CLR + 4); 96 + } 97 + 98 + static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs) 99 + { 100 + struct aspeed_vic *vic = system_avic; 101 + u32 stat, irq; 102 + 103 + for (;;) { 104 + irq = 0; 105 + stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS); 106 + if (!stat) { 107 + stat = readl_relaxed(vic->base + AVIC_IRQ_STATUS + 4); 108 + irq = 32; 109 + } 110 + if (stat == 0) 111 + break; 112 + irq += ffs(stat) - 1; 113 + handle_domain_irq(vic->dom, irq, regs); 114 + } 115 + } 116 + 117 + static void avic_ack_irq(struct irq_data *d) 118 + { 119 + struct aspeed_vic *vic = irq_data_get_irq_chip_data(d); 120 + unsigned int sidx = d->hwirq >> 5; 121 + unsigned int sbit = 1u << (d->hwirq & 0x1f); 122 + 123 + /* Clear edge latch for edge interrupts, nop for level */ 124 + if (vic->edge_sources[sidx] & sbit) 125 + writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4); 126 + } 127 + 128 + static void avic_mask_irq(struct irq_data *d) 129 + { 130 + struct aspeed_vic *vic = irq_data_get_irq_chip_data(d); 131 + unsigned int sidx = d->hwirq >> 5; 132 + unsigned int sbit = 1u << (d->hwirq & 0x1f); 133 + 134 + writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4); 135 + } 136 + 137 + static void avic_unmask_irq(struct irq_data *d) 138 + { 139 + struct aspeed_vic *vic = irq_data_get_irq_chip_data(d); 140 + unsigned int sidx = d->hwirq >> 5; 141 + unsigned int sbit = 1u << (d->hwirq & 0x1f); 142 + 143 + writel(sbit, vic->base + AVIC_INT_ENABLE + sidx * 4); 144 + } 145 + 146 + /* For level irq, faster than going through a nop "ack" and mask */ 147 + static void avic_mask_ack_irq(struct irq_data *d) 148 + { 149 + struct aspeed_vic *vic = 
irq_data_get_irq_chip_data(d); 150 + unsigned int sidx = d->hwirq >> 5; 151 + unsigned int sbit = 1u << (d->hwirq & 0x1f); 152 + 153 + /* First mask */ 154 + writel(sbit, vic->base + AVIC_INT_ENABLE_CLR + sidx * 4); 155 + 156 + /* Then clear edge latch for edge interrupts */ 157 + if (vic->edge_sources[sidx] & sbit) 158 + writel(sbit, vic->base + AVIC_EDGE_CLR + sidx * 4); 159 + } 160 + 161 + static struct irq_chip avic_chip = { 162 + .name = "AVIC", 163 + .irq_ack = avic_ack_irq, 164 + .irq_mask = avic_mask_irq, 165 + .irq_unmask = avic_unmask_irq, 166 + .irq_mask_ack = avic_mask_ack_irq, 167 + }; 168 + 169 + static int avic_map(struct irq_domain *d, unsigned int irq, 170 + irq_hw_number_t hwirq) 171 + { 172 + struct aspeed_vic *vic = d->host_data; 173 + unsigned int sidx = hwirq >> 5; 174 + unsigned int sbit = 1u << (hwirq & 0x1f); 175 + 176 + /* Check if interrupt exists */ 177 + if (sidx > 1) 178 + return -EPERM; 179 + 180 + if (vic->edge_sources[sidx] & sbit) 181 + irq_set_chip_and_handler(irq, &avic_chip, handle_edge_irq); 182 + else 183 + irq_set_chip_and_handler(irq, &avic_chip, handle_level_irq); 184 + irq_set_chip_data(irq, vic); 185 + irq_set_probe(irq); 186 + return 0; 187 + } 188 + 189 + static struct irq_domain_ops avic_dom_ops = { 190 + .map = avic_map, 191 + .xlate = irq_domain_xlate_onetwocell, 192 + }; 193 + 194 + static int __init avic_of_init(struct device_node *node, 195 + struct device_node *parent) 196 + { 197 + void __iomem *regs; 198 + struct aspeed_vic *vic; 199 + 200 + if (WARN(parent, "non-root Aspeed VIC not supported")) 201 + return -EINVAL; 202 + if (WARN(system_avic, "duplicate Aspeed VIC not supported")) 203 + return -EINVAL; 204 + 205 + regs = of_iomap(node, 0); 206 + if (WARN_ON(!regs)) 207 + return -EIO; 208 + 209 + vic = kzalloc(sizeof(struct aspeed_vic), GFP_KERNEL); 210 + if (WARN_ON(!vic)) { 211 + iounmap(regs); 212 + return -ENOMEM; 213 + } 214 + vic->base = regs; 215 + 216 + /* Initialize soures, all masked */ 217 + 
vic_init_hw(vic); 218 + 219 + /* Ready to receive interrupts */ 220 + system_avic = vic; 221 + set_handle_irq(avic_handle_irq); 222 + 223 + /* Register our domain */ 224 + vic->dom = irq_domain_add_simple(node, NUM_IRQS, 0, 225 + &avic_dom_ops, vic); 226 + 227 + return 0; 228 + } 229 + 230 + IRQCHIP_DECLARE(aspeed_new_vic, "aspeed,ast2400-vic", avic_of_init);
+1 -2
drivers/irqchip/irq-bcm2835.c
··· 52 52 #include <linux/irqdomain.h> 53 53 54 54 #include <asm/exception.h> 55 - #include <asm/mach/irq.h> 56 55 57 56 /* Put the bank and irq (32 bits) into the hwirq */ 58 57 #define MAKE_HWIRQ(b, n) ((b << 5) | (n)) ··· 241 242 u32 hwirq; 242 243 243 244 while ((hwirq = get_next_armctrl_hwirq()) != ~0) 244 - handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); 245 + handle_domain_irq(intc.domain, hwirq, regs); 245 246 } 246 247 247 248 static void bcm2836_chained_handle_irq(struct irq_desc *desc)
+3 -3
drivers/irqchip/irq-bcm2836.c
··· 180 180 } else if (stat) { 181 181 u32 hwirq = ffs(stat) - 1; 182 182 183 - handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); 183 + handle_domain_irq(intc.domain, hwirq, regs); 184 184 } 185 185 } 186 186 ··· 224 224 }; 225 225 226 226 #ifdef CONFIG_ARM 227 - int __init bcm2836_smp_boot_secondary(unsigned int cpu, 228 - struct task_struct *idle) 227 + static int __init bcm2836_smp_boot_secondary(unsigned int cpu, 228 + struct task_struct *idle) 229 229 { 230 230 unsigned long secondary_startup_phys = 231 231 (unsigned long)virt_to_phys((void *)secondary_startup);
+5 -5
drivers/irqchip/irq-bcm7120-l2.c
··· 215 215 return 0; 216 216 } 217 217 218 - int __init bcm7120_l2_intc_probe(struct device_node *dn, 218 + static int __init bcm7120_l2_intc_probe(struct device_node *dn, 219 219 struct device_node *parent, 220 220 int (*iomap_regs_fn)(struct device_node *, 221 221 struct bcm7120_l2_intc_data *), ··· 339 339 return ret; 340 340 } 341 341 342 - int __init bcm7120_l2_intc_probe_7120(struct device_node *dn, 343 - struct device_node *parent) 342 + static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn, 343 + struct device_node *parent) 344 344 { 345 345 return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120, 346 346 "BCM7120 L2"); 347 347 } 348 348 349 - int __init bcm7120_l2_intc_probe_3380(struct device_node *dn, 350 - struct device_node *parent) 349 + static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn, 350 + struct device_node *parent) 351 351 { 352 352 return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380, 353 353 "BCM3380 L2");
+2 -2
drivers/irqchip/irq-brcmstb-l2.c
··· 112 112 irq_gc_unlock(gc); 113 113 } 114 114 115 - int __init brcmstb_l2_intc_of_init(struct device_node *np, 116 - struct device_node *parent) 115 + static int __init brcmstb_l2_intc_of_init(struct device_node *np, 116 + struct device_node *parent) 117 117 { 118 118 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 119 119 struct brcmstb_l2_intc_data *data;
+2 -2
drivers/irqchip/irq-gic-common.c
··· 90 90 return ret; 91 91 } 92 92 93 - void __init gic_dist_config(void __iomem *base, int gic_irqs, 94 - void (*sync_access)(void)) 93 + void gic_dist_config(void __iomem *base, int gic_irqs, 94 + void (*sync_access)(void)) 95 95 { 96 96 unsigned int i; 97 97
+184
drivers/irqchip/irq-gic-pm.c
··· 1 + /* 2 + * Copyright (C) 2016 NVIDIA CORPORATION, All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 + */ 16 + #include <linux/module.h> 17 + #include <linux/clk.h> 18 + #include <linux/of_device.h> 19 + #include <linux/of_irq.h> 20 + #include <linux/irqchip/arm-gic.h> 21 + #include <linux/platform_device.h> 22 + #include <linux/pm_clock.h> 23 + #include <linux/pm_runtime.h> 24 + #include <linux/slab.h> 25 + 26 + struct gic_clk_data { 27 + unsigned int num_clocks; 28 + const char *const *clocks; 29 + }; 30 + 31 + static int gic_runtime_resume(struct device *dev) 32 + { 33 + struct gic_chip_data *gic = dev_get_drvdata(dev); 34 + int ret; 35 + 36 + ret = pm_clk_resume(dev); 37 + if (ret) 38 + return ret; 39 + 40 + /* 41 + * On the very first resume, the pointer to the driver data 42 + * will be NULL and this is intentional, because we do not 43 + * want to restore the GIC on the very first resume. So if 44 + * the pointer is not valid just return. 
45 + */ 46 + if (!gic) 47 + return 0; 48 + 49 + gic_dist_restore(gic); 50 + gic_cpu_restore(gic); 51 + 52 + return 0; 53 + } 54 + 55 + static int gic_runtime_suspend(struct device *dev) 56 + { 57 + struct gic_chip_data *gic = dev_get_drvdata(dev); 58 + 59 + gic_dist_save(gic); 60 + gic_cpu_save(gic); 61 + 62 + return pm_clk_suspend(dev); 63 + } 64 + 65 + static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data) 66 + { 67 + struct clk *clk; 68 + unsigned int i; 69 + int ret; 70 + 71 + if (!dev || !data) 72 + return -EINVAL; 73 + 74 + ret = pm_clk_create(dev); 75 + if (ret) 76 + return ret; 77 + 78 + for (i = 0; i < data->num_clocks; i++) { 79 + clk = of_clk_get_by_name(dev->of_node, data->clocks[i]); 80 + if (IS_ERR(clk)) { 81 + dev_err(dev, "failed to get clock %s\n", 82 + data->clocks[i]); 83 + ret = PTR_ERR(clk); 84 + goto error; 85 + } 86 + 87 + ret = pm_clk_add_clk(dev, clk); 88 + if (ret) { 89 + dev_err(dev, "failed to add clock at index %d\n", i); 90 + clk_put(clk); 91 + goto error; 92 + } 93 + } 94 + 95 + return 0; 96 + 97 + error: 98 + pm_clk_destroy(dev); 99 + 100 + return ret; 101 + } 102 + 103 + static int gic_probe(struct platform_device *pdev) 104 + { 105 + struct device *dev = &pdev->dev; 106 + const struct gic_clk_data *data; 107 + struct gic_chip_data *gic; 108 + int ret, irq; 109 + 110 + data = of_device_get_match_data(&pdev->dev); 111 + if (!data) { 112 + dev_err(&pdev->dev, "no device match found\n"); 113 + return -ENODEV; 114 + } 115 + 116 + irq = irq_of_parse_and_map(dev->of_node, 0); 117 + if (!irq) { 118 + dev_err(dev, "no parent interrupt found!\n"); 119 + return -EINVAL; 120 + } 121 + 122 + ret = gic_get_clocks(dev, data); 123 + if (ret) 124 + goto irq_dispose; 125 + 126 + pm_runtime_enable(dev); 127 + 128 + ret = pm_runtime_get_sync(dev); 129 + if (ret < 0) 130 + goto rpm_disable; 131 + 132 + ret = gic_of_init_child(dev, &gic, irq); 133 + if (ret) 134 + goto rpm_put; 135 + 136 + platform_set_drvdata(pdev, gic); 137 + 
138 + pm_runtime_put(dev); 139 + 140 + dev_info(dev, "GIC IRQ controller registered\n"); 141 + 142 + return 0; 143 + 144 + rpm_put: 145 + pm_runtime_put_sync(dev); 146 + rpm_disable: 147 + pm_runtime_disable(dev); 148 + pm_clk_destroy(dev); 149 + irq_dispose: 150 + irq_dispose_mapping(irq); 151 + 152 + return ret; 153 + } 154 + 155 + static const struct dev_pm_ops gic_pm_ops = { 156 + SET_RUNTIME_PM_OPS(gic_runtime_suspend, 157 + gic_runtime_resume, NULL) 158 + }; 159 + 160 + static const char * const gic400_clocks[] = { 161 + "clk", 162 + }; 163 + 164 + static const struct gic_clk_data gic400_data = { 165 + .num_clocks = ARRAY_SIZE(gic400_clocks), 166 + .clocks = gic400_clocks, 167 + }; 168 + 169 + static const struct of_device_id gic_match[] = { 170 + { .compatible = "nvidia,tegra210-agic", .data = &gic400_data }, 171 + {}, 172 + }; 173 + MODULE_DEVICE_TABLE(of, gic_match); 174 + 175 + static struct platform_driver gic_driver = { 176 + .probe = gic_probe, 177 + .driver = { 178 + .name = "gic", 179 + .of_match_table = gic_match, 180 + .pm = &gic_pm_ops, 181 + } 182 + }; 183 + 184 + builtin_platform_driver(gic_driver);
+1
drivers/irqchip/irq-gic-v2m.c
··· 24 24 #include <linux/of_pci.h> 25 25 #include <linux/slab.h> 26 26 #include <linux/spinlock.h> 27 + #include <linux/irqchip/arm-gic.h> 27 28 28 29 /* 29 30 * MSI_TYPER:
+255 -153
drivers/irqchip/irq-gic-v3-its.c
··· 56 56 }; 57 57 58 58 /* 59 - * The ITS_BASER structure - contains memory information and cached 60 - * value of BASER register configuration. 59 + * The ITS_BASER structure - contains memory information, cached 60 + * value of BASER register configuration and ITS page size. 61 61 */ 62 62 struct its_baser { 63 63 void *base; 64 64 u64 val; 65 65 u32 order; 66 + u32 psz; 66 67 }; 67 68 68 69 /* ··· 825 824 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", 826 825 }; 827 826 827 + static u64 its_read_baser(struct its_node *its, struct its_baser *baser) 828 + { 829 + u32 idx = baser - its->tables; 830 + 831 + return readq_relaxed(its->base + GITS_BASER + (idx << 3)); 832 + } 833 + 834 + static void its_write_baser(struct its_node *its, struct its_baser *baser, 835 + u64 val) 836 + { 837 + u32 idx = baser - its->tables; 838 + 839 + writeq_relaxed(val, its->base + GITS_BASER + (idx << 3)); 840 + baser->val = its_read_baser(its, baser); 841 + } 842 + 843 + static int its_setup_baser(struct its_node *its, struct its_baser *baser, 844 + u64 cache, u64 shr, u32 psz, u32 order, 845 + bool indirect) 846 + { 847 + u64 val = its_read_baser(its, baser); 848 + u64 esz = GITS_BASER_ENTRY_SIZE(val); 849 + u64 type = GITS_BASER_TYPE(val); 850 + u32 alloc_pages; 851 + void *base; 852 + u64 tmp; 853 + 854 + retry_alloc_baser: 855 + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); 856 + if (alloc_pages > GITS_BASER_PAGES_MAX) { 857 + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", 858 + &its->phys_base, its_base_type_string[type], 859 + alloc_pages, GITS_BASER_PAGES_MAX); 860 + alloc_pages = GITS_BASER_PAGES_MAX; 861 + order = get_order(GITS_BASER_PAGES_MAX * psz); 862 + } 863 + 864 + base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 865 + if (!base) 866 + return -ENOMEM; 867 + 868 + retry_baser: 869 + val = (virt_to_phys(base) | 870 + (type << GITS_BASER_TYPE_SHIFT) | 871 + ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 872 + ((alloc_pages - 1) << 
GITS_BASER_PAGES_SHIFT) | 873 + cache | 874 + shr | 875 + GITS_BASER_VALID); 876 + 877 + val |= indirect ? GITS_BASER_INDIRECT : 0x0; 878 + 879 + switch (psz) { 880 + case SZ_4K: 881 + val |= GITS_BASER_PAGE_SIZE_4K; 882 + break; 883 + case SZ_16K: 884 + val |= GITS_BASER_PAGE_SIZE_16K; 885 + break; 886 + case SZ_64K: 887 + val |= GITS_BASER_PAGE_SIZE_64K; 888 + break; 889 + } 890 + 891 + its_write_baser(its, baser, val); 892 + tmp = baser->val; 893 + 894 + if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { 895 + /* 896 + * Shareability didn't stick. Just use 897 + * whatever the read reported, which is likely 898 + * to be the only thing this redistributor 899 + * supports. If that's zero, make it 900 + * non-cacheable as well. 901 + */ 902 + shr = tmp & GITS_BASER_SHAREABILITY_MASK; 903 + if (!shr) { 904 + cache = GITS_BASER_nC; 905 + __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order)); 906 + } 907 + goto retry_baser; 908 + } 909 + 910 + if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { 911 + /* 912 + * Page size didn't stick. Let's try a smaller 913 + * size and retry. If we reach 4K, then 914 + * something is horribly wrong... 915 + */ 916 + free_pages((unsigned long)base, order); 917 + baser->base = NULL; 918 + 919 + switch (psz) { 920 + case SZ_16K: 921 + psz = SZ_4K; 922 + goto retry_alloc_baser; 923 + case SZ_64K: 924 + psz = SZ_16K; 925 + goto retry_alloc_baser; 926 + } 927 + } 928 + 929 + if (val != tmp) { 930 + pr_err("ITS@%pa: %s doesn't stick: %lx %lx\n", 931 + &its->phys_base, its_base_type_string[type], 932 + (unsigned long) val, (unsigned long) tmp); 933 + free_pages((unsigned long)base, order); 934 + return -ENXIO; 935 + } 936 + 937 + baser->order = order; 938 + baser->base = base; 939 + baser->psz = psz; 940 + tmp = indirect ? 
GITS_LVL1_ENTRY_SIZE : esz; 941 + 942 + pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n", 943 + &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / tmp), 944 + its_base_type_string[type], 945 + (unsigned long)virt_to_phys(base), 946 + indirect ? "indirect" : "flat", (int)esz, 947 + psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 948 + 949 + return 0; 950 + } 951 + 952 + static bool its_parse_baser_device(struct its_node *its, struct its_baser *baser, 953 + u32 psz, u32 *order) 954 + { 955 + u64 esz = GITS_BASER_ENTRY_SIZE(its_read_baser(its, baser)); 956 + u64 val = GITS_BASER_InnerShareable | GITS_BASER_WaWb; 957 + u32 ids = its->device_ids; 958 + u32 new_order = *order; 959 + bool indirect = false; 960 + 961 + /* No need to enable Indirection if memory requirement < (psz*2)bytes */ 962 + if ((esz << ids) > (psz * 2)) { 963 + /* 964 + * Find out whether hw supports a single or two-level table by 965 + * table by reading bit at offset '62' after writing '1' to it. 966 + */ 967 + its_write_baser(its, baser, val | GITS_BASER_INDIRECT); 968 + indirect = !!(baser->val & GITS_BASER_INDIRECT); 969 + 970 + if (indirect) { 971 + /* 972 + * The size of the lvl2 table is equal to ITS page size 973 + * which is 'psz'. For computing lvl1 table size, 974 + * subtract ID bits that sparse lvl2 table from 'ids' 975 + * which is reported by ITS hardware times lvl1 table 976 + * entry size. 977 + */ 978 + ids -= ilog2(psz / esz); 979 + esz = GITS_LVL1_ENTRY_SIZE; 980 + } 981 + } 982 + 983 + /* 984 + * Allocate as many entries as required to fit the 985 + * range of device IDs that the ITS can grok... The ID 986 + * space being incredibly sparse, this results in a 987 + * massive waste of memory if two-level device table 988 + * feature is not supported by hardware. 
989 + */ 990 + new_order = max_t(u32, get_order(esz << ids), new_order); 991 + if (new_order >= MAX_ORDER) { 992 + new_order = MAX_ORDER - 1; 993 + ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / esz); 994 + pr_warn("ITS@%pa: Device Table too large, reduce ids %u->%u\n", 995 + &its->phys_base, its->device_ids, ids); 996 + } 997 + 998 + *order = new_order; 999 + 1000 + return indirect; 1001 + } 1002 + 828 1003 static void its_free_tables(struct its_node *its) 829 1004 { 830 1005 int i; ··· 1014 837 } 1015 838 } 1016 839 1017 - static int its_alloc_tables(const char *node_name, struct its_node *its) 840 + static int its_alloc_tables(struct its_node *its) 1018 841 { 1019 - int err; 1020 - int i; 1021 - int psz = SZ_64K; 842 + u64 typer = readq_relaxed(its->base + GITS_TYPER); 843 + u32 ids = GITS_TYPER_DEVBITS(typer); 1022 844 u64 shr = GITS_BASER_InnerShareable; 1023 - u64 cache; 1024 - u64 typer; 1025 - u32 ids; 845 + u64 cache = GITS_BASER_WaWb; 846 + u32 psz = SZ_64K; 847 + int err, i; 1026 848 1027 849 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { 1028 850 /* 1029 - * erratum 22375: only alloc 8MB table size 1030 - * erratum 24313: ignore memory access type 1031 - */ 1032 - cache = 0; 1033 - ids = 0x14; /* 20 bits, 8MB */ 1034 - } else { 1035 - cache = GITS_BASER_WaWb; 1036 - typer = readq_relaxed(its->base + GITS_TYPER); 1037 - ids = GITS_TYPER_DEVBITS(typer); 851 + * erratum 22375: only alloc 8MB table size 852 + * erratum 24313: ignore memory access type 853 + */ 854 + cache = GITS_BASER_nCnB; 855 + ids = 0x14; /* 20 bits, 8MB */ 1038 856 } 1039 857 1040 858 its->device_ids = ids; 1041 859 1042 860 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 1043 - u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); 861 + struct its_baser *baser = its->tables + i; 862 + u64 val = its_read_baser(its, baser); 1044 863 u64 type = GITS_BASER_TYPE(val); 1045 - u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 1046 - int order = get_order(psz); 1047 - int alloc_pages; 1048 - u64 
tmp; 1049 - void *base; 864 + u32 order = get_order(psz); 865 + bool indirect = false; 1050 866 1051 867 if (type == GITS_BASER_TYPE_NONE) 1052 868 continue; 1053 869 1054 - /* 1055 - * Allocate as many entries as required to fit the 1056 - * range of device IDs that the ITS can grok... The ID 1057 - * space being incredibly sparse, this results in a 1058 - * massive waste of memory. 1059 - * 1060 - * For other tables, only allocate a single page. 1061 - */ 1062 - if (type == GITS_BASER_TYPE_DEVICE) { 1063 - /* 1064 - * 'order' was initialized earlier to the default page 1065 - * granule of the the ITS. We can't have an allocation 1066 - * smaller than that. If the requested allocation 1067 - * is smaller, round up to the default page granule. 1068 - */ 1069 - order = max(get_order((1UL << ids) * entry_size), 1070 - order); 1071 - if (order >= MAX_ORDER) { 1072 - order = MAX_ORDER - 1; 1073 - pr_warn("%s: Device Table too large, reduce its page order to %u\n", 1074 - node_name, order); 1075 - } 870 + if (type == GITS_BASER_TYPE_DEVICE) 871 + indirect = its_parse_baser_device(its, baser, psz, &order); 872 + 873 + err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); 874 + if (err < 0) { 875 + its_free_tables(its); 876 + return err; 1076 877 } 1077 878 1078 - retry_alloc_baser: 1079 - alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); 1080 - if (alloc_pages > GITS_BASER_PAGES_MAX) { 1081 - alloc_pages = GITS_BASER_PAGES_MAX; 1082 - order = get_order(GITS_BASER_PAGES_MAX * psz); 1083 - pr_warn("%s: Device Table too large, reduce its page order to %u (%u pages)\n", 1084 - node_name, order, alloc_pages); 1085 - } 1086 - 1087 - base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 1088 - if (!base) { 1089 - err = -ENOMEM; 1090 - goto out_free; 1091 - } 1092 - 1093 - its->tables[i].base = base; 1094 - its->tables[i].order = order; 1095 - 1096 - retry_baser: 1097 - val = (virt_to_phys(base) | 1098 - (type << GITS_BASER_TYPE_SHIFT) | 1099 - 
((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | 1100 - cache | 1101 - shr | 1102 - GITS_BASER_VALID); 1103 - 1104 - switch (psz) { 1105 - case SZ_4K: 1106 - val |= GITS_BASER_PAGE_SIZE_4K; 1107 - break; 1108 - case SZ_16K: 1109 - val |= GITS_BASER_PAGE_SIZE_16K; 1110 - break; 1111 - case SZ_64K: 1112 - val |= GITS_BASER_PAGE_SIZE_64K; 1113 - break; 1114 - } 1115 - 1116 - val |= alloc_pages - 1; 1117 - its->tables[i].val = val; 1118 - 1119 - writeq_relaxed(val, its->base + GITS_BASER + i * 8); 1120 - tmp = readq_relaxed(its->base + GITS_BASER + i * 8); 1121 - 1122 - if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { 1123 - /* 1124 - * Shareability didn't stick. Just use 1125 - * whatever the read reported, which is likely 1126 - * to be the only thing this redistributor 1127 - * supports. If that's zero, make it 1128 - * non-cacheable as well. 1129 - */ 1130 - shr = tmp & GITS_BASER_SHAREABILITY_MASK; 1131 - if (!shr) { 1132 - cache = GITS_BASER_nC; 1133 - __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order)); 1134 - } 1135 - goto retry_baser; 1136 - } 1137 - 1138 - if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { 1139 - /* 1140 - * Page size didn't stick. Let's try a smaller 1141 - * size and retry. If we reach 4K, then 1142 - * something is horribly wrong... 
1143 - */ 1144 - free_pages((unsigned long)base, order); 1145 - its->tables[i].base = NULL; 1146 - 1147 - switch (psz) { 1148 - case SZ_16K: 1149 - psz = SZ_4K; 1150 - goto retry_alloc_baser; 1151 - case SZ_64K: 1152 - psz = SZ_16K; 1153 - goto retry_alloc_baser; 1154 - } 1155 - } 1156 - 1157 - if (val != tmp) { 1158 - pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n", 1159 - node_name, i, 1160 - (unsigned long) val, (unsigned long) tmp); 1161 - err = -ENXIO; 1162 - goto out_free; 1163 - } 1164 - 1165 - pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", 1166 - (int)(PAGE_ORDER_TO_SIZE(order) / entry_size), 1167 - its_base_type_string[type], 1168 - (unsigned long)virt_to_phys(base), 1169 - psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 879 + /* Update settings which will be used for next BASERn */ 880 + psz = baser->psz; 881 + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; 882 + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; 1170 883 } 1171 884 1172 885 return 0; 1173 - 1174 - out_free: 1175 - its_free_tables(its); 1176 - 1177 - return err; 1178 886 } 1179 887 1180 888 static int its_alloc_collections(struct its_node *its) ··· 1247 1185 return NULL; 1248 1186 } 1249 1187 1188 + static bool its_alloc_device_table(struct its_node *its, u32 dev_id) 1189 + { 1190 + struct its_baser *baser; 1191 + struct page *page; 1192 + u32 esz, idx; 1193 + __le64 *table; 1194 + 1195 + baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); 1196 + 1197 + /* Don't allow device id that exceeds ITS hardware limit */ 1198 + if (!baser) 1199 + return (ilog2(dev_id) < its->device_ids); 1200 + 1201 + /* Don't allow device id that exceeds single, flat table limit */ 1202 + esz = GITS_BASER_ENTRY_SIZE(baser->val); 1203 + if (!(baser->val & GITS_BASER_INDIRECT)) 1204 + return (dev_id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); 1205 + 1206 + /* Compute 1st level table index & check if that exceeds table limit */ 1207 + idx = dev_id >> ilog2(baser->psz / esz); 1208 + if 
(idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) 1209 + return false; 1210 + 1211 + table = baser->base; 1212 + 1213 + /* Allocate memory for 2nd level table */ 1214 + if (!table[idx]) { 1215 + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz)); 1216 + if (!page) 1217 + return false; 1218 + 1219 + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ 1220 + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) 1221 + __flush_dcache_area(page_address(page), baser->psz); 1222 + 1223 + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); 1224 + 1225 + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ 1226 + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) 1227 + __flush_dcache_area(table + idx, GITS_LVL1_ENTRY_SIZE); 1228 + 1229 + /* Ensure updated table contents are visible to ITS hardware */ 1230 + dsb(sy); 1231 + } 1232 + 1233 + return true; 1234 + } 1235 + 1250 1236 static struct its_device *its_create_device(struct its_node *its, u32 dev_id, 1251 1237 int nvecs) 1252 1238 { 1253 - struct its_baser *baser; 1254 1239 struct its_device *dev; 1255 1240 unsigned long *lpi_map; 1256 1241 unsigned long flags; ··· 1308 1199 int nr_ites; 1309 1200 int sz; 1310 1201 1311 - baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); 1312 - 1313 - /* Don't allow 'dev_id' that exceeds single, flat table limit */ 1314 - if (baser) { 1315 - if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) / 1316 - GITS_BASER_ENTRY_SIZE(baser->val))) 1317 - return NULL; 1318 - } else if (ilog2(dev_id) >= its->device_ids) 1202 + if (!its_alloc_device_table(its, dev_id)) 1319 1203 return NULL; 1320 1204 1321 1205 dev = kzalloc(sizeof(*dev), GFP_KERNEL); ··· 1671 1569 1672 1570 its_enable_quirks(its); 1673 1571 1674 - err = its_alloc_tables(node->full_name, its); 1572 + err = its_alloc_tables(its); 1675 1573 if (err) 1676 1574 goto out_free_cmd; 1677 1575
+96 -42
drivers/irqchip/irq-gic.c
··· 75 75 void __iomem *raw_dist_base; 76 76 void __iomem *raw_cpu_base; 77 77 u32 percpu_offset; 78 - #ifdef CONFIG_CPU_PM 78 + #if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM) 79 79 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; 80 80 u32 saved_spi_active[DIV_ROUND_UP(1020, 32)]; 81 81 u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; ··· 449 449 } 450 450 451 451 452 - static void __init gic_dist_init(struct gic_chip_data *gic) 452 + static void gic_dist_init(struct gic_chip_data *gic) 453 453 { 454 454 unsigned int i; 455 455 u32 cpumask; ··· 528 528 return 0; 529 529 } 530 530 531 - #ifdef CONFIG_CPU_PM 531 + #if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM) 532 532 /* 533 533 * Saves the GIC distributor registers during suspend or idle. Must be called 534 534 * with interrupts disabled but before powering down the GIC. After calling 535 535 * this function, no interrupts will be delivered by the GIC, and another 536 536 * platform-specific wakeup source must be enabled. 537 537 */ 538 - static void gic_dist_save(struct gic_chip_data *gic) 538 + void gic_dist_save(struct gic_chip_data *gic) 539 539 { 540 540 unsigned int gic_irqs; 541 541 void __iomem *dist_base; ··· 574 574 * handled normally, but any edge interrupts that occured will not be seen by 575 575 * the GIC and need to be handled by the platform-specific wakeup source. 
576 576 */ 577 - static void gic_dist_restore(struct gic_chip_data *gic) 577 + void gic_dist_restore(struct gic_chip_data *gic) 578 578 { 579 579 unsigned int gic_irqs; 580 580 unsigned int i; ··· 620 620 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); 621 621 } 622 622 623 - static void gic_cpu_save(struct gic_chip_data *gic) 623 + void gic_cpu_save(struct gic_chip_data *gic) 624 624 { 625 625 int i; 626 626 u32 *ptr; ··· 650 650 651 651 } 652 652 653 - static void gic_cpu_restore(struct gic_chip_data *gic) 653 + void gic_cpu_restore(struct gic_chip_data *gic) 654 654 { 655 655 int i; 656 656 u32 *ptr; ··· 727 727 .notifier_call = gic_notifier, 728 728 }; 729 729 730 - static int __init gic_pm_init(struct gic_chip_data *gic) 730 + static int gic_pm_init(struct gic_chip_data *gic) 731 731 { 732 732 gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, 733 733 sizeof(u32)); ··· 757 757 return -ENOMEM; 758 758 } 759 759 #else 760 - static int __init gic_pm_init(struct gic_chip_data *gic) 760 + static int gic_pm_init(struct gic_chip_data *gic) 761 761 { 762 762 return 0; 763 763 } ··· 1032 1032 .unmap = gic_irq_domain_unmap, 1033 1033 }; 1034 1034 1035 - static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start, 1036 - struct fwnode_handle *handle) 1035 + static void gic_init_chip(struct gic_chip_data *gic, struct device *dev, 1036 + const char *name, bool use_eoimode1) 1037 1037 { 1038 - irq_hw_number_t hwirq_base; 1039 - int gic_irqs, irq_base, i, ret; 1040 - 1041 - if (WARN_ON(!gic || gic->domain)) 1042 - return -EINVAL; 1043 - 1044 1038 /* Initialize irq_chip */ 1045 1039 gic->chip = gic_chip; 1040 + gic->chip.name = name; 1041 + gic->chip.parent_device = dev; 1046 1042 1047 - if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) { 1043 + if (use_eoimode1) { 1048 1044 gic->chip.irq_mask = gic_eoimode1_mask_irq; 1049 1045 gic->chip.irq_eoi = gic_eoimode1_eoi_irq; 1050 1046 gic->chip.irq_set_vcpu_affinity = 
gic_irq_set_vcpu_affinity; 1051 - gic->chip.name = kasprintf(GFP_KERNEL, "GICv2"); 1052 - } else { 1053 - gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", 1054 - (int)(gic - &gic_data[0])); 1055 1047 } 1056 1048 1057 1049 #ifdef CONFIG_SMP 1058 1050 if (gic == &gic_data[0]) 1059 1051 gic->chip.irq_set_affinity = gic_set_affinity; 1060 1052 #endif 1053 + } 1054 + 1055 + static int gic_init_bases(struct gic_chip_data *gic, int irq_start, 1056 + struct fwnode_handle *handle) 1057 + { 1058 + irq_hw_number_t hwirq_base; 1059 + int gic_irqs, irq_base, ret; 1061 1060 1062 1061 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { 1063 1062 /* Frankein-GIC without banked registers... */ ··· 1137 1138 goto error; 1138 1139 } 1139 1140 1140 - if (gic == &gic_data[0]) { 1141 - /* 1142 - * Initialize the CPU interface map to all CPUs. 1143 - * It will be refined as each CPU probes its ID. 1144 - * This is only necessary for the primary GIC. 1145 - */ 1146 - for (i = 0; i < NR_GIC_CPU_IF; i++) 1147 - gic_cpu_map[i] = 0xff; 1148 - #ifdef CONFIG_SMP 1149 - set_smp_cross_call(gic_raise_softirq); 1150 - register_cpu_notifier(&gic_cpu_notifier); 1151 - #endif 1152 - set_handle_irq(gic_handle_irq); 1153 - if (static_key_true(&supports_deactivate)) 1154 - pr_info("GIC: Using split EOI/Deactivate mode\n"); 1155 - } 1156 - 1157 1141 gic_dist_init(gic); 1158 1142 ret = gic_cpu_init(gic); 1159 1143 if (ret) ··· 1154 1172 free_percpu(gic->cpu_base.percpu_base); 1155 1173 } 1156 1174 1157 - kfree(gic->chip.name); 1175 + return ret; 1176 + } 1177 + 1178 + static int __init __gic_init_bases(struct gic_chip_data *gic, 1179 + int irq_start, 1180 + struct fwnode_handle *handle) 1181 + { 1182 + char *name; 1183 + int i, ret; 1184 + 1185 + if (WARN_ON(!gic || gic->domain)) 1186 + return -EINVAL; 1187 + 1188 + if (gic == &gic_data[0]) { 1189 + /* 1190 + * Initialize the CPU interface map to all CPUs. 1191 + * It will be refined as each CPU probes its ID. 
1192 + * This is only necessary for the primary GIC. 1193 + */ 1194 + for (i = 0; i < NR_GIC_CPU_IF; i++) 1195 + gic_cpu_map[i] = 0xff; 1196 + #ifdef CONFIG_SMP 1197 + set_smp_cross_call(gic_raise_softirq); 1198 + register_cpu_notifier(&gic_cpu_notifier); 1199 + #endif 1200 + set_handle_irq(gic_handle_irq); 1201 + if (static_key_true(&supports_deactivate)) 1202 + pr_info("GIC: Using split EOI/Deactivate mode\n"); 1203 + } 1204 + 1205 + if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) { 1206 + name = kasprintf(GFP_KERNEL, "GICv2"); 1207 + gic_init_chip(gic, NULL, name, true); 1208 + } else { 1209 + name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0])); 1210 + gic_init_chip(gic, NULL, name, false); 1211 + } 1212 + 1213 + ret = gic_init_bases(gic, irq_start, handle); 1214 + if (ret) 1215 + kfree(name); 1158 1216 1159 1217 return ret; 1160 1218 } ··· 1272 1250 return true; 1273 1251 } 1274 1252 1275 - static int __init gic_of_setup(struct gic_chip_data *gic, struct device_node *node) 1253 + static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node) 1276 1254 { 1277 1255 if (!gic || !node) 1278 1256 return -EINVAL; ··· 1294 1272 gic_teardown(gic); 1295 1273 1296 1274 return -ENOMEM; 1275 + } 1276 + 1277 + int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) 1278 + { 1279 + int ret; 1280 + 1281 + if (!dev || !dev->of_node || !gic || !irq) 1282 + return -EINVAL; 1283 + 1284 + *gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL); 1285 + if (!*gic) 1286 + return -ENOMEM; 1287 + 1288 + gic_init_chip(*gic, dev, dev->of_node->name, false); 1289 + 1290 + ret = gic_of_setup(*gic, dev->of_node); 1291 + if (ret) 1292 + return ret; 1293 + 1294 + ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode); 1295 + if (ret) { 1296 + gic_teardown(*gic); 1297 + return ret; 1298 + } 1299 + 1300 + irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic); 1301 + 1302 + return 0; 1297 1303 } 1298 1304 1299 1305 
static void __init gic_of_setup_kvm_info(struct device_node *node) ··· 1403 1353 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); 1404 1354 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); 1405 1355 IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init); 1406 - 1356 + #else 1357 + int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq) 1358 + { 1359 + return -ENOTSUPP; 1360 + } 1407 1361 #endif 1408 1362 1409 1363 #ifdef CONFIG_ACPI
+3
drivers/irqchip/irq-mips-gic.c
··· 1042 1042 &gic_irq_domain_ops, NULL); 1043 1043 if (!gic_irq_domain) 1044 1044 panic("Failed to add GIC IRQ domain"); 1045 + gic_irq_domain->name = "mips-gic-irq"; 1045 1046 1046 1047 gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0, 1047 1048 GIC_NUM_LOCAL_INTRS + gic_shared_intrs, 1048 1049 node, &gic_dev_domain_ops, NULL); 1049 1050 if (!gic_dev_domain) 1050 1051 panic("Failed to add GIC DEV domain"); 1052 + gic_dev_domain->name = "mips-gic-dev"; 1051 1053 1052 1054 gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, 1053 1055 IRQ_DOMAIN_FLAG_IPI_PER_CPU, ··· 1058 1056 if (!gic_ipi_domain) 1059 1057 panic("Failed to add GIC IPI domain"); 1060 1058 1059 + gic_ipi_domain->name = "mips-gic-ipi"; 1061 1060 gic_ipi_domain->bus_token = DOMAIN_BUS_IPI; 1062 1061 1063 1062 if (node &&
+2
drivers/irqchip/irq-omap-intc.c
··· 23 23 #include <linux/of_address.h> 24 24 #include <linux/of_irq.h> 25 25 26 + #include <linux/irqchip/irq-omap-intc.h> 27 + 26 28 /* Define these here for now until we drop all board-files */ 27 29 #define OMAP24XX_IC_BASE 0x480fe000 28 30 #define OMAP34XX_IC_BASE 0x48200000
+18 -18
drivers/irqchip/irq-s3c24xx.c
··· 92 92 unsigned long mask; 93 93 unsigned int irqno; 94 94 95 - mask = __raw_readl(intc->reg_mask); 95 + mask = readl_relaxed(intc->reg_mask); 96 96 mask |= (1UL << irq_data->offset); 97 - __raw_writel(mask, intc->reg_mask); 97 + writel_relaxed(mask, intc->reg_mask); 98 98 99 99 if (parent_intc) { 100 100 parent_data = &parent_intc->irqs[irq_data->parent_irq]; ··· 119 119 unsigned long mask; 120 120 unsigned int irqno; 121 121 122 - mask = __raw_readl(intc->reg_mask); 122 + mask = readl_relaxed(intc->reg_mask); 123 123 mask &= ~(1UL << irq_data->offset); 124 - __raw_writel(mask, intc->reg_mask); 124 + writel_relaxed(mask, intc->reg_mask); 125 125 126 126 if (parent_intc) { 127 127 irqno = irq_find_mapping(parent_intc->domain, ··· 136 136 struct s3c_irq_intc *intc = irq_data->intc; 137 137 unsigned long bitval = 1UL << irq_data->offset; 138 138 139 - __raw_writel(bitval, intc->reg_pending); 139 + writel_relaxed(bitval, intc->reg_pending); 140 140 if (intc->reg_intpnd) 141 - __raw_writel(bitval, intc->reg_intpnd); 141 + writel_relaxed(bitval, intc->reg_intpnd); 142 142 } 143 143 144 144 static int s3c_irq_type(struct irq_data *data, unsigned int type) ··· 172 172 unsigned long newvalue = 0, value; 173 173 174 174 /* Set the GPIO to external interrupt mode */ 175 - value = __raw_readl(gpcon_reg); 175 + value = readl_relaxed(gpcon_reg); 176 176 value = (value & ~(3 << gpcon_offset)) | (0x02 << gpcon_offset); 177 - __raw_writel(value, gpcon_reg); 177 + writel_relaxed(value, gpcon_reg); 178 178 179 179 /* Set the external interrupt to pointed trigger type */ 180 180 switch (type) ··· 208 208 return -EINVAL; 209 209 } 210 210 211 - value = __raw_readl(extint_reg); 211 + value = readl_relaxed(extint_reg); 212 212 value = (value & ~(7 << extint_offset)) | (newvalue << extint_offset); 213 - __raw_writel(value, extint_reg); 213 + writel_relaxed(value, extint_reg); 214 214 215 215 return 0; 216 216 } ··· 315 315 316 316 chained_irq_enter(chip, desc); 317 317 318 - src = 
__raw_readl(sub_intc->reg_pending); 319 - msk = __raw_readl(sub_intc->reg_mask); 318 + src = readl_relaxed(sub_intc->reg_pending); 319 + msk = readl_relaxed(sub_intc->reg_mask); 320 320 321 321 src &= ~msk; 322 322 src &= irq_data->sub_bits; ··· 337 337 int pnd; 338 338 int offset; 339 339 340 - pnd = __raw_readl(intc->reg_intpnd); 340 + pnd = readl_relaxed(intc->reg_intpnd); 341 341 if (!pnd) 342 342 return false; 343 343 ··· 352 352 * 353 353 * Thanks to Klaus, Shannon, et al for helping to debug this problem 354 354 */ 355 - offset = __raw_readl(intc->reg_intpnd + 4); 355 + offset = readl_relaxed(intc->reg_intpnd + 4); 356 356 357 357 /* Find the bit manually, when the offset is wrong. 358 358 * The pending register only ever contains the one bit of the next ··· 406 406 intmod = 0; 407 407 } 408 408 409 - __raw_writel(intmod, S3C2410_INTMOD); 409 + writel_relaxed(intmod, S3C2410_INTMOD); 410 410 return 0; 411 411 } 412 412 ··· 508 508 509 509 last = 0; 510 510 for (i = 0; i < 4; i++) { 511 - pend = __raw_readl(reg_source); 511 + pend = readl_relaxed(reg_source); 512 512 513 513 if (pend == 0 || pend == last) 514 514 break; 515 515 516 - __raw_writel(pend, intc->reg_pending); 516 + writel_relaxed(pend, intc->reg_pending); 517 517 if (intc->reg_intpnd) 518 - __raw_writel(pend, intc->reg_intpnd); 518 + writel_relaxed(pend, intc->reg_intpnd); 519 519 520 520 pr_info("irq: clearing pending status %08x\n", (int)pend); 521 521 last = pend;
+8 -3
drivers/irqchip/irq-sirfsoc.c
··· 29 29 30 30 static struct irq_domain *sirfsoc_irqdomain; 31 31 32 + static void __iomem *sirfsoc_irq_get_regbase(void) 33 + { 34 + return (void __iomem __force *)sirfsoc_irqdomain->host_data; 35 + } 36 + 32 37 static __init void sirfsoc_alloc_gc(void __iomem *base) 33 38 { 34 39 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; ··· 58 53 59 54 static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) 60 55 { 61 - void __iomem *base = sirfsoc_irqdomain->host_data; 56 + void __iomem *base = sirfsoc_irq_get_regbase(); 62 57 u32 irqstat; 63 58 64 59 irqstat = readl_relaxed(base + SIRFSOC_INIT_IRQ_ID); ··· 99 94 100 95 static int sirfsoc_irq_suspend(void) 101 96 { 102 - void __iomem *base = sirfsoc_irqdomain->host_data; 97 + void __iomem *base = sirfsoc_irq_get_regbase(); 103 98 104 99 sirfsoc_irq_st.mask0 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK0); 105 100 sirfsoc_irq_st.mask1 = readl_relaxed(base + SIRFSOC_INT_RISC_MASK1); ··· 111 106 112 107 static void sirfsoc_irq_resume(void) 113 108 { 114 - void __iomem *base = sirfsoc_irqdomain->host_data; 109 + void __iomem *base = sirfsoc_irq_get_regbase(); 115 110 116 111 writel_relaxed(sirfsoc_irq_st.mask0, base + SIRFSOC_INT_RISC_MASK0); 117 112 writel_relaxed(sirfsoc_irq_st.mask1, base + SIRFSOC_INT_RISC_MASK1);
+2 -2
drivers/irqchip/irq-tegra.c
··· 90 90 91 91 static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg) 92 92 { 93 - void __iomem *base = d->chip_data; 93 + void __iomem *base = (void __iomem __force *)d->chip_data; 94 94 u32 mask; 95 95 96 96 mask = BIT(d->hwirq % 32); ··· 266 266 267 267 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 268 268 &tegra_ictlr_chip, 269 - info->base[ictlr]); 269 + (void __force *)info->base[ictlr]); 270 270 } 271 271 272 272 parent_fwspec = *fwspec;
+3 -2
drivers/irqchip/irq-vic.c
··· 167 167 return 0; 168 168 } 169 169 170 - struct syscore_ops vic_syscore_ops = { 170 + static struct syscore_ops vic_syscore_ops = { 171 171 .suspend = vic_suspend, 172 172 .resume = vic_resume, 173 173 }; ··· 517 517 EXPORT_SYMBOL_GPL(vic_init_cascaded); 518 518 519 519 #ifdef CONFIG_OF 520 - int __init vic_of_init(struct device_node *node, struct device_node *parent) 520 + static int __init vic_of_init(struct device_node *node, 521 + struct device_node *parent) 521 522 { 522 523 void __iomem *regs; 523 524 u32 interrupt_mask = ~0;
+8
include/linux/interrupt.h
··· 278 278 extern int 279 279 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); 280 280 281 + struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs); 282 + 281 283 #else /* CONFIG_SMP */ 282 284 283 285 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m) ··· 309 307 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) 310 308 { 311 309 return 0; 310 + } 311 + 312 + static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) 313 + { 314 + *nr_vecs = 1; 315 + return NULL; 312 316 } 313 317 #endif /* CONFIG_SMP */ 314 318
+14 -2
include/linux/irq.h
··· 197 197 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt 198 198 * IRQD_WAKEUP_ARMED - Wakeup mode armed 199 199 * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU 200 + * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel 200 201 */ 201 202 enum { 202 203 IRQD_TRIGGER_MASK = 0xf, ··· 213 212 IRQD_IRQ_INPROGRESS = (1 << 18), 214 213 IRQD_WAKEUP_ARMED = (1 << 19), 215 214 IRQD_FORWARDED_TO_VCPU = (1 << 20), 215 + IRQD_AFFINITY_MANAGED = (1 << 21), 216 216 }; 217 217 218 218 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) ··· 307 305 __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; 308 306 } 309 307 308 + static inline bool irqd_affinity_is_managed(struct irq_data *d) 309 + { 310 + return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; 311 + } 312 + 310 313 #undef __irqd_to_state 311 314 312 315 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) ··· 322 315 /** 323 316 * struct irq_chip - hardware interrupt chip descriptor 324 317 * 318 + * @parent_device: pointer to parent device for irqchip 325 319 * @name: name for /proc/interrupts 326 320 * @irq_startup: start up the interrupt (defaults to ->enable if NULL) 327 321 * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) ··· 362 354 * @flags: chip specific flags 363 355 */ 364 356 struct irq_chip { 357 + struct device *parent_device; 365 358 const char *name; 366 359 unsigned int (*irq_startup)(struct irq_data *data); 367 360 void (*irq_shutdown)(struct irq_data *data); ··· 491 482 extern void handle_edge_irq(struct irq_desc *desc); 492 483 extern void handle_edge_eoi_irq(struct irq_desc *desc); 493 484 extern void handle_simple_irq(struct irq_desc *desc); 485 + extern void handle_untracked_irq(struct irq_desc *desc); 494 486 extern void handle_percpu_irq(struct irq_desc *desc); 495 487 extern void handle_percpu_devid_irq(struct irq_desc *desc); 496 488 extern void handle_bad_irq(struct irq_desc *desc); 497 489 extern void 
handle_nested_irq(unsigned int irq); 498 490 499 491 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); 492 + extern int irq_chip_pm_get(struct irq_data *data); 493 + extern int irq_chip_pm_put(struct irq_data *data); 500 494 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 501 495 extern void irq_chip_enable_parent(struct irq_data *data); 502 496 extern void irq_chip_disable_parent(struct irq_data *data); ··· 713 701 unsigned int arch_dynirq_lower_bound(unsigned int from); 714 702 715 703 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 716 - struct module *owner); 704 + struct module *owner, const struct cpumask *affinity); 717 705 718 706 /* use macros to avoid needing export.h for THIS_MODULE */ 719 707 #define irq_alloc_descs(irq, from, cnt, node) \ 720 - __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE) 708 + __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) 721 709 722 710 #define irq_alloc_desc(node) \ 723 711 irq_alloc_descs(-1, 0, 1, node)
+4
include/linux/irqchip/arm-gic-v3.h
··· 204 204 #define GITS_BASER_NR_REGS 8 205 205 206 206 #define GITS_BASER_VALID (1UL << 63) 207 + #define GITS_BASER_INDIRECT (1UL << 62) 207 208 #define GITS_BASER_nCnB (0UL << 59) 208 209 #define GITS_BASER_nC (1UL << 59) 209 210 #define GITS_BASER_RaWt (2UL << 59) ··· 229 228 #define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) 230 229 #define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) 231 230 #define GITS_BASER_PAGES_MAX 256 231 + #define GITS_BASER_PAGES_SHIFT (0) 232 232 233 233 #define GITS_BASER_TYPE_NONE 0 234 234 #define GITS_BASER_TYPE_DEVICE 1 ··· 239 237 #define GITS_BASER_TYPE_RESERVED5 5 240 238 #define GITS_BASER_TYPE_RESERVED6 6 241 239 #define GITS_BASER_TYPE_RESERVED7 7 240 + 241 + #define GITS_LVL1_ENTRY_SIZE (8UL) 242 242 243 243 /* 244 244 * ITS commands
+11
include/linux/irqchip/arm-gic.h
··· 101 101 #include <linux/irqdomain.h> 102 102 103 103 struct device_node; 104 + struct gic_chip_data; 104 105 105 106 void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); 106 107 int gic_cpu_if_down(unsigned int gic_nr); 108 + void gic_cpu_save(struct gic_chip_data *gic); 109 + void gic_cpu_restore(struct gic_chip_data *gic); 110 + void gic_dist_save(struct gic_chip_data *gic); 111 + void gic_dist_restore(struct gic_chip_data *gic); 107 112 108 113 /* 109 114 * Subdrivers that need some preparatory work can initialize their 110 115 * chips and call this to register their GICs. 111 116 */ 112 117 int gic_of_init(struct device_node *node, struct device_node *parent); 118 + 119 + /* 120 + * Initialises and registers a non-root or child GIC chip. Memory for 121 + * the gic_chip_data structure is dynamically allocated. 122 + */ 123 + int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); 113 124 114 125 /* 115 126 * Legacy platforms not converted to DT yet must use this to init
+9 -3
include/linux/irqdomain.h
··· 39 39 struct of_device_id; 40 40 struct irq_chip; 41 41 struct irq_data; 42 + struct cpumask; 42 43 43 44 /* Number of irqs reserved for a legacy isa controller */ 44 45 #define NUM_ISA_INTERRUPTS 16 ··· 218 217 enum irq_domain_bus_token bus_token); 219 218 extern void irq_set_default_host(struct irq_domain *host); 220 219 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, 221 - irq_hw_number_t hwirq, int node); 220 + irq_hw_number_t hwirq, int node, 221 + const struct cpumask *affinity); 222 222 223 223 static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node) 224 224 { ··· 391 389 392 390 extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, 393 391 unsigned int nr_irqs, int node, void *arg, 394 - bool realloc); 392 + bool realloc, const struct cpumask *affinity); 395 393 extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); 396 394 extern void irq_domain_activate_irq(struct irq_data *irq_data); 397 395 extern void irq_domain_deactivate_irq(struct irq_data *irq_data); ··· 399 397 static inline int irq_domain_alloc_irqs(struct irq_domain *domain, 400 398 unsigned int nr_irqs, int node, void *arg) 401 399 { 402 - return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); 400 + return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false, 401 + NULL); 403 402 } 404 403 405 404 extern int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, ··· 454 451 { 455 452 return -1; 456 453 } 454 + 455 + static inline void irq_domain_free_irqs(unsigned int virq, 456 + unsigned int nr_irqs) { } 457 457 458 458 static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) 459 459 {
+4 -4
include/linux/msi.h
··· 47 47 * @nvec_used: The number of vectors used 48 48 * @dev: Pointer to the device which uses this descriptor 49 49 * @msg: The last set MSI message cached for reuse 50 + * @affinity: Optional pointer to a cpu affinity mask for this descriptor 50 51 * 51 52 * @masked: [PCI MSI/X] Mask bits 52 53 * @is_msix: [PCI MSI/X] True if MSI-X ··· 68 67 unsigned int nvec_used; 69 68 struct device *dev; 70 69 struct msi_msg msg; 70 + const struct cpumask *affinity; 71 71 72 72 union { 73 73 /* PCI MSI/X specific data */ ··· 266 264 * callbacks. 267 265 */ 268 266 MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), 269 - /* Build identity map between hwirq and irq */ 270 - MSI_FLAG_IDENTITY_MAP = (1 << 2), 271 267 /* Support multiple PCI MSI interrupts */ 272 - MSI_FLAG_MULTI_PCI_MSI = (1 << 3), 268 + MSI_FLAG_MULTI_PCI_MSI = (1 << 2), 273 269 /* Support PCI MSIX interrupts */ 274 - MSI_FLAG_PCI_MSIX = (1 << 4), 270 + MSI_FLAG_PCI_MSIX = (1 << 3), 275 271 }; 276 272 277 273 int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
+1
kernel/irq/Makefile
··· 9 9 obj-$(CONFIG_PM_SLEEP) += pm.o 10 10 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o 11 11 obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o 12 + obj-$(CONFIG_SMP) += affinity.o
+61
kernel/irq/affinity.c
··· 1 + 2 + #include <linux/interrupt.h> 3 + #include <linux/kernel.h> 4 + #include <linux/slab.h> 5 + #include <linux/cpu.h> 6 + 7 + static int get_first_sibling(unsigned int cpu) 8 + { 9 + unsigned int ret; 10 + 11 + ret = cpumask_first(topology_sibling_cpumask(cpu)); 12 + if (ret < nr_cpu_ids) 13 + return ret; 14 + return cpu; 15 + } 16 + 17 + /* 18 + * Take a map of online CPUs and the number of available interrupt vectors 19 + * and generate an output cpumask suitable for spreading MSI/MSI-X vectors 20 + * so that they are distributed as good as possible around the CPUs. If 21 + * more vectors than CPUs are available we'll map one to each CPU, 22 + * otherwise we map one to the first sibling of each socket. 23 + * 24 + * If there are more vectors than CPUs we will still only have one bit 25 + * set per CPU, but interrupt code will keep on assigning the vectors from 26 + * the start of the bitmap until we run out of vectors. 27 + */ 28 + struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) 29 + { 30 + struct cpumask *affinity_mask; 31 + unsigned int max_vecs = *nr_vecs; 32 + 33 + if (max_vecs == 1) 34 + return NULL; 35 + 36 + affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL); 37 + if (!affinity_mask) { 38 + *nr_vecs = 1; 39 + return NULL; 40 + } 41 + 42 + if (max_vecs >= num_online_cpus()) { 43 + cpumask_copy(affinity_mask, cpu_online_mask); 44 + *nr_vecs = num_online_cpus(); 45 + } else { 46 + unsigned int vecs = 0, cpu; 47 + 48 + for_each_online_cpu(cpu) { 49 + if (cpu == get_first_sibling(cpu)) { 50 + cpumask_set_cpu(cpu, affinity_mask); 51 + vecs++; 52 + } 53 + 54 + if (--max_vecs == 0) 55 + break; 56 + } 57 + *nr_vecs = vecs; 58 + } 59 + 60 + return affinity_mask; 61 + }
+83
kernel/irq/chip.c
··· 426 426 } 427 427 EXPORT_SYMBOL_GPL(handle_simple_irq); 428 428 429 + /** 430 + * handle_untracked_irq - Simple and software-decoded IRQs. 431 + * @desc: the interrupt description structure for this irq 432 + * 433 + * Untracked interrupts are sent from a demultiplexing interrupt 434 + * handler when the demultiplexer does not know which device it its 435 + * multiplexed irq domain generated the interrupt. IRQ's handled 436 + * through here are not subjected to stats tracking, randomness, or 437 + * spurious interrupt detection. 438 + * 439 + * Note: Like handle_simple_irq, the caller is expected to handle 440 + * the ack, clear, mask and unmask issues if necessary. 441 + */ 442 + void handle_untracked_irq(struct irq_desc *desc) 443 + { 444 + unsigned int flags = 0; 445 + 446 + raw_spin_lock(&desc->lock); 447 + 448 + if (!irq_may_run(desc)) 449 + goto out_unlock; 450 + 451 + desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 452 + 453 + if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 454 + desc->istate |= IRQS_PENDING; 455 + goto out_unlock; 456 + } 457 + 458 + desc->istate &= ~IRQS_PENDING; 459 + irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); 460 + raw_spin_unlock(&desc->lock); 461 + 462 + __handle_irq_event_percpu(desc, &flags); 463 + 464 + raw_spin_lock(&desc->lock); 465 + irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); 466 + 467 + out_unlock: 468 + raw_spin_unlock(&desc->lock); 469 + } 470 + EXPORT_SYMBOL_GPL(handle_untracked_irq); 471 + 429 472 /* 430 473 * Called unconditionally from handle_level_irq() and only for oneshot 431 474 * interrupts from handle_fasteoi_irq() ··· 1135 1092 pos->chip->irq_compose_msi_msg(pos, msg); 1136 1093 1137 1094 return 0; 1095 + } 1096 + 1097 + /** 1098 + * irq_chip_pm_get - Enable power for an IRQ chip 1099 + * @data: Pointer to interrupt specific data 1100 + * 1101 + * Enable the power to the IRQ chip referenced by the interrupt data 1102 + * structure. 
1103 + */ 
1104 + int irq_chip_pm_get(struct irq_data *data) 
1105 + { 
1106 + int retval; 
1107 + 
1108 + if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) { 
1109 + retval = pm_runtime_get_sync(data->chip->parent_device); 
1110 + if (retval < 0) { 
1111 + pm_runtime_put_noidle(data->chip->parent_device); 
1112 + return retval; 
1113 + } 
1114 + } 
1115 + 
1116 + return 0; 
1117 + } 
1118 + 
1119 + /** 
1120 + * irq_chip_pm_put - Disable power for an IRQ chip 
1121 + * @data: Pointer to interrupt specific data 
1122 + * 
1123 + * Disable the power to the IRQ chip referenced by the interrupt data 
1124 + * structure, to which it belongs. Note that power will only be disabled once 
1125 + * this function has been called for all IRQs that have called irq_chip_pm_get(). 
1126 + */ 
1127 + int irq_chip_pm_put(struct irq_data *data) 
1128 + { 
1129 + int retval = 0; 
1130 + 
1131 + if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) 
1132 + retval = pm_runtime_put(data->chip->parent_device); 
1133 + 
1134 + return (retval < 0) ? retval : 0; 
1138 1135 }
+14 -4
kernel/irq/handle.c
··· 132 132 wake_up_process(action->thread); 133 133 } 134 134 135 - irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) 135 + irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags) 136 136 { 137 137 irqreturn_t retval = IRQ_NONE; 138 - unsigned int flags = 0, irq = desc->irq_data.irq; 138 + unsigned int irq = desc->irq_data.irq; 139 139 struct irqaction *action; 140 140 141 141 for_each_action_of_desc(desc, action) { ··· 164 164 165 165 /* Fall through to add to randomness */ 166 166 case IRQ_HANDLED: 167 - flags |= action->flags; 167 + *flags |= action->flags; 168 168 break; 169 169 170 170 default: ··· 174 174 retval |= res; 175 175 } 176 176 177 - add_interrupt_randomness(irq, flags); 177 + return retval; 178 + } 179 + 180 + irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) 181 + { 182 + irqreturn_t retval; 183 + unsigned int flags = 0; 184 + 185 + retval = __handle_irq_event_percpu(desc, &flags); 186 + 187 + add_interrupt_randomness(desc->irq_data.irq, flags); 178 188 179 189 if (!noirqdebug) 180 190 note_interrupt(desc, retval);
+4
kernel/irq/internals.h
··· 7 7 */ 8 8 #include <linux/irqdesc.h> 9 9 #include <linux/kernel_stat.h> 10 + #include <linux/pm_runtime.h> 10 11 11 12 #ifdef CONFIG_SPARSE_IRQ 12 13 # define IRQ_BITMAP_BITS (NR_IRQS + 8196) ··· 84 83 85 84 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 86 85 86 + irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags); 87 87 irqreturn_t handle_irq_event_percpu(struct irq_desc *desc); 88 88 irqreturn_t handle_irq_event(struct irq_desc *desc); 89 89 ··· 106 104 static inline void unregister_handler_proc(unsigned int irq, 107 105 struct irqaction *action) { } 108 106 #endif 107 + 108 + extern bool irq_can_set_affinity_usr(unsigned int irq); 109 109 110 110 extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); 111 111
+2 -2
kernel/irq/ipi.c
··· 76 76 } 77 77 } 78 78 79 - virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE); 79 + virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL); 80 80 if (virq <= 0) { 81 81 pr_warn("Can't reserve IPI, failed to alloc descs\n"); 82 82 return -ENOMEM; 83 83 } 84 84 85 85 virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE, 86 - (void *) dest, true); 86 + (void *) dest, true, NULL); 87 87 88 88 if (virq <= 0) { 89 89 pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
+47 -16
kernel/irq/irqdesc.c
··· 68 68 return 0; 69 69 } 70 70 71 - static void desc_smp_init(struct irq_desc *desc, int node) 71 + static void desc_smp_init(struct irq_desc *desc, int node, 72 + const struct cpumask *affinity) 72 73 { 73 - cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity); 74 + if (!affinity) 75 + affinity = irq_default_affinity; 76 + cpumask_copy(desc->irq_common_data.affinity, affinity); 77 + 74 78 #ifdef CONFIG_GENERIC_PENDING_IRQ 75 79 cpumask_clear(desc->pending_mask); 76 80 #endif ··· 86 82 #else 87 83 static inline int 88 84 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } 89 - static inline void desc_smp_init(struct irq_desc *desc, int node) { } 85 + static inline void 86 + desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } 90 87 #endif 91 88 92 89 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, 93 - struct module *owner) 90 + const struct cpumask *affinity, struct module *owner) 94 91 { 95 92 int cpu; 96 93 ··· 112 107 desc->owner = owner; 113 108 for_each_possible_cpu(cpu) 114 109 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; 115 - desc_smp_init(desc, node); 110 + desc_smp_init(desc, node, affinity); 116 111 } 117 112 118 113 int nr_irqs = NR_IRQS; ··· 163 158 mutex_unlock(&sparse_irq_lock); 164 159 } 165 160 166 - static struct irq_desc *alloc_desc(int irq, int node, struct module *owner) 161 + static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, 162 + const struct cpumask *affinity, 163 + struct module *owner) 167 164 { 168 165 struct irq_desc *desc; 169 166 gfp_t gfp = GFP_KERNEL; ··· 185 178 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 186 179 init_rcu_head(&desc->rcu); 187 180 188 - desc_set_defaults(irq, desc, node, owner); 181 + desc_set_defaults(irq, desc, node, affinity, owner); 182 + irqd_set(&desc->irq_data, flags); 189 183 190 184 return desc; 191 185 ··· 231 223 } 232 224 233 225 static int alloc_descs(unsigned int start, 
unsigned int cnt, int node, 234 - struct module *owner) 226 + const struct cpumask *affinity, struct module *owner) 235 227 { 228 + const struct cpumask *mask = NULL; 236 229 struct irq_desc *desc; 237 - int i; 230 + unsigned int flags; 231 + int i, cpu = -1; 232 + 233 + if (affinity && cpumask_empty(affinity)) 234 + return -EINVAL; 235 + 236 + flags = affinity ? IRQD_AFFINITY_MANAGED : 0; 238 237 239 238 for (i = 0; i < cnt; i++) { 240 - desc = alloc_desc(start + i, node, owner); 239 + if (affinity) { 240 + cpu = cpumask_next(cpu, affinity); 241 + if (cpu >= nr_cpu_ids) 242 + cpu = cpumask_first(affinity); 243 + node = cpu_to_node(cpu); 244 + 245 + /* 246 + * For single allocations we use the caller provided 247 + * mask otherwise we use the mask of the target cpu 248 + */ 249 + mask = cnt == 1 ? affinity : cpumask_of(cpu); 250 + } 251 + desc = alloc_desc(start + i, node, flags, mask, owner); 241 252 if (!desc) 242 253 goto err; 243 254 mutex_lock(&sparse_irq_lock); ··· 304 277 nr_irqs = initcnt; 305 278 306 279 for (i = 0; i < initcnt; i++) { 307 - desc = alloc_desc(i, node, NULL); 280 + desc = alloc_desc(i, node, 0, NULL, NULL); 308 281 set_bit(i, allocated_irqs); 309 282 irq_insert_desc(i, desc); 310 283 } ··· 338 311 alloc_masks(&desc[i], GFP_KERNEL, node); 339 312 raw_spin_lock_init(&desc[i].lock); 340 313 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 341 - desc_set_defaults(i, &desc[i], node, NULL); 314 + desc_set_defaults(i, &desc[i], node, NULL, NULL); 342 315 } 343 316 return arch_early_irq_init(); 344 317 } ··· 355 328 unsigned long flags; 356 329 357 330 raw_spin_lock_irqsave(&desc->lock, flags); 358 - desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL); 331 + desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); 359 332 raw_spin_unlock_irqrestore(&desc->lock, flags); 360 333 } 361 334 362 335 static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, 336 + const struct cpumask *affinity, 363 337 struct 
module *owner) 364 338 { 365 339 u32 i; ··· 481 453 * @cnt: Number of consecutive irqs to allocate. 482 454 * @node: Preferred node on which the irq descriptor should be allocated 483 455 * @owner: Owning module (can be NULL) 456 + * @affinity: Optional pointer to an affinity mask which hints where the 457 + * irq descriptors should be allocated and which default 458 + * affinities to use 484 459 * 485 460 * Returns the first irq number or error code 486 461 */ 487 462 int __ref 488 463 __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 489 - struct module *owner) 464 + struct module *owner, const struct cpumask *affinity) 490 465 { 491 466 int start, ret; 492 467 ··· 525 494 526 495 bitmap_set(allocated_irqs, start, cnt); 527 496 mutex_unlock(&sparse_irq_lock); 528 - return alloc_descs(start, cnt, node, owner); 497 + return alloc_descs(start, cnt, node, affinity, owner); 529 498 530 499 err: 531 500 mutex_unlock(&sparse_irq_lock); ··· 543 512 */ 544 513 unsigned int irq_alloc_hwirqs(int cnt, int node) 545 514 { 546 - int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL); 515 + int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL); 547 516 548 517 if (irq < 0) 549 518 return 0;
+73 -21
kernel/irq/irqdomain.c
··· 481 481 } 482 482 483 483 /* Allocate a virtual interrupt number */ 484 - virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node)); 484 + virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL); 485 485 if (virq <= 0) { 486 486 pr_debug("-> virq allocation failed\n"); 487 487 return 0; ··· 567 567 unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec) 568 568 { 569 569 struct irq_domain *domain; 570 + struct irq_data *irq_data; 570 571 irq_hw_number_t hwirq; 571 572 unsigned int type = IRQ_TYPE_NONE; 572 573 int virq; ··· 589 588 if (irq_domain_translate(domain, fwspec, &hwirq, &type)) 590 589 return 0; 591 590 592 - if (irq_domain_is_hierarchy(domain)) { 591 + /* 592 + * WARN if the irqchip returns a type with bits 593 + * outside the sense mask set and clear these bits. 594 + */ 595 + if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK)) 596 + type &= IRQ_TYPE_SENSE_MASK; 597 + 598 + /* 599 + * If we've already configured this interrupt, 600 + * don't do it again, or hell will break loose. 601 + */ 602 + virq = irq_find_mapping(domain, hwirq); 603 + if (virq) { 593 604 /* 594 - * If we've already configured this interrupt, 595 - * don't do it again, or hell will break loose. 605 + * If the trigger type is not specified or matches the 606 + * current trigger type then we are done so return the 607 + * interrupt number. 596 608 */ 597 - virq = irq_find_mapping(domain, hwirq); 598 - if (virq) 609 + if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq)) 599 610 return virq; 600 611 612 + /* 613 + * If the trigger type has not been set yet, then set 614 + * it now and return the interrupt number. 
615 + */ 616 + if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) { 617 + irq_data = irq_get_irq_data(virq); 618 + if (!irq_data) 619 + return 0; 620 + 621 + irqd_set_trigger_type(irq_data, type); 622 + return virq; 623 + } 624 + 625 + pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n", 626 + hwirq, of_node_full_name(to_of_node(fwspec->fwnode))); 627 + return 0; 628 + } 629 + 630 + if (irq_domain_is_hierarchy(domain)) { 601 631 virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec); 602 632 if (virq <= 0) 603 633 return 0; ··· 639 607 return virq; 640 608 } 641 609 642 - /* Set type if specified and different than the current one */ 643 - if (type != IRQ_TYPE_NONE && 644 - type != irq_get_trigger_type(virq)) 645 - irq_set_irq_type(virq, type); 610 + irq_data = irq_get_irq_data(virq); 611 + if (!irq_data) { 612 + if (irq_domain_is_hierarchy(domain)) 613 + irq_domain_free_irqs(virq, 1); 614 + else 615 + irq_dispose_mapping(virq); 616 + return 0; 617 + } 618 + 619 + /* Store trigger type */ 620 + irqd_set_trigger_type(irq_data, type); 621 + 646 622 return virq; 647 623 } 648 624 EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping); ··· 680 640 if (WARN_ON(domain == NULL)) 681 641 return; 682 642 683 - irq_domain_disassociate(domain, virq); 684 - irq_free_desc(virq); 643 + if (irq_domain_is_hierarchy(domain)) { 644 + irq_domain_free_irqs(virq, 1); 645 + } else { 646 + irq_domain_disassociate(domain, virq); 647 + irq_free_desc(virq); 648 + } 685 649 } 686 650 EXPORT_SYMBOL_GPL(irq_dispose_mapping); 687 651 ··· 879 835 EXPORT_SYMBOL_GPL(irq_domain_simple_ops); 880 836 881 837 int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq, 882 - int node) 838 + int node, const struct cpumask *affinity) 883 839 { 884 840 unsigned int hint; 885 841 886 842 if (virq >= 0) { 887 - virq = irq_alloc_descs(virq, virq, cnt, node); 843 + virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE, 844 + affinity); 888 845 } else { 889 846 hint = hwirq % nr_irqs; 
890 847 if (hint == 0) 891 848 hint++; 892 - virq = irq_alloc_descs_from(hint, cnt, node); 893 - if (virq <= 0 && hint > 1) 894 - virq = irq_alloc_descs_from(1, cnt, node); 849 + virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE, 850 + affinity); 851 + if (virq <= 0 && hint > 1) { 852 + virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE, 853 + affinity); 854 + } 895 855 } 896 856 897 857 return virq; ··· 1192 1144 if (recursive) 1193 1145 ret = irq_domain_alloc_irqs_recursive(parent, irq_base, 1194 1146 nr_irqs, arg); 1195 - if (ret >= 0) 1196 - ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg); 1147 + if (ret < 0) 1148 + return ret; 1149 + 1150 + ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg); 1197 1151 if (ret < 0 && recursive) 1198 1152 irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs); 1199 1153 ··· 1210 1160 * @node: NUMA node id for memory allocation 1211 1161 * @arg: domain specific argument 1212 1162 * @realloc: IRQ descriptors have already been allocated if true 1163 + * @affinity: Optional irq affinity mask for multiqueue devices 1213 1164 * 1214 1165 * Allocate IRQ numbers and initialized all data structures to support 1215 1166 * hierarchy IRQ domains. ··· 1226 1175 */ 1227 1176 int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, 1228 1177 unsigned int nr_irqs, int node, void *arg, 1229 - bool realloc) 1178 + bool realloc, const struct cpumask *affinity) 1230 1179 { 1231 1180 int i, ret, virq; 1232 1181 ··· 1244 1193 if (realloc && irq_base >= 0) { 1245 1194 virq = irq_base; 1246 1195 } else { 1247 - virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node); 1196 + virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node, 1197 + affinity); 1248 1198 if (virq < 0) { 1249 1199 pr_debug("cannot allocate IRQ(base %d, count %d)\n", 1250 1200 irq_base, nr_irqs);
+66 -7
kernel/irq/manage.c
··· 115 115 #ifdef CONFIG_SMP 116 116 cpumask_var_t irq_default_affinity; 117 117 118 - static int __irq_can_set_affinity(struct irq_desc *desc) 118 + static bool __irq_can_set_affinity(struct irq_desc *desc) 119 119 { 120 120 if (!desc || !irqd_can_balance(&desc->irq_data) || 121 121 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) 122 - return 0; 123 - return 1; 122 + return false; 123 + return true; 124 124 } 125 125 126 126 /** ··· 131 131 int irq_can_set_affinity(unsigned int irq) 132 132 { 133 133 return __irq_can_set_affinity(irq_to_desc(irq)); 134 + } 135 + 136 + /** 137 + * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space 138 + * @irq: Interrupt to check 139 + * 140 + * Like irq_can_set_affinity() above, but additionally checks for the 141 + * AFFINITY_MANAGED flag. 142 + */ 143 + bool irq_can_set_affinity_usr(unsigned int irq) 144 + { 145 + struct irq_desc *desc = irq_to_desc(irq); 146 + 147 + return __irq_can_set_affinity(desc) && 148 + !irqd_affinity_is_managed(&desc->irq_data); 134 149 } 135 150 136 151 /** ··· 353 338 return 0; 354 339 355 340 /* 356 - * Preserve an userspace affinity setup, but make sure that 357 - * one of the targets is online. 341 + * Preserve the managed affinity setting and an userspace affinity 342 + * setup, but make sure that one of the targets is online. 358 343 */ 359 - if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { 344 + if (irqd_affinity_is_managed(&desc->irq_data) || 345 + irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { 360 346 if (cpumask_intersects(desc->irq_common_data.affinity, 361 347 cpu_online_mask)) 362 348 set = desc->irq_common_data.affinity; ··· 1133 1117 new->irq = irq; 1134 1118 1135 1119 /* 1120 + * If the trigger type is not specified by the caller, 1121 + * then use the default for this interrupt. 
1122 + */ 1123 + if (!(new->flags & IRQF_TRIGGER_MASK)) 1124 + new->flags |= irqd_get_trigger_type(&desc->irq_data); 1125 + 1126 + /* 1136 1127 * Check whether the interrupt nests into another interrupt 1137 1128 * thread. 1138 1129 */ ··· 1432 1409 1433 1410 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1434 1411 return -EINVAL; 1412 + 1413 + retval = irq_chip_pm_get(&desc->irq_data); 1414 + if (retval < 0) 1415 + return retval; 1416 + 1435 1417 chip_bus_lock(desc); 1436 1418 retval = __setup_irq(irq, desc, act); 1437 1419 chip_bus_sync_unlock(desc); 1420 + 1421 + if (retval) 1422 + irq_chip_pm_put(&desc->irq_data); 1438 1423 1439 1424 return retval; 1440 1425 } ··· 1537 1506 } 1538 1507 } 1539 1508 1509 + irq_chip_pm_put(&desc->irq_data); 1540 1510 module_put(desc->owner); 1541 1511 kfree(action->secondary); 1542 1512 return action; ··· 1680 1648 action->name = devname; 1681 1649 action->dev_id = dev_id; 1682 1650 1651 + retval = irq_chip_pm_get(&desc->irq_data); 1652 + if (retval < 0) 1653 + return retval; 1654 + 1683 1655 chip_bus_lock(desc); 1684 1656 retval = __setup_irq(irq, desc, action); 1685 1657 chip_bus_sync_unlock(desc); 1686 1658 1687 1659 if (retval) { 1660 + irq_chip_pm_put(&desc->irq_data); 1688 1661 kfree(action->secondary); 1689 1662 kfree(action); 1690 1663 } ··· 1767 1730 if (!desc) 1768 1731 return; 1769 1732 1733 + /* 1734 + * If the trigger type is not specified by the caller, then 1735 + * use the default for this interrupt. 
1736 + */ 1770 1737 type &= IRQ_TYPE_SENSE_MASK; 1738 + if (type == IRQ_TYPE_NONE) 1739 + type = irqd_get_trigger_type(&desc->irq_data); 1740 + 1771 1741 if (type != IRQ_TYPE_NONE) { 1772 1742 int ret; 1773 1743 ··· 1866 1822 1867 1823 unregister_handler_proc(irq, action); 1868 1824 1825 + irq_chip_pm_put(&desc->irq_data); 1869 1826 module_put(desc->owner); 1870 1827 return action; 1871 1828 ··· 1929 1884 1930 1885 if (!desc || !irq_settings_is_per_cpu_devid(desc)) 1931 1886 return -EINVAL; 1887 + 1888 + retval = irq_chip_pm_get(&desc->irq_data); 1889 + if (retval < 0) 1890 + return retval; 1891 + 1932 1892 chip_bus_lock(desc); 1933 1893 retval = __setup_irq(irq, desc, act); 1934 1894 chip_bus_sync_unlock(desc); 1895 + 1896 + if (retval) 1897 + irq_chip_pm_put(&desc->irq_data); 1935 1898 1936 1899 return retval; 1937 1900 } ··· 1984 1931 action->name = devname; 1985 1932 action->percpu_dev_id = dev_id; 1986 1933 1934 + retval = irq_chip_pm_get(&desc->irq_data); 1935 + if (retval < 0) 1936 + return retval; 1937 + 1987 1938 chip_bus_lock(desc); 1988 1939 retval = __setup_irq(irq, desc, action); 1989 1940 chip_bus_sync_unlock(desc); 1990 1941 1991 - if (retval) 1942 + if (retval) { 1943 + irq_chip_pm_put(&desc->irq_data); 1992 1944 kfree(action); 1945 + } 1993 1946 1994 1947 return retval; 1995 1948 }
+5 -7
kernel/irq/msi.c
··· 324 324 struct msi_domain_ops *ops = info->ops; 325 325 msi_alloc_info_t arg; 326 326 struct msi_desc *desc; 327 - int i, ret, virq = -1; 327 + int i, ret, virq; 328 328 329 329 ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg); 330 330 if (ret) ··· 332 332 333 333 for_each_msi_entry(desc, dev) { 334 334 ops->set_desc(&arg, desc); 335 - if (info->flags & MSI_FLAG_IDENTITY_MAP) 336 - virq = (int)ops->get_hwirq(info, &arg); 337 - else 338 - virq = -1; 339 335 340 - virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, 341 - dev_to_node(dev), &arg, false); 336 + virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used, 337 + dev_to_node(dev), &arg, false, 338 + desc->affinity); 342 339 if (virq < 0) { 343 340 ret = -ENOSPC; 344 341 if (ops->handle_error) ··· 353 356 ops->msi_finish(&arg, 0); 354 357 355 358 for_each_msi_entry(desc, dev) { 359 + virq = desc->irq; 356 360 if (desc->nvec_used == 1) 357 361 dev_dbg(dev, "irq %d for MSI\n", virq); 358 362 else
+2 -9
kernel/irq/proc.c
··· 96 96 cpumask_var_t new_value; 97 97 int err; 98 98 99 - if (!irq_can_set_affinity(irq) || no_irq_affinity) 99 + if (!irq_can_set_affinity_usr(irq) || no_irq_affinity) 100 100 return -EIO; 101 101 102 102 if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) ··· 311 311 !name_unique(irq, action)) 312 312 return; 313 313 314 - memset(name, 0, MAX_NAMELEN); 315 314 snprintf(name, MAX_NAMELEN, "%s", action->name); 316 315 317 316 /* create /proc/irq/1234/handler/ */ ··· 339 340 if (desc->dir) 340 341 goto out_unlock; 341 342 342 - memset(name, 0, MAX_NAMELEN); 343 343 sprintf(name, "%d", irq); 344 344 345 345 /* create /proc/irq/1234 */ ··· 384 386 #endif 385 387 remove_proc_entry("spurious", desc->dir); 386 388 387 - memset(name, 0, MAX_NAMELEN); 388 389 sprintf(name, "%u", irq); 389 390 remove_proc_entry(name, root_irq_dir); 390 391 } ··· 418 421 /* 419 422 * Create entries for all existing IRQs. 420 423 */ 421 - for_each_irq_desc(irq, desc) { 422 - if (!desc) 423 - continue; 424 - 424 + for_each_irq_desc(irq, desc) 425 425 register_irq_proc(irq, desc); 426 - } 427 426 } 428 427 429 428 #ifdef CONFIG_GENERIC_IRQ_SHOW