Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core irq updates from Thomas Gleixner:
"Updates from the irq department:

- Update the interrupt spreading code so it handles numa node with
different CPU counts properly.

- A large overhaul of the ARM GICv3 driver to support new PPI and SPI
ranges.

- Conversion of all alloc_fwnode() users to use physical addresses
instead of virtual addresses so the virtual addresses are not
leaked. The physical address is sufficient to identify the
associated interrupt chip.

- Add support for Marvell MMP3, Amlogic Meson SM1 interrupt chips.

- Enforce interrupt threading at compile time if RT is enabled.

- Small updates and improvements all over the place"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
irqchip/gic-v3-its: Fix LPI release for Multi-MSI devices
irqchip/uniphier-aidet: Use devm_platform_ioremap_resource()
irqdomain: Add the missing assignment of domain->fwnode for named fwnode
irqchip/mmp: Coexist with GIC root IRQ controller
irqchip/mmp: Mask off interrupts from other cores
irqchip/mmp: Add missing chained_irq_{enter,exit}()
irqchip/mmp: Do not use of_address_to_resource() to get mux regs
irqchip/meson-gpio: Add support for meson sm1 SoCs
dt-bindings: interrupt-controller: New binding for the meson sm1 SoCs
genirq/affinity: Remove const qualifier from node_to_cpumask argument
genirq/affinity: Spread vectors on node according to nr_cpu ratio
genirq/affinity: Improve __irq_build_affinity_masks()
irqchip: Remove dev_err() usage after platform_get_irq()
irqchip: Add include guard to irq-partition-percpu.h
irqchip/mmp: Do not call irq_set_default_host() on DT platforms
irqchip/gic-v3-its: Remove the redundant set_bit for lpi_map
irqchip/gic-v3: Add quirks for HIP06/07 invalid GICD_TYPER erratum 161010803
irqchip/gic: Skip DT quirks when evaluating IIDR-based quirks
irqchip/gic-v3: Warn about inconsistent implementations of extended ranges
irqchip/gic-v3: Add EPPI range support
...

+734 -195
+2
Documentation/arm64/silicon-errata.rst
··· 115 115 +----------------+-----------------+-----------------+-----------------------------+ 116 116 | Hisilicon | Hip0{6,7} | #161010701 | N/A | 117 117 +----------------+-----------------+-----------------+-----------------------------+ 118 + | Hisilicon | Hip0{6,7} | #161010803 | N/A | 119 + +----------------+-----------------+-----------------+-----------------------------+ 118 120 | Hisilicon | Hip07 | #161600802 | HISILICON_ERRATUM_161600802 | 119 121 +----------------+-----------------+-----------------+-----------------------------+ 120 122 | Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A |
+1
Documentation/devicetree/bindings/interrupt-controller/amlogic,meson-gpio-intc.txt
··· 16 16 "amlogic,meson-gxl-gpio-intc" for GXL SoCs (S905X, S912) 17 17 "amlogic,meson-axg-gpio-intc" for AXG SoCs (A113D, A113X) 18 18 "amlogic,meson-g12a-gpio-intc" for G12A SoCs (S905D2, S905X2, S905Y2) 19 + "amlogic,meson-sm1-gpio-intc" for SM1 SoCs (S905D3, S905X3, S905Y3) 19 20 - reg : Specifies base physical address and size of the registers. 20 21 - interrupt-controller : Identifies the node as an interrupt controller. 21 22 - #interrupt-cells : Specifies the number of cells needed to encode an
+4 -2
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
··· 44 44 be at least 4. 45 45 46 46 The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI 47 - interrupts. Other values are reserved for future use. 47 + interrupts, 2 for interrupts in the Extended SPI range, 3 for the 48 + Extended PPI range. Other values are reserved for future use. 48 49 49 50 The 2nd cell contains the interrupt number for the interrupt type. 50 51 SPI interrupts are in the range [0-987]. PPI interrupts are in the 51 - range [0-15]. 52 + range [0-15]. Extented SPI interrupts are in the range [0-1023]. 53 + Extended PPI interrupts are in the range [0-127]. 52 54 53 55 The 3rd cell is the flags, encoded as follows: 54 56 bits[3:0] trigger type and level flags.
+3
arch/arm/mach-mmp/regs-icu.h
··· 11 11 #define ICU_VIRT_BASE (AXI_VIRT_BASE + 0x82000) 12 12 #define ICU_REG(x) (ICU_VIRT_BASE + (x)) 13 13 14 + #define ICU2_VIRT_BASE (AXI_VIRT_BASE + 0x84000) 15 + #define ICU2_REG(x) (ICU2_VIRT_BASE + (x)) 16 + 14 17 #define ICU_INT_CONF(n) ICU_REG((n) << 2) 15 18 #define ICU_INT_CONF_MASK (0xf) 16 19
+19 -16
drivers/irqchip/irq-gic-common.c
··· 41 41 void *data) 42 42 { 43 43 for (; quirks->desc; quirks++) { 44 + if (quirks->compatible) 45 + continue; 44 46 if (quirks->iidr != (quirks->mask & iidr)) 45 47 continue; 46 48 if (quirks->init(data)) ··· 65 63 * for "irq", depending on "type". 66 64 */ 67 65 raw_spin_lock_irqsave(&irq_controller_lock, flags); 68 - val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); 66 + val = oldval = readl_relaxed(base + confoff); 69 67 if (type & IRQ_TYPE_LEVEL_MASK) 70 68 val &= ~confmask; 71 69 else if (type & IRQ_TYPE_EDGE_BOTH) ··· 85 83 * does not allow us to set the configuration or we are in a 86 84 * non-secure mode, and hence it may not be catastrophic. 87 85 */ 88 - writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); 89 - if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) { 90 - if (WARN_ON(irq >= 32)) 91 - ret = -EINVAL; 92 - else 93 - pr_warn("GIC: PPI%d is secure or misconfigured\n", 94 - irq - 16); 95 - } 86 + writel_relaxed(val, base + confoff); 87 + if (readl_relaxed(base + confoff) != val) 88 + ret = -EINVAL; 89 + 96 90 raw_spin_unlock_irqrestore(&irq_controller_lock, flags); 97 91 98 92 if (sync_access) ··· 130 132 sync_access(); 131 133 } 132 134 133 - void gic_cpu_config(void __iomem *base, void (*sync_access)(void)) 135 + void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void)) 134 136 { 135 137 int i; 136 138 137 139 /* 138 140 * Deal with the banked PPI and SGI interrupts - disable all 139 - * PPI interrupts, ensure all SGI interrupts are enabled. 140 - * Make sure everything is deactivated. 141 + * private interrupts. Make sure everything is deactivated. 
141 142 */ 142 - writel_relaxed(GICD_INT_EN_CLR_X32, base + GIC_DIST_ACTIVE_CLEAR); 143 - writel_relaxed(GICD_INT_EN_CLR_PPI, base + GIC_DIST_ENABLE_CLEAR); 144 - writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); 143 + for (i = 0; i < nr; i += 32) { 144 + writel_relaxed(GICD_INT_EN_CLR_X32, 145 + base + GIC_DIST_ACTIVE_CLEAR + i / 8); 146 + writel_relaxed(GICD_INT_EN_CLR_X32, 147 + base + GIC_DIST_ENABLE_CLEAR + i / 8); 148 + } 145 149 146 150 /* 147 151 * Set priority on PPI and SGI interrupts 148 152 */ 149 - for (i = 0; i < 32; i += 4) 153 + for (i = 0; i < nr; i += 4) 150 154 writel_relaxed(GICD_INT_DEF_PRI_X4, 151 155 base + GIC_DIST_PRI + i * 4 / 4); 156 + 157 + /* Ensure all SGI interrupts are now enabled */ 158 + writel_relaxed(GICD_INT_EN_SET_SGI, base + GIC_DIST_ENABLE_SET); 152 159 153 160 if (sync_access) 154 161 sync_access();
+1 -1
drivers/irqchip/irq-gic-common.h
··· 22 22 void __iomem *base, void (*sync_access)(void)); 23 23 void gic_dist_config(void __iomem *base, int gic_irqs, 24 24 void (*sync_access)(void)); 25 - void gic_cpu_config(void __iomem *base, void (*sync_access)(void)); 25 + void gic_cpu_config(void __iomem *base, int nr, void (*sync_access)(void)); 26 26 void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, 27 27 void *data); 28 28 void gic_enable_of_quirks(const struct device_node *np,
+1 -1
drivers/irqchip/irq-gic-v2m.c
··· 525 525 spi_start, nr_spis); 526 526 } 527 527 528 - fwnode = irq_domain_alloc_fwnode((void *)m->base_address); 528 + fwnode = irq_domain_alloc_fwnode(&res.start); 529 529 if (!fwnode) { 530 530 pr_err("Unable to allocate GICv2m domain token\n"); 531 531 return -EINVAL;
+6 -7
drivers/irqchip/irq-gic-v3-its.c
··· 2464 2464 { 2465 2465 int idx; 2466 2466 2467 + /* Find a free LPI region in lpi_map and allocate them. */ 2467 2468 idx = bitmap_find_free_region(dev->event_map.lpi_map, 2468 2469 dev->event_map.nr_lpis, 2469 2470 get_count_order(nvecs)); ··· 2472 2471 return -ENOSPC; 2473 2472 2474 2473 *hwirq = dev->event_map.lpi_base + idx; 2475 - set_bit(idx, dev->event_map.lpi_map); 2476 2474 2477 2475 return 0; 2478 2476 } ··· 2641 2641 struct its_node *its = its_dev->its; 2642 2642 int i; 2643 2643 2644 + bitmap_release_region(its_dev->event_map.lpi_map, 2645 + its_get_event_id(irq_domain_get_irq_data(domain, virq)), 2646 + get_count_order(nr_irqs)); 2647 + 2644 2648 for (i = 0; i < nr_irqs; i++) { 2645 2649 struct irq_data *data = irq_domain_get_irq_data(domain, 2646 2650 virq + i); 2647 - u32 event = its_get_event_id(data); 2648 - 2649 - /* Mark interrupt index as unused */ 2650 - clear_bit(event, its_dev->event_map.lpi_map); 2651 - 2652 2651 /* Nuke the entry in the domain */ 2653 2652 irq_domain_reset_irq_data(data); 2654 2653 } ··· 3920 3921 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; 3921 3922 res.flags = IORESOURCE_MEM; 3922 3923 3923 - dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address); 3924 + dom_handle = irq_domain_alloc_fwnode(&res.start); 3924 3925 if (!dom_handle) { 3925 3926 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n", 3926 3927 &res.start);
+303 -81
drivers/irqchip/irq-gic-v3.c
··· 51 51 u32 nr_redist_regions; 52 52 u64 flags; 53 53 bool has_rss; 54 - unsigned int irq_nr; 55 - struct partition_desc *ppi_descs[16]; 54 + unsigned int ppi_nr; 55 + struct partition_desc **ppi_descs; 56 56 }; 57 57 58 58 static struct gic_chip_data gic_data __read_mostly; 59 59 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); 60 + 61 + #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) 62 + #define GIC_LINE_NR max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) 63 + #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) 60 64 61 65 /* 62 66 * The behaviours of RPR and PMR registers differ depending on the value of ··· 88 84 static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); 89 85 90 86 /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ 91 - static refcount_t ppi_nmi_refs[16]; 87 + static refcount_t *ppi_nmi_refs; 92 88 93 89 static struct gic_kvm_info gic_v3_kvm_info; 94 90 static DEFINE_PER_CPU(bool, has_rss); ··· 101 97 /* Our default, arbitrary priority value. Linux only uses one anyway. */ 102 98 #define DEFAULT_PMR_VALUE 0xf0 103 99 100 + enum gic_intid_range { 101 + PPI_RANGE, 102 + SPI_RANGE, 103 + EPPI_RANGE, 104 + ESPI_RANGE, 105 + LPI_RANGE, 106 + __INVALID_RANGE__ 107 + }; 108 + 109 + static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) 110 + { 111 + switch (hwirq) { 112 + case 16 ... 31: 113 + return PPI_RANGE; 114 + case 32 ... 1019: 115 + return SPI_RANGE; 116 + case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): 117 + return EPPI_RANGE; 118 + case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): 119 + return ESPI_RANGE; 120 + case 8192 ... 
GENMASK(23, 0): 121 + return LPI_RANGE; 122 + default: 123 + return __INVALID_RANGE__; 124 + } 125 + } 126 + 127 + static enum gic_intid_range get_intid_range(struct irq_data *d) 128 + { 129 + return __get_intid_range(d->hwirq); 130 + } 131 + 104 132 static inline unsigned int gic_irq(struct irq_data *d) 105 133 { 106 134 return d->hwirq; ··· 140 104 141 105 static inline int gic_irq_in_rdist(struct irq_data *d) 142 106 { 143 - return gic_irq(d) < 32; 107 + enum gic_intid_range range = get_intid_range(d); 108 + return range == PPI_RANGE || range == EPPI_RANGE; 144 109 } 145 110 146 111 static inline void __iomem *gic_dist_base(struct irq_data *d) 147 112 { 148 - if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */ 113 + switch (get_intid_range(d)) { 114 + case PPI_RANGE: 115 + case EPPI_RANGE: 116 + /* SGI+PPI -> SGI_base for this CPU */ 149 117 return gic_data_rdist_sgi_base(); 150 118 151 - if (d->hwirq <= 1023) /* SPI -> dist_base */ 119 + case SPI_RANGE: 120 + case ESPI_RANGE: 121 + /* SPI -> dist_base */ 152 122 return gic_data.dist_base; 153 123 154 - return NULL; 124 + default: 125 + return NULL; 126 + } 155 127 } 156 128 157 129 static void gic_do_wait_for_rwp(void __iomem *base) ··· 240 196 /* 241 197 * Routines to disable, enable, EOI and route interrupts 242 198 */ 199 + static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) 200 + { 201 + switch (get_intid_range(d)) { 202 + case PPI_RANGE: 203 + case SPI_RANGE: 204 + *index = d->hwirq; 205 + return offset; 206 + case EPPI_RANGE: 207 + /* 208 + * Contrary to the ESPI range, the EPPI range is contiguous 209 + * to the PPI range in the registers, so let's adjust the 210 + * displacement accordingly. Consistency is overrated. 
211 + */ 212 + *index = d->hwirq - EPPI_BASE_INTID + 32; 213 + return offset; 214 + case ESPI_RANGE: 215 + *index = d->hwirq - ESPI_BASE_INTID; 216 + switch (offset) { 217 + case GICD_ISENABLER: 218 + return GICD_ISENABLERnE; 219 + case GICD_ICENABLER: 220 + return GICD_ICENABLERnE; 221 + case GICD_ISPENDR: 222 + return GICD_ISPENDRnE; 223 + case GICD_ICPENDR: 224 + return GICD_ICPENDRnE; 225 + case GICD_ISACTIVER: 226 + return GICD_ISACTIVERnE; 227 + case GICD_ICACTIVER: 228 + return GICD_ICACTIVERnE; 229 + case GICD_IPRIORITYR: 230 + return GICD_IPRIORITYRnE; 231 + case GICD_ICFGR: 232 + return GICD_ICFGRnE; 233 + case GICD_IROUTER: 234 + return GICD_IROUTERnE; 235 + default: 236 + break; 237 + } 238 + break; 239 + default: 240 + break; 241 + } 242 + 243 + WARN_ON(1); 244 + *index = d->hwirq; 245 + return offset; 246 + } 247 + 243 248 static int gic_peek_irq(struct irq_data *d, u32 offset) 244 249 { 245 - u32 mask = 1 << (gic_irq(d) % 32); 246 250 void __iomem *base; 251 + u32 index, mask; 252 + 253 + offset = convert_offset_index(d, offset, &index); 254 + mask = 1 << (index % 32); 247 255 248 256 if (gic_irq_in_rdist(d)) 249 257 base = gic_data_rdist_sgi_base(); 250 258 else 251 259 base = gic_data.dist_base; 252 260 253 - return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); 261 + return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); 254 262 } 255 263 256 264 static void gic_poke_irq(struct irq_data *d, u32 offset) 257 265 { 258 - u32 mask = 1 << (gic_irq(d) % 32); 259 266 void (*rwp_wait)(void); 260 267 void __iomem *base; 268 + u32 index, mask; 269 + 270 + offset = convert_offset_index(d, offset, &index); 271 + mask = 1 << (index % 32); 261 272 262 273 if (gic_irq_in_rdist(d)) { 263 274 base = gic_data_rdist_sgi_base(); ··· 322 223 rwp_wait = gic_dist_wait_for_rwp; 323 224 } 324 225 325 - writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4); 226 + writel_relaxed(mask, base + offset + (index / 32) * 4); 326 227 rwp_wait(); 
327 228 } 328 229 ··· 362 263 { 363 264 u32 reg; 364 265 365 - if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ 266 + if (d->hwirq >= 8192) /* PPI/SPI only */ 366 267 return -EINVAL; 367 268 368 269 switch (which) { ··· 389 290 static int gic_irq_get_irqchip_state(struct irq_data *d, 390 291 enum irqchip_irq_state which, bool *val) 391 292 { 392 - if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ 293 + if (d->hwirq >= 8192) /* PPI/SPI only */ 393 294 return -EINVAL; 394 295 395 296 switch (which) { ··· 415 316 static void gic_irq_set_prio(struct irq_data *d, u8 prio) 416 317 { 417 318 void __iomem *base = gic_dist_base(d); 319 + u32 offset, index; 418 320 419 - writeb_relaxed(prio, base + GICD_IPRIORITYR + gic_irq(d)); 321 + offset = convert_offset_index(d, GICD_IPRIORITYR, &index); 322 + 323 + writeb_relaxed(prio, base + offset + index); 324 + } 325 + 326 + static u32 gic_get_ppi_index(struct irq_data *d) 327 + { 328 + switch (get_intid_range(d)) { 329 + case PPI_RANGE: 330 + return d->hwirq - 16; 331 + case EPPI_RANGE: 332 + return d->hwirq - EPPI_BASE_INTID + 16; 333 + default: 334 + unreachable(); 335 + } 420 336 } 421 337 422 338 static int gic_irq_nmi_setup(struct irq_data *d) ··· 454 340 return -EINVAL; 455 341 456 342 /* desc lock should already be held */ 457 - if (gic_irq(d) < 32) { 343 + if (gic_irq_in_rdist(d)) { 344 + u32 idx = gic_get_ppi_index(d); 345 + 458 346 /* Setting up PPI as NMI, only switch handler for first NMI */ 459 - if (!refcount_inc_not_zero(&ppi_nmi_refs[gic_irq(d) - 16])) { 460 - refcount_set(&ppi_nmi_refs[gic_irq(d) - 16], 1); 347 + if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) { 348 + refcount_set(&ppi_nmi_refs[idx], 1); 461 349 desc->handle_irq = handle_percpu_devid_fasteoi_nmi; 462 350 } 463 351 } else { ··· 491 375 return; 492 376 493 377 /* desc lock should already be held */ 494 - if (gic_irq(d) < 32) { 378 + if (gic_irq_in_rdist(d)) { 379 + u32 idx = gic_get_ppi_index(d); 380 + 495 381 /* Tearing down NMI, only switch 
handler for last NMI */ 496 - if (refcount_dec_and_test(&ppi_nmi_refs[gic_irq(d) - 16])) 382 + if (refcount_dec_and_test(&ppi_nmi_refs[idx])) 497 383 desc->handle_irq = handle_percpu_devid_irq; 498 384 } else { 499 385 desc->handle_irq = handle_fasteoi_irq; ··· 522 404 523 405 static int gic_set_type(struct irq_data *d, unsigned int type) 524 406 { 407 + enum gic_intid_range range; 525 408 unsigned int irq = gic_irq(d); 526 409 void (*rwp_wait)(void); 527 410 void __iomem *base; 411 + u32 offset, index; 412 + int ret; 528 413 529 414 /* Interrupt configuration for SGIs can't be changed */ 530 415 if (irq < 16) 531 416 return -EINVAL; 532 417 418 + range = get_intid_range(d); 419 + 533 420 /* SPIs have restrictions on the supported types */ 534 - if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && 535 - type != IRQ_TYPE_EDGE_RISING) 421 + if ((range == SPI_RANGE || range == ESPI_RANGE) && 422 + type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) 536 423 return -EINVAL; 537 424 538 425 if (gic_irq_in_rdist(d)) { ··· 548 425 rwp_wait = gic_dist_wait_for_rwp; 549 426 } 550 427 551 - return gic_configure_irq(irq, type, base, rwp_wait); 428 + offset = convert_offset_index(d, GICD_ICFGR, &index); 429 + 430 + ret = gic_configure_irq(index, type, base + offset, rwp_wait); 431 + if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { 432 + /* Misconfigured PPIs are usually not fatal */ 433 + pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq); 434 + ret = 0; 435 + } 436 + 437 + return ret; 552 438 } 553 439 554 440 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) ··· 632 500 gic_arch_enable_irqs(); 633 501 } 634 502 635 - if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { 503 + /* Check for special IDs first */ 504 + if ((irqnr >= 1020 && irqnr <= 1023)) 505 + return; 506 + 507 + /* Treat anything but SGIs in a uniform way */ 508 + if (likely(irqnr > 15)) { 636 509 int err; 637 510 638 511 if 
(static_branch_likely(&supports_deactivate_key)) ··· 725 588 * do the right thing if the kernel is running in secure mode, 726 589 * but that's not the intended use case anyway. 727 590 */ 728 - for (i = 32; i < gic_data.irq_nr; i += 32) 591 + for (i = 32; i < GIC_LINE_NR; i += 32) 729 592 writel_relaxed(~0, base + GICD_IGROUPR + i / 8); 730 593 731 - gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp); 594 + /* Extended SPI range, not handled by the GICv2/GICv3 common code */ 595 + for (i = 0; i < GIC_ESPI_NR; i += 32) { 596 + writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); 597 + writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); 598 + } 599 + 600 + for (i = 0; i < GIC_ESPI_NR; i += 32) 601 + writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); 602 + 603 + for (i = 0; i < GIC_ESPI_NR; i += 16) 604 + writel_relaxed(0, base + GICD_ICFGRnE + i / 4); 605 + 606 + for (i = 0; i < GIC_ESPI_NR; i += 4) 607 + writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i); 608 + 609 + /* Now do the common stuff, and wait for the distributor to drain */ 610 + gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp); 732 611 733 612 /* Enable distributor with ARE, Group1 */ 734 613 writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, ··· 755 602 * enabled. 
756 603 */ 757 604 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); 758 - for (i = 32; i < gic_data.irq_nr; i++) 605 + for (i = 32; i < GIC_LINE_NR; i++) 759 606 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); 607 + 608 + for (i = 0; i < GIC_ESPI_NR; i++) 609 + gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); 760 610 } 761 611 762 612 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) ··· 845 689 return -ENODEV; 846 690 } 847 691 848 - static int __gic_update_vlpi_properties(struct redist_region *region, 849 - void __iomem *ptr) 692 + static int __gic_update_rdist_properties(struct redist_region *region, 693 + void __iomem *ptr) 850 694 { 851 695 u64 typer = gic_read_typer(ptr + GICR_TYPER); 852 696 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); 853 697 gic_data.rdists.has_direct_lpi &= !!(typer & GICR_TYPER_DirectLPIS); 698 + gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); 854 699 855 700 return 1; 856 701 } 857 702 858 - static void gic_update_vlpi_properties(void) 703 + static void gic_update_rdist_properties(void) 859 704 { 860 - gic_iterate_rdists(__gic_update_vlpi_properties); 705 + gic_data.ppi_nr = UINT_MAX; 706 + gic_iterate_rdists(__gic_update_rdist_properties); 707 + if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) 708 + gic_data.ppi_nr = 0; 709 + pr_info("%d PPIs implemented\n", gic_data.ppi_nr); 861 710 pr_info("%sVLPI support, %sdirect LPI support\n", 862 711 !gic_data.rdists.has_vlpis ? "no " : "", 863 712 !gic_data.rdists.has_direct_lpi ? 
"no " : ""); ··· 1006 845 static void gic_cpu_init(void) 1007 846 { 1008 847 void __iomem *rbase; 848 + int i; 1009 849 1010 850 /* Register ourselves with the rest of the world */ 1011 851 if (gic_populate_rdist()) ··· 1014 852 1015 853 gic_enable_redist(true); 1016 854 855 + WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && 856 + !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), 857 + "Distributor has extended ranges, but CPU%d doesn't\n", 858 + smp_processor_id()); 859 + 1017 860 rbase = gic_data_rdist_sgi_base(); 1018 861 1019 862 /* Configure SGIs/PPIs as non-secure Group-1 */ 1020 - writel_relaxed(~0, rbase + GICR_IGROUPR0); 863 + for (i = 0; i < gic_data.ppi_nr + 16; i += 32) 864 + writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); 1021 865 1022 - gic_cpu_config(rbase, gic_redist_wait_for_rwp); 866 + gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); 1023 867 1024 868 /* initialise system registers */ 1025 869 gic_cpu_sys_reg_init(); ··· 1129 961 bool force) 1130 962 { 1131 963 unsigned int cpu; 964 + u32 offset, index; 1132 965 void __iomem *reg; 1133 966 int enabled; 1134 967 u64 val; ··· 1150 981 if (enabled) 1151 982 gic_mask_irq(d); 1152 983 1153 - reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); 984 + offset = convert_offset_index(d, GICD_IROUTER, &index); 985 + reg = gic_dist_base(d) + offset + (index * 8); 1154 986 val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); 1155 987 1156 988 gic_write_irouter(val, reg); ··· 1235 1065 IRQCHIP_MASK_ON_SUSPEND, 1236 1066 }; 1237 1067 1238 - #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) 1239 - 1240 1068 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 1241 1069 irq_hw_number_t hw) 1242 1070 { ··· 1243 1075 if (static_branch_likely(&supports_deactivate_key)) 1244 1076 chip = &gic_eoimode1_chip; 1245 1077 1246 - /* SGIs are private to the core kernel */ 1247 - if (hw < 16) 1248 - return -EPERM; 1249 - /* Nothing here */ 1250 - if (hw >= 
gic_data.irq_nr && hw < 8192) 1251 - return -EPERM; 1252 - /* Off limits */ 1253 - if (hw >= GIC_ID_NR) 1254 - return -EPERM; 1255 - 1256 - /* PPIs */ 1257 - if (hw < 32) { 1078 + switch (__get_intid_range(hw)) { 1079 + case PPI_RANGE: 1080 + case EPPI_RANGE: 1258 1081 irq_set_percpu_devid(irq); 1259 1082 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1260 1083 handle_percpu_devid_irq, NULL, NULL); 1261 1084 irq_set_status_flags(irq, IRQ_NOAUTOEN); 1262 - } 1263 - /* SPIs */ 1264 - if (hw >= 32 && hw < gic_data.irq_nr) { 1085 + break; 1086 + 1087 + case SPI_RANGE: 1088 + case ESPI_RANGE: 1265 1089 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1266 1090 handle_fasteoi_irq, NULL, NULL); 1267 1091 irq_set_probe(irq); 1268 1092 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); 1269 - } 1270 - /* LPIs */ 1271 - if (hw >= 8192 && hw < GIC_ID_NR) { 1093 + break; 1094 + 1095 + case LPI_RANGE: 1272 1096 if (!gic_dist_supports_lpis()) 1273 1097 return -EPERM; 1274 1098 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1275 1099 handle_fasteoi_irq, NULL, NULL); 1100 + break; 1101 + 1102 + default: 1103 + return -EPERM; 1276 1104 } 1277 1105 1278 1106 return 0; ··· 1290 1126 *hwirq = fwspec->param[1] + 32; 1291 1127 break; 1292 1128 case 1: /* PPI */ 1293 - case GIC_IRQ_TYPE_PARTITION: 1294 1129 *hwirq = fwspec->param[1] + 16; 1130 + break; 1131 + case 2: /* ESPI */ 1132 + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; 1133 + break; 1134 + case 3: /* EPPI */ 1135 + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; 1295 1136 break; 1296 1137 case GIC_IRQ_TYPE_LPI: /* LPI */ 1297 1138 *hwirq = fwspec->param[1]; 1139 + break; 1140 + case GIC_IRQ_TYPE_PARTITION: 1141 + *hwirq = fwspec->param[1]; 1142 + if (fwspec->param[1] >= 16) 1143 + *hwirq += EPPI_BASE_INTID - 16; 1144 + else 1145 + *hwirq += 16; 1298 1146 break; 1299 1147 default: 1300 1148 return -EINVAL; ··· 1387 1211 * then we need to match the partition domain. 
1388 1212 */ 1389 1213 if (fwspec->param_count >= 4 && 1390 - fwspec->param[0] == 1 && fwspec->param[3] != 0) 1214 + fwspec->param[0] == 1 && fwspec->param[3] != 0 && 1215 + gic_data.ppi_descs) 1391 1216 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); 1392 1217 1393 1218 return d == gic_data.domain; ··· 1408 1231 { 1409 1232 struct device_node *np; 1410 1233 int ret; 1234 + 1235 + if (!gic_data.ppi_descs) 1236 + return -ENOMEM; 1411 1237 1412 1238 np = of_find_node_by_phandle(fwspec->param[3]); 1413 1239 if (WARN_ON(!np)) ··· 1441 1261 return true; 1442 1262 } 1443 1263 1264 + static bool gic_enable_quirk_hip06_07(void *data) 1265 + { 1266 + struct gic_chip_data *d = data; 1267 + 1268 + /* 1269 + * HIP06 GICD_IIDR clashes with GIC-600 product number (despite 1270 + * not being an actual ARM implementation). The saving grace is 1271 + * that GIC-600 doesn't have ESPI, so nothing to do in that case. 1272 + * HIP07 doesn't even have a proper IIDR, and still pretends to 1273 + * have ESPI. In both cases, put them right. 1274 + */ 1275 + if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { 1276 + /* Zero both ESPI and the RES0 field next to it... 
*/ 1277 + d->rdists.gicd_typer &= ~GENMASK(9, 8); 1278 + return true; 1279 + } 1280 + 1281 + return false; 1282 + } 1283 + 1284 + static const struct gic_quirk gic_quirks[] = { 1285 + { 1286 + .desc = "GICv3: Qualcomm MSM8996 broken firmware", 1287 + .compatible = "qcom,msm8996-gic-v3", 1288 + .init = gic_enable_quirk_msm8996, 1289 + }, 1290 + { 1291 + .desc = "GICv3: HIP06 erratum 161010803", 1292 + .iidr = 0x0204043b, 1293 + .mask = 0xffffffff, 1294 + .init = gic_enable_quirk_hip06_07, 1295 + }, 1296 + { 1297 + .desc = "GICv3: HIP07 erratum 161010803", 1298 + .iidr = 0x00000000, 1299 + .mask = 0xffffffff, 1300 + .init = gic_enable_quirk_hip06_07, 1301 + }, 1302 + { 1303 + } 1304 + }; 1305 + 1444 1306 static void gic_enable_nmi_support(void) 1445 1307 { 1446 1308 int i; 1447 1309 1448 - for (i = 0; i < 16; i++) 1310 + if (!gic_prio_masking_enabled()) 1311 + return; 1312 + 1313 + if (gic_has_group0() && !gic_dist_security_disabled()) { 1314 + pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n"); 1315 + return; 1316 + } 1317 + 1318 + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); 1319 + if (!ppi_nmi_refs) 1320 + return; 1321 + 1322 + for (i = 0; i < gic_data.ppi_nr; i++) 1449 1323 refcount_set(&ppi_nmi_refs[i], 0); 1450 1324 1451 1325 static_branch_enable(&supports_pseudo_nmis); ··· 1517 1283 struct fwnode_handle *handle) 1518 1284 { 1519 1285 u32 typer; 1520 - int gic_irqs; 1521 1286 int err; 1522 1287 1523 1288 if (!is_hyp_mode_available()) ··· 1533 1300 1534 1301 /* 1535 1302 * Find out how many interrupts are supported. 
1536 - * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) 1537 1303 */ 1538 1304 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); 1539 1305 gic_data.rdists.gicd_typer = typer; 1540 - gic_irqs = GICD_TYPER_IRQS(typer); 1541 - if (gic_irqs > 1020) 1542 - gic_irqs = 1020; 1543 - gic_data.irq_nr = gic_irqs; 1544 1306 1307 + gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), 1308 + gic_quirks, &gic_data); 1309 + 1310 + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); 1311 + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); 1545 1312 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, 1546 1313 &gic_data); 1547 1314 irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); ··· 1566 1333 1567 1334 set_handle_irq(gic_handle_irq); 1568 1335 1569 - gic_update_vlpi_properties(); 1336 + gic_update_rdist_properties(); 1570 1337 1571 1338 gic_smp_init(); 1572 1339 gic_dist_init(); ··· 1581 1348 gicv2m_init(handle, gic_data.domain); 1582 1349 } 1583 1350 1584 - if (gic_prio_masking_enabled()) { 1585 - if (!gic_has_group0() || gic_dist_security_disabled()) 1586 - gic_enable_nmi_support(); 1587 - else 1588 - pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n"); 1589 - } 1351 + gic_enable_nmi_support(); 1590 1352 1591 1353 return 0; 1592 1354 ··· 1612 1384 1613 1385 parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); 1614 1386 if (!parts_node) 1387 + return; 1388 + 1389 + gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); 1390 + if (!gic_data.ppi_descs) 1615 1391 return; 1616 1392 1617 1393 nr_parts = of_get_child_count(parts_node); ··· 1669 1437 part_idx++; 1670 1438 } 1671 1439 1672 - for (i = 0; i < 16; i++) { 1440 + for (i = 0; i < gic_data.ppi_nr; i++) { 1673 1441 unsigned int irq; 1674 1442 struct partition_desc *desc; 1675 1443 struct irq_fwspec ppi_fwspec = { ··· 1721 1489 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; 1722 1490 
gic_set_kvm_info(&gic_v3_kvm_info); 1723 1491 } 1724 - 1725 - static const struct gic_quirk gic_quirks[] = { 1726 - { 1727 - .desc = "GICv3: Qualcomm MSM8996 broken firmware", 1728 - .compatible = "qcom,msm8996-gic-v3", 1729 - .init = gic_enable_quirk_msm8996, 1730 - }, 1731 - { 1732 - } 1733 - }; 1734 1492 1735 1493 static int __init gic_of_init(struct device_node *node, struct device_node *parent) 1736 1494 { ··· 2067 1845 if (err) 2068 1846 goto out_redist_unmap; 2069 1847 2070 - domain_handle = irq_domain_alloc_fwnode(acpi_data.dist_base); 1848 + domain_handle = irq_domain_alloc_fwnode(&dist->base_address); 2071 1849 if (!domain_handle) { 2072 1850 err = -ENOMEM; 2073 1851 goto out_redist_unmap;
+11 -3
drivers/irqchip/irq-gic.c
··· 291 291 { 292 292 void __iomem *base = gic_dist_base(d); 293 293 unsigned int gicirq = gic_irq(d); 294 + int ret; 294 295 295 296 /* Interrupt configuration for SGIs can't be changed */ 296 297 if (gicirq < 16) ··· 302 301 type != IRQ_TYPE_EDGE_RISING) 303 302 return -EINVAL; 304 303 305 - return gic_configure_irq(gicirq, type, base, NULL); 304 + ret = gic_configure_irq(gicirq, type, base + GIC_DIST_CONFIG, NULL); 305 + if (ret && gicirq < 32) { 306 + /* Misconfigured PPIs are usually not fatal */ 307 + pr_warn("GIC: PPI%d is secure or misconfigured\n", gicirq - 16); 308 + ret = 0; 309 + } 310 + 311 + return ret; 306 312 } 307 313 308 314 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) ··· 543 535 gic_cpu_map[i] &= ~cpu_mask; 544 536 } 545 537 546 - gic_cpu_config(dist_base, NULL); 538 + gic_cpu_config(dist_base, 32, NULL); 547 539 548 540 writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); 549 541 gic_cpu_if_up(gic); ··· 1635 1627 /* 1636 1628 * Initialize GIC instance zero (no multi-GIC support). 1637 1629 */ 1638 - domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base); 1630 + domain_handle = irq_domain_alloc_fwnode(&dist->base_address); 1639 1631 if (!domain_handle) { 1640 1632 pr_err("Unable to allocate domain handle\n"); 1641 1633 gic_teardown(gic);
+7 -2
drivers/irqchip/irq-hip04.c
··· 130 130 131 131 raw_spin_lock(&irq_controller_lock); 132 132 133 - ret = gic_configure_irq(irq, type, base, NULL); 133 + ret = gic_configure_irq(irq, type, base + GIC_DIST_CONFIG, NULL); 134 + if (ret && irq < 32) { 135 + /* Misconfigured PPIs are usually not fatal */ 136 + pr_warn("GIC: PPI%d is secure or misconfigured\n", irq - 16); 137 + ret = 0; 138 + } 134 139 135 140 raw_spin_unlock(&irq_controller_lock); 136 141 ··· 273 268 if (i != cpu) 274 269 hip04_cpu_map[i] &= ~cpu_mask; 275 270 276 - gic_cpu_config(dist_base, NULL); 271 + gic_cpu_config(dist_base, 32, NULL); 277 272 278 273 writel_relaxed(0xf0, base + GIC_CPU_PRIMASK); 279 274 writel_relaxed(1, base + GIC_CPU_CTRL);
+2 -6
drivers/irqchip/irq-imgpdc.c
··· 362 362 } 363 363 for (i = 0; i < priv->nr_perips; ++i) { 364 364 irq = platform_get_irq(pdev, 1 + i); 365 - if (irq < 0) { 366 - dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i); 365 + if (irq < 0) 367 366 return irq; 368 - } 369 367 priv->perip_irqs[i] = irq; 370 368 } 371 369 /* check if too many were provided */ ··· 374 376 375 377 /* Get syswake IRQ number */ 376 378 irq = platform_get_irq(pdev, 0); 377 - if (irq < 0) { 378 - dev_err(&pdev->dev, "cannot find syswake IRQ\n"); 379 + if (irq < 0) 379 380 return irq; 380 - } 381 381 priv->syswake_irq = irq; 382 382 383 383 /* Set up an IRQ domain */
+1 -1
drivers/irqchip/irq-ixp4xx.c
··· 319 319 pr_crit("IXP4XX: could not ioremap interrupt controller\n"); 320 320 return; 321 321 } 322 - fwnode = irq_domain_alloc_fwnode(base); 322 + fwnode = irq_domain_alloc_fwnode(&irqbase); 323 323 if (!fwnode) { 324 324 pr_crit("IXP4XX: no domain handle\n"); 325 325 return;
+1 -3
drivers/irqchip/irq-keystone.c
··· 164 164 } 165 165 166 166 kirq->irq = platform_get_irq(pdev, 0); 167 - if (kirq->irq < 0) { 168 - dev_err(dev, "no irq resource %d\n", kirq->irq); 167 + if (kirq->irq < 0) 169 168 return kirq->irq; 170 - } 171 169 172 170 kirq->dev = dev; 173 171 kirq->mask = ~0x0;
+38 -14
drivers/irqchip/irq-meson-gpio.c
··· 24 24 #define REG_PIN_47_SEL 0x08 25 25 #define REG_FILTER_SEL 0x0c 26 26 27 - #define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x))) 27 + /* 28 + * Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by 29 + * bits 24 to 31. Tests on the actual HW show that these bits are 30 + * stuck at 0. Bits 8 to 15 are responsive and have the expected 31 + * effect. 32 + */ 28 33 #define REG_EDGE_POL_EDGE(x) BIT(x) 29 34 #define REG_EDGE_POL_LOW(x) BIT(16 + (x)) 35 + #define REG_BOTH_EDGE(x) BIT(8 + (x)) 36 + #define REG_EDGE_POL_MASK(x) ( \ 37 + REG_EDGE_POL_EDGE(x) | \ 38 + REG_EDGE_POL_LOW(x) | \ 39 + REG_BOTH_EDGE(x)) 30 40 #define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8) 31 41 #define REG_FILTER_SEL_SHIFT(x) ((x) * 4) 32 42 33 43 struct meson_gpio_irq_params { 34 44 unsigned int nr_hwirq; 45 + bool support_edge_both; 35 46 }; 36 47 37 48 static const struct meson_gpio_irq_params meson8_params = { ··· 65 54 .nr_hwirq = 100, 66 55 }; 67 56 57 + static const struct meson_gpio_irq_params sm1_params = { 58 + .nr_hwirq = 100, 59 + .support_edge_both = true, 60 + }; 61 + 68 62 static const struct of_device_id meson_irq_gpio_matches[] = { 69 63 { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params }, 70 64 { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, ··· 77 61 { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params }, 78 62 { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params }, 79 63 { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params }, 64 + { .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params }, 80 65 { } 81 66 }; 82 67 83 68 struct meson_gpio_irq_controller { 84 - unsigned int nr_hwirq; 69 + const struct meson_gpio_irq_params *params; 85 70 void __iomem *base; 86 71 u32 channel_irqs[NUM_CHANNEL]; 87 72 DECLARE_BITMAP(channel_map, NUM_CHANNEL); ··· 185 168 */ 186 169 type &= IRQ_TYPE_SENSE_MASK; 187 170 188 - if (type == IRQ_TYPE_EDGE_BOTH) 189 - return -EINVAL; 171 + /* 
172 + * New controller support EDGE_BOTH trigger. This setting takes 173 + * precedence over the other edge/polarity settings 174 + */ 175 + if (type == IRQ_TYPE_EDGE_BOTH) { 176 + if (!ctl->params->support_edge_both) 177 + return -EINVAL; 190 178 191 - if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 192 - val |= REG_EDGE_POL_EDGE(idx); 179 + val |= REG_BOTH_EDGE(idx); 180 + } else { 181 + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 182 + val |= REG_EDGE_POL_EDGE(idx); 193 183 194 - if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) 195 - val |= REG_EDGE_POL_LOW(idx); 184 + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) 185 + val |= REG_EDGE_POL_LOW(idx); 186 + } 196 187 197 188 spin_lock(&ctl->lock); 198 189 ··· 224 199 */ 225 200 if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) 226 201 type |= IRQ_TYPE_LEVEL_HIGH; 227 - else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 202 + else 228 203 type |= IRQ_TYPE_EDGE_RISING; 229 204 230 205 return type; ··· 353 328 struct meson_gpio_irq_controller *ctl) 354 329 { 355 330 const struct of_device_id *match; 356 - const struct meson_gpio_irq_params *params; 357 331 int ret; 358 332 359 333 match = of_match_node(meson_irq_gpio_matches, node); 360 334 if (!match) 361 335 return -ENODEV; 362 336 363 - params = match->data; 364 - ctl->nr_hwirq = params->nr_hwirq; 337 + ctl->params = match->data; 365 338 366 339 ret = of_property_read_variable_u32_array(node, 367 340 "amlogic,channel-interrupts", ··· 408 385 if (ret) 409 386 goto free_channel_irqs; 410 387 411 - domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq, 388 + domain = irq_domain_create_hierarchy(parent_domain, 0, 389 + ctl->params->nr_hwirq, 412 390 of_node_to_fwnode(node), 413 391 &meson_gpio_irq_domain_ops, 414 392 ctl); ··· 420 396 } 421 397 422 398 pr_info("%d to %d gpio interrupt mux initialized\n", 423 - ctl->nr_hwirq, NUM_CHANNEL); 399 + ctl->params->nr_hwirq, NUM_CHANNEL); 424 400 
425 401 return 0; 426 402
+74 -12
drivers/irqchip/irq-mmp.c
··· 13 13 #include <linux/init.h> 14 14 #include <linux/irq.h> 15 15 #include <linux/irqchip.h> 16 + #include <linux/irqchip/chained_irq.h> 16 17 #include <linux/irqdomain.h> 17 18 #include <linux/io.h> 18 19 #include <linux/ioport.h> ··· 44 43 unsigned int conf_enable; 45 44 unsigned int conf_disable; 46 45 unsigned int conf_mask; 46 + unsigned int conf2_mask; 47 47 unsigned int clr_mfp_irq_base; 48 48 unsigned int clr_mfp_hwirq; 49 49 struct irq_domain *domain; ··· 54 52 unsigned int conf_enable; 55 53 unsigned int conf_disable; 56 54 unsigned int conf_mask; 55 + unsigned int conf2_mask; 57 56 }; 58 57 59 58 static void __iomem *mmp_icu_base; 59 + static void __iomem *mmp_icu2_base; 60 60 static struct icu_chip_data icu_data[MAX_ICU_NR]; 61 61 static int max_icu_nr; 62 62 ··· 101 97 r &= ~data->conf_mask; 102 98 r |= data->conf_disable; 103 99 writel_relaxed(r, mmp_icu_base + (hwirq << 2)); 100 + 101 + if (data->conf2_mask) { 102 + /* 103 + * ICU1 (above) only controls PJ4 MP1; if using SMP, 104 + * we need to also mask the MP2 and MM cores via ICU2. 
105 + */ 106 + r = readl_relaxed(mmp_icu2_base + (hwirq << 2)); 107 + r &= ~data->conf2_mask; 108 + writel_relaxed(r, mmp_icu2_base + (hwirq << 2)); 109 + } 104 110 } else { 105 111 r = readl_relaxed(data->reg_mask) | (1 << hwirq); 106 112 writel_relaxed(r, data->reg_mask); ··· 146 132 static void icu_mux_irq_demux(struct irq_desc *desc) 147 133 { 148 134 unsigned int irq = irq_desc_get_irq(desc); 135 + struct irq_chip *chip = irq_desc_get_chip(desc); 149 136 struct irq_domain *domain; 150 137 struct icu_chip_data *data; 151 138 int i; 152 139 unsigned long mask, status, n; 140 + 141 + chained_irq_enter(chip, desc); 153 142 154 143 for (i = 1; i < max_icu_nr; i++) { 155 144 if (irq == icu_data[i].cascade_irq) { ··· 163 146 } 164 147 if (i >= max_icu_nr) { 165 148 pr_err("Spurious irq %d in MMP INTC\n", irq); 166 - return; 149 + goto out; 167 150 } 168 151 169 152 mask = readl_relaxed(data->reg_mask); ··· 175 158 generic_handle_irq(icu_data[i].virq_base + n); 176 159 } 177 160 } 161 + 162 + out: 163 + chained_irq_exit(chip, desc); 178 164 } 179 165 180 166 static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq, ··· 212 192 .conf_disable = 0x0, 213 193 .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | 214 194 MMP2_ICU_INT_ROUTE_PJ4_FIQ, 195 + }; 196 + 197 + static struct mmp_intc_conf mmp3_conf = { 198 + .conf_enable = 0x20, 199 + .conf_disable = 0x0, 200 + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | 201 + MMP2_ICU_INT_ROUTE_PJ4_FIQ, 202 + .conf2_mask = 0xf0, 215 203 }; 216 204 217 205 static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) ··· 423 395 icu_data[0].conf_enable = mmp_conf.conf_enable; 424 396 icu_data[0].conf_disable = mmp_conf.conf_disable; 425 397 icu_data[0].conf_mask = mmp_conf.conf_mask; 426 - irq_set_default_host(icu_data[0].domain); 427 398 set_handle_irq(mmp_handle_irq); 428 399 max_icu_nr = 1; 429 400 return 0; ··· 441 414 icu_data[0].conf_enable = mmp2_conf.conf_enable; 442 415 icu_data[0].conf_disable = 
mmp2_conf.conf_disable; 443 416 icu_data[0].conf_mask = mmp2_conf.conf_mask; 444 - irq_set_default_host(icu_data[0].domain); 445 417 set_handle_irq(mmp2_handle_irq); 446 418 max_icu_nr = 1; 447 419 return 0; 448 420 } 449 421 IRQCHIP_DECLARE(mmp2_intc, "mrvl,mmp2-intc", mmp2_of_init); 450 422 423 + static int __init mmp3_of_init(struct device_node *node, 424 + struct device_node *parent) 425 + { 426 + int ret; 427 + 428 + mmp_icu2_base = of_iomap(node, 1); 429 + if (!mmp_icu2_base) { 430 + pr_err("Failed to get interrupt controller register #2\n"); 431 + return -ENODEV; 432 + } 433 + 434 + ret = mmp_init_bases(node); 435 + if (ret < 0) { 436 + iounmap(mmp_icu2_base); 437 + return ret; 438 + } 439 + 440 + icu_data[0].conf_enable = mmp3_conf.conf_enable; 441 + icu_data[0].conf_disable = mmp3_conf.conf_disable; 442 + icu_data[0].conf_mask = mmp3_conf.conf_mask; 443 + icu_data[0].conf2_mask = mmp3_conf.conf2_mask; 444 + 445 + if (!parent) { 446 + /* This is the main interrupt controller. */ 447 + set_handle_irq(mmp2_handle_irq); 448 + } 449 + 450 + max_icu_nr = 1; 451 + return 0; 452 + } 453 + IRQCHIP_DECLARE(mmp3_intc, "marvell,mmp3-intc", mmp3_of_init); 454 + 451 455 static int __init mmp2_mux_of_init(struct device_node *node, 452 456 struct device_node *parent) 453 457 { 454 - struct resource res; 455 458 int i, ret, irq, j = 0; 456 459 u32 nr_irqs, mfp_irq; 460 + u32 reg[4]; 457 461 458 462 if (!parent) 459 463 return -ENODEV; ··· 496 438 pr_err("Not found mrvl,intc-nr-irqs property\n"); 497 439 return -EINVAL; 498 440 } 499 - ret = of_address_to_resource(node, 0, &res); 441 + 442 + /* 443 + * For historical reasons, the "regs" property of the 444 + * mrvl,mmp2-mux-intc is not a regular "regs" property containing 445 + * addresses on the parent bus, but offsets from the intc's base. 446 + * That is why we can't use of_address_to_resource() here. 
447 + */ 448 + ret = of_property_read_variable_u32_array(node, "reg", reg, 449 + ARRAY_SIZE(reg), 450 + ARRAY_SIZE(reg)); 500 451 if (ret < 0) { 501 452 pr_err("Not found reg property\n"); 502 453 return -EINVAL; 503 454 } 504 - icu_data[i].reg_status = mmp_icu_base + res.start; 505 - ret = of_address_to_resource(node, 1, &res); 506 - if (ret < 0) { 507 - pr_err("Not found reg property\n"); 508 - return -EINVAL; 509 - } 510 - icu_data[i].reg_mask = mmp_icu_base + res.start; 455 + icu_data[i].reg_status = mmp_icu_base + reg[0]; 456 + icu_data[i].reg_mask = mmp_icu_base + reg[2]; 511 457 icu_data[i].cascade_irq = irq_of_parse_and_map(node, 0); 512 458 if (!icu_data[i].cascade_irq) 513 459 return -EINVAL;
+1 -3
drivers/irqchip/irq-uniphier-aidet.c
··· 166 166 struct device_node *parent_np; 167 167 struct irq_domain *parent_domain; 168 168 struct uniphier_aidet_priv *priv; 169 - struct resource *res; 170 169 171 170 parent_np = of_irq_find_parent(dev->of_node); 172 171 if (!parent_np) ··· 180 181 if (!priv) 181 182 return -ENOMEM; 182 183 183 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 184 - priv->reg_base = devm_ioremap_resource(dev, res); 184 + priv->reg_base = devm_platform_ioremap_resource(pdev, 0); 185 185 if (IS_ERR(priv->reg_base)) 186 186 return PTR_ERR(priv->reg_base); 187 187
+1 -3
drivers/irqchip/qcom-irq-combiner.c
··· 248 248 return err; 249 249 250 250 combiner->parent_irq = platform_get_irq(pdev, 0); 251 - if (combiner->parent_irq <= 0) { 252 - dev_err(&pdev->dev, "Error getting IRQ resource\n"); 251 + if (combiner->parent_irq <= 0) 253 252 return -EPROBE_DEFER; 254 - } 255 253 256 254 combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs, 257 255 &domain_ops, combiner);
+9 -1
drivers/pci/controller/pci-hyperv.c
··· 2521 2521 const struct hv_vmbus_device_id *dev_id) 2522 2522 { 2523 2523 struct hv_pcibus_device *hbus; 2524 + char *name; 2524 2525 int ret; 2525 2526 2526 2527 /* ··· 2590 2589 goto free_config; 2591 2590 } 2592 2591 2593 - hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus); 2592 + name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance); 2593 + if (!name) { 2594 + ret = -ENOMEM; 2595 + goto unmap; 2596 + } 2597 + 2598 + hbus->sysdata.fwnode = irq_domain_alloc_named_fwnode(name); 2599 + kfree(name); 2594 2600 if (!hbus->sysdata.fwnode) { 2595 2601 ret = -ENOMEM; 2596 2602 goto unmap;
+4
include/linux/interrupt.h
··· 472 472 bool state); 473 473 474 474 #ifdef CONFIG_IRQ_FORCED_THREADING 475 + # ifdef CONFIG_PREEMPT_RT 476 + # define force_irqthreads (true) 477 + # else 475 478 extern bool force_irqthreads; 479 + # endif 476 480 #else 477 481 #define force_irqthreads (0) 478 482 #endif
+29 -1
include/linux/irqchip/arm-gic-v3.h
··· 30 30 #define GICD_ICFGR 0x0C00 31 31 #define GICD_IGRPMODR 0x0D00 32 32 #define GICD_NSACR 0x0E00 33 + #define GICD_IGROUPRnE 0x1000 34 + #define GICD_ISENABLERnE 0x1200 35 + #define GICD_ICENABLERnE 0x1400 36 + #define GICD_ISPENDRnE 0x1600 37 + #define GICD_ICPENDRnE 0x1800 38 + #define GICD_ISACTIVERnE 0x1A00 39 + #define GICD_ICACTIVERnE 0x1C00 40 + #define GICD_IPRIORITYRnE 0x2000 41 + #define GICD_ICFGRnE 0x3000 33 42 #define GICD_IROUTER 0x6000 43 + #define GICD_IROUTERnE 0x8000 34 44 #define GICD_IDREGS 0xFFD0 35 45 #define GICD_PIDR2 0xFFE8 46 + 47 + #define ESPI_BASE_INTID 4096 36 48 37 49 /* 38 50 * Those registers are actually from GICv2, but the spec demands that they ··· 81 69 #define GICD_TYPER_RSS (1U << 26) 82 70 #define GICD_TYPER_LPIS (1U << 17) 83 71 #define GICD_TYPER_MBIS (1U << 16) 72 + #define GICD_TYPER_ESPI (1U << 8) 84 73 85 74 #define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) 86 75 #define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) 87 - #define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) 76 + #define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) 77 + #define GICD_TYPER_ESPIS(typer) \ 78 + (((typer) & GICD_TYPER_ESPI) ? 
GICD_TYPER_SPIS((typer) >> 27) : 0) 88 79 89 80 #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) 90 81 #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) ··· 123 108 #define GICR_CTLR_RWP (1UL << 3) 124 109 125 110 #define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) 111 + 112 + #define EPPI_BASE_INTID 1056 113 + 114 + #define GICR_TYPER_NR_PPIS(r) \ 115 + ({ \ 116 + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ 117 + unsigned int __nr_ppis = 16; \ 118 + if (__ppinum == 1 || __ppinum == 2) \ 119 + __nr_ppis += __ppinum * 32; \ 120 + \ 121 + __nr_ppis; \ 122 + }) 126 123 127 124 #define GICR_WAKER_ProcessorSleep (1U << 1) 128 125 #define GICR_WAKER_ChildrenAsleep (1U << 2) ··· 496 469 #define ICC_CTLR_EL1_A3V_SHIFT 15 497 470 #define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) 498 471 #define ICC_CTLR_EL1_RSS (0x1 << 18) 472 + #define ICC_CTLR_EL1_ExtRange (0x1 << 19) 499 473 #define ICC_PMR_EL1_SHIFT 0 500 474 #define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) 501 475 #define ICC_BPR0_EL1_SHIFT 0
+5
include/linux/irqchip/irq-partition-percpu.h
··· 4 4 * Author: Marc Zyngier <marc.zyngier@arm.com> 5 5 */ 6 6 7 + #ifndef __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H 8 + #define __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H 9 + 7 10 #include <linux/fwnode.h> 8 11 #include <linux/cpumask.h> 9 12 #include <linux/irqdomain.h> ··· 49 46 return NULL; 50 47 } 51 48 #endif 49 + 50 + #endif /* __LINUX_IRQCHIP_IRQ_PARTITION_PERCPU_H */
+3 -3
include/linux/irqdomain.h
··· 220 220 221 221 #ifdef CONFIG_IRQ_DOMAIN 222 222 struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, 223 - const char *name, void *data); 223 + const char *name, phys_addr_t *pa); 224 224 225 225 enum { 226 226 IRQCHIP_FWNODE_REAL, ··· 241 241 NULL); 242 242 } 243 243 244 - static inline struct fwnode_handle *irq_domain_alloc_fwnode(void *data) 244 + static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa) 245 245 { 246 - return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, data); 246 + return __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_REAL, 0, NULL, pa); 247 247 } 248 248 249 249 void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
+201 -30
kernel/irq/affinity.c
··· 7 7 #include <linux/kernel.h> 8 8 #include <linux/slab.h> 9 9 #include <linux/cpu.h> 10 + #include <linux/sort.h> 10 11 11 12 static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, 12 13 unsigned int cpus_per_vec) ··· 95 94 return nodes; 96 95 } 97 96 97 + struct node_vectors { 98 + unsigned id; 99 + 100 + union { 101 + unsigned nvectors; 102 + unsigned ncpus; 103 + }; 104 + }; 105 + 106 + static int ncpus_cmp_func(const void *l, const void *r) 107 + { 108 + const struct node_vectors *ln = l; 109 + const struct node_vectors *rn = r; 110 + 111 + return ln->ncpus - rn->ncpus; 112 + } 113 + 114 + /* 115 + * Allocate vector number for each node, so that for each node: 116 + * 117 + * 1) the allocated number is >= 1 118 + * 119 + * 2) the allocated numbver is <= active CPU number of this node 120 + * 121 + * The actual allocated total vectors may be less than @numvecs when 122 + * active total CPU number is less than @numvecs. 123 + * 124 + * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]' 125 + * for each node. 
126 + */ 127 + static void alloc_nodes_vectors(unsigned int numvecs, 128 + cpumask_var_t *node_to_cpumask, 129 + const struct cpumask *cpu_mask, 130 + const nodemask_t nodemsk, 131 + struct cpumask *nmsk, 132 + struct node_vectors *node_vectors) 133 + { 134 + unsigned n, remaining_ncpus = 0; 135 + 136 + for (n = 0; n < nr_node_ids; n++) { 137 + node_vectors[n].id = n; 138 + node_vectors[n].ncpus = UINT_MAX; 139 + } 140 + 141 + for_each_node_mask(n, nodemsk) { 142 + unsigned ncpus; 143 + 144 + cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); 145 + ncpus = cpumask_weight(nmsk); 146 + 147 + if (!ncpus) 148 + continue; 149 + remaining_ncpus += ncpus; 150 + node_vectors[n].ncpus = ncpus; 151 + } 152 + 153 + numvecs = min_t(unsigned, remaining_ncpus, numvecs); 154 + 155 + sort(node_vectors, nr_node_ids, sizeof(node_vectors[0]), 156 + ncpus_cmp_func, NULL); 157 + 158 + /* 159 + * Allocate vectors for each node according to the ratio of this 160 + * node's nr_cpus to remaining un-assigned ncpus. 'numvecs' is 161 + * bigger than number of active numa nodes. Always start the 162 + * allocation from the node with minimized nr_cpus. 163 + * 164 + * This way guarantees that each active node gets allocated at 165 + * least one vector, and the theory is simple: over-allocation 166 + * is only done when this node is assigned by one vector, so 167 + * other nodes will be allocated >= 1 vector, since 'numvecs' is 168 + * bigger than number of numa nodes. 
169 + * 170 + * One perfect invariant is that number of allocated vectors for 171 + * each node is <= CPU count of this node: 172 + * 173 + * 1) suppose there are two nodes: A and B 174 + * ncpu(X) is CPU count of node X 175 + * vecs(X) is the vector count allocated to node X via this 176 + * algorithm 177 + * 178 + * ncpu(A) <= ncpu(B) 179 + * ncpu(A) + ncpu(B) = N 180 + * vecs(A) + vecs(B) = V 181 + * 182 + * vecs(A) = max(1, round_down(V * ncpu(A) / N)) 183 + * vecs(B) = V - vecs(A) 184 + * 185 + * both N and V are integer, and 2 <= V <= N, suppose 186 + * V = N - delta, and 0 <= delta <= N - 2 187 + * 188 + * 2) obviously vecs(A) <= ncpu(A) because: 189 + * 190 + * if vecs(A) is 1, then vecs(A) <= ncpu(A) given 191 + * ncpu(A) >= 1 192 + * 193 + * otherwise, 194 + * vecs(A) <= V * ncpu(A) / N <= ncpu(A), given V <= N 195 + * 196 + * 3) prove how vecs(B) <= ncpu(B): 197 + * 198 + * if round_down(V * ncpu(A) / N) == 0, vecs(B) won't be 199 + * over-allocated, so vecs(B) <= ncpu(B), 200 + * 201 + * otherwise: 202 + * 203 + * vecs(A) = 204 + * round_down(V * ncpu(A) / N) = 205 + * round_down((N - delta) * ncpu(A) / N) = 206 + * round_down((N * ncpu(A) - delta * ncpu(A)) / N) >= 207 + * round_down((N * ncpu(A) - delta * N) / N) = 208 + * cpu(A) - delta 209 + * 210 + * then: 211 + * 212 + * vecs(A) - V >= ncpu(A) - delta - V 213 + * => 214 + * V - vecs(A) <= V + delta - ncpu(A) 215 + * => 216 + * vecs(B) <= N - ncpu(A) 217 + * => 218 + * vecs(B) <= cpu(B) 219 + * 220 + * For nodes >= 3, it can be thought as one node and another big 221 + * node given that is exactly what this algorithm is implemented, 222 + * and we always re-calculate 'remaining_ncpus' & 'numvecs', and 223 + * finally for each node X: vecs(X) <= ncpu(X). 
224 + * 225 + */ 226 + for (n = 0; n < nr_node_ids; n++) { 227 + unsigned nvectors, ncpus; 228 + 229 + if (node_vectors[n].ncpus == UINT_MAX) 230 + continue; 231 + 232 + WARN_ON_ONCE(numvecs == 0); 233 + 234 + ncpus = node_vectors[n].ncpus; 235 + nvectors = max_t(unsigned, 1, 236 + numvecs * ncpus / remaining_ncpus); 237 + WARN_ON_ONCE(nvectors > ncpus); 238 + 239 + node_vectors[n].nvectors = nvectors; 240 + 241 + remaining_ncpus -= ncpus; 242 + numvecs -= nvectors; 243 + } 244 + } 245 + 98 246 static int __irq_build_affinity_masks(unsigned int startvec, 99 247 unsigned int numvecs, 100 248 unsigned int firstvec, ··· 252 102 struct cpumask *nmsk, 253 103 struct irq_affinity_desc *masks) 254 104 { 255 - unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0; 105 + unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0; 256 106 unsigned int last_affv = firstvec + numvecs; 257 107 unsigned int curvec = startvec; 258 108 nodemask_t nodemsk = NODE_MASK_NONE; 109 + struct node_vectors *node_vectors; 259 110 260 111 if (!cpumask_weight(cpu_mask)) 261 112 return 0; ··· 277 126 return numvecs; 278 127 } 279 128 280 - for_each_node_mask(n, nodemsk) { 281 - unsigned int ncpus, v, vecs_to_assign, vecs_per_node; 129 + node_vectors = kcalloc(nr_node_ids, 130 + sizeof(struct node_vectors), 131 + GFP_KERNEL); 132 + if (!node_vectors) 133 + return -ENOMEM; 282 134 283 - /* Spread the vectors per node */ 284 - vecs_per_node = (numvecs - (curvec - firstvec)) / nodes; 135 + /* allocate vector number for each node */ 136 + alloc_nodes_vectors(numvecs, node_to_cpumask, cpu_mask, 137 + nodemsk, nmsk, node_vectors); 138 + 139 + for (i = 0; i < nr_node_ids; i++) { 140 + unsigned int ncpus, v; 141 + struct node_vectors *nv = &node_vectors[i]; 142 + 143 + if (nv->nvectors == UINT_MAX) 144 + continue; 285 145 286 146 /* Get the cpus on this node which are in the mask */ 287 - cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); 288 - 289 - /* Calculate the number of cpus per vector */ 
147 + cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]); 290 148 ncpus = cpumask_weight(nmsk); 291 - vecs_to_assign = min(vecs_per_node, ncpus); 149 + if (!ncpus) 150 + continue; 151 + 152 + WARN_ON_ONCE(nv->nvectors > ncpus); 292 153 293 154 /* Account for rounding errors */ 294 - extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign); 155 + extra_vecs = ncpus - nv->nvectors * (ncpus / nv->nvectors); 295 156 296 - for (v = 0; curvec < last_affv && v < vecs_to_assign; 297 - curvec++, v++) { 298 - cpus_per_vec = ncpus / vecs_to_assign; 157 + /* Spread allocated vectors on CPUs of the current node */ 158 + for (v = 0; v < nv->nvectors; v++, curvec++) { 159 + cpus_per_vec = ncpus / nv->nvectors; 299 160 300 161 /* Account for extra vectors to compensate rounding errors */ 301 162 if (extra_vecs) { 302 163 cpus_per_vec++; 303 164 --extra_vecs; 304 165 } 166 + 167 + /* 168 + * wrapping has to be considered given 'startvec' 169 + * may start anywhere 170 + */ 171 + if (curvec >= last_affv) 172 + curvec = firstvec; 305 173 irq_spread_init_one(&masks[curvec].mask, nmsk, 306 174 cpus_per_vec); 307 175 } 308 - 309 - done += v; 310 - if (done >= numvecs) 311 - break; 312 - if (curvec >= last_affv) 313 - curvec = firstvec; 314 - --nodes; 176 + done += nv->nvectors; 315 177 } 178 + kfree(node_vectors); 316 179 return done; 317 180 } 318 181 ··· 339 174 unsigned int firstvec, 340 175 struct irq_affinity_desc *masks) 341 176 { 342 - unsigned int curvec = startvec, nr_present, nr_others; 177 + unsigned int curvec = startvec, nr_present = 0, nr_others = 0; 343 178 cpumask_var_t *node_to_cpumask; 344 179 cpumask_var_t nmsk, npresmsk; 345 180 int ret = -ENOMEM; ··· 354 189 if (!node_to_cpumask) 355 190 goto fail_npresmsk; 356 191 357 - ret = 0; 358 192 /* Stabilize the cpumasks */ 359 193 get_online_cpus(); 360 194 build_node_to_cpumask(node_to_cpumask); 361 195 362 196 /* Spread on present CPUs starting from affd->pre_vectors */ 363 - nr_present = 
__irq_build_affinity_masks(curvec, numvecs, 364 - firstvec, node_to_cpumask, 365 - cpu_present_mask, nmsk, masks); 197 + ret = __irq_build_affinity_masks(curvec, numvecs, firstvec, 198 + node_to_cpumask, cpu_present_mask, 199 + nmsk, masks); 200 + if (ret < 0) 201 + goto fail_build_affinity; 202 + nr_present = ret; 366 203 367 204 /* 368 205 * Spread on non present CPUs starting from the next vector to be ··· 377 210 else 378 211 curvec = firstvec + nr_present; 379 212 cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask); 380 - nr_others = __irq_build_affinity_masks(curvec, numvecs, 381 - firstvec, node_to_cpumask, 382 - npresmsk, nmsk, masks); 213 + ret = __irq_build_affinity_masks(curvec, numvecs, firstvec, 214 + node_to_cpumask, npresmsk, nmsk, 215 + masks); 216 + if (ret >= 0) 217 + nr_others = ret; 218 + 219 + fail_build_affinity: 383 220 put_online_cpus(); 384 221 385 - if (nr_present < numvecs) 222 + if (ret >= 0) 386 223 WARN_ON(nr_present + nr_others < numvecs); 387 224 388 225 free_node_to_cpumask(node_to_cpumask); ··· 396 225 397 226 fail_nmsk: 398 227 free_cpumask_var(nmsk); 399 - return ret; 228 + return ret < 0 ? ret : 0; 400 229 } 401 230 402 231 static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
+6 -4
kernel/irq/irqdomain.c
··· 31 31 struct fwnode_handle fwnode; 32 32 unsigned int type; 33 33 char *name; 34 - void *data; 34 + phys_addr_t *pa; 35 35 }; 36 36 37 37 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS ··· 62 62 * domain struct. 63 63 */ 64 64 struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id, 65 - const char *name, void *data) 65 + const char *name, 66 + phys_addr_t *pa) 66 67 { 67 68 struct irqchip_fwid *fwid; 68 69 char *n; ··· 78 77 n = kasprintf(GFP_KERNEL, "%s-%d", name, id); 79 78 break; 80 79 default: 81 - n = kasprintf(GFP_KERNEL, "irqchip@%p", data); 80 + n = kasprintf(GFP_KERNEL, "irqchip@%pa", pa); 82 81 break; 83 82 } 84 83 ··· 90 89 91 90 fwid->type = type; 92 91 fwid->name = n; 93 - fwid->data = data; 92 + fwid->pa = pa; 94 93 fwid->fwnode.ops = &irqchip_fwnode_ops; 95 94 return &fwid->fwnode; 96 95 } ··· 149 148 switch (fwid->type) { 150 149 case IRQCHIP_FWNODE_NAMED: 151 150 case IRQCHIP_FWNODE_NAMED_ID: 151 + domain->fwnode = fwnode; 152 152 domain->name = kstrdup(fwid->name, GFP_KERNEL); 153 153 if (!domain->name) { 154 154 kfree(domain);
+1 -1
kernel/irq/manage.c
··· 23 23 24 24 #include "internals.h" 25 25 26 - #ifdef CONFIG_IRQ_FORCED_THREADING 26 + #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT) 27 27 __read_mostly bool force_irqthreads; 28 28 EXPORT_SYMBOL_GPL(force_irqthreads); 29 29