Merge tag 'irqchip-fixes-6.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/urgent

Pull irqchip fixes from Marc Zyngier:

- Work around an erratum on GIC-700, where a race between a CPU
  handling a wake-up interrupt, a change of affinity, and another
  CPU going to sleep can result in a missed wake-up event on the
  next interrupt (a minimal standalone model of the workaround's
  trigger condition is included after this list)

- Fix the locking required on a VPE for GICv4

- Enable Rockchip 3588001 erratum workaround for RK3588S

- Fix the irq-bcm6345-l1 driver's assumption that the boot CPU is
  always the first CPU in the system (a small sketch of the new
  handler-data scheme also follows this list)
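
As a reading aid (not part of the patches below): the GIC-700 workaround in
drivers/irqchip/irq-gic-v3.c boils down to one extra decision at EOI time.
If the erratum is present, the interrupt is an SPI/ESPI, and its effective
affinity does not include the CPU that just handled it, the driver issues a
dsb(sy) and deactivates the interrupt explicitly via GICD_ICACTIVER. The
standalone C model below illustrates only that trigger condition; the names
(irq_ctx, is_spi_or_espi, needs_2941627_deactivate) are made up for the
example and do not exist in the kernel.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified view of the state the real driver consults. */
struct irq_ctx {
	uint32_t intid;          /* GIC INTID of the interrupt being EOI'd   */
	uint64_t affinity_mask;  /* effective affinity, one bit per CPU      */
	unsigned int this_cpu;   /* CPU currently handling the interrupt     */
	bool erratum_2941627;    /* quirk detected from the distributor IIDR */
};

/* SPI and ESPI INTID ranges as defined by the GICv3 architecture. */
static bool is_spi_or_espi(uint32_t intid)
{
	return (intid >= 32 && intid <= 1019) ||
	       (intid >= 4096 && intid <= 5119);
}

/*
 * Model of the workaround's trigger: deactivate via GICD_ICACTIVER
 * (instead of relying on EOI/DIR alone) only when the erratum is
 * present, the interrupt is an SPI/ESPI, and it is routed to a CPU
 * other than the one that just handled it.
 */
static bool needs_2941627_deactivate(const struct irq_ctx *ctx)
{
	if (!ctx->erratum_2941627)
		return false;
	if (!is_spi_or_espi(ctx->intid))
		return false;
	return !(ctx->affinity_mask & (1ULL << ctx->this_cpu));
}

int main(void)
{
	struct irq_ctx ctx = {
		.intid = 48,                 /* an SPI                 */
		.affinity_mask = 1ULL << 2,  /* routed to CPU2         */
		.this_cpu = 0,               /* but handled on CPU0    */
		.erratum_2941627 = true,
	};

	printf("explicit GICD_ICACTIVER deactivation needed: %s\n",
	       needs_2941627_deactivate(&ctx) ? "yes" : "no");
	return 0;
}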
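
Similarly, the irq-bcm6345-l1 change replaces a lookup of per-CPU state keyed
on the running CPU's number (which assumed the boot CPU maps to the first
array slot) with per-CPU state handed directly to each chained handler, plus
a back-pointer to the controller. The sketch below models that handler-data
pattern in plain C under assumed, illustrative names (l1_cpu, l1_chip,
handle_parent_irq); it is not the driver's code.

#include <stdio.h>

struct l1_chip;

/* Per-CPU context; the back-pointer mirrors the intc member added by the fix. */
struct l1_cpu {
	struct l1_chip *intc;   /* controller this per-CPU slice belongs to */
	int parent_irq;         /* parent interrupt wired to this CPU       */
};

struct l1_chip {
	struct l1_cpu *cpus[2]; /* kept only to mirror the driver's layout  */
};

/*
 * Old pattern (broken when the boot CPU is not CPU0): the handler was
 * given the chip and indexed cpus[] by the running CPU's number.
 * New pattern: each parent interrupt is registered with its own per-CPU
 * structure as handler data, so no CPU-number lookup is needed and the
 * chip is reached through the back-pointer.
 */
static void handle_parent_irq(void *handler_data)
{
	struct l1_cpu *cpu = handler_data;
	struct l1_chip *intc = cpu->intc;

	printf("handling parent irq %d for chip %p\n",
	       cpu->parent_irq, (void *)intc);
}

int main(void)
{
	struct l1_chip chip = { 0 };
	struct l1_cpu cpu1 = { .intc = &chip, .parent_irq = 9 };

	chip.cpus[1] = &cpu1;       /* e.g. boot CPU is CPU1, not CPU0  */
	handle_parent_irq(&cpu1);   /* data registered at setup time    */
	return 0;
}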

Link: https://lore.kernel.org/lkml/20230717113857.304919-1-maz@kernel.org

Changed files (+117 -40):

Documentation/arm64/silicon-errata.rst (+3 -0)
···
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | GIC-700         | #2941627        | ARM64_ERRATUM_2941627       |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |
 +----------------+-----------------+-----------------+-----------------------------+
 | Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_843419        |

drivers/irqchip/irq-bcm6345-l1.c (+5 -9)
···
 };

 struct bcm6345_l1_cpu {
+	struct bcm6345_l1_chip *intc;
 	void __iomem *map_base;
 	unsigned int parent_irq;
 	u32 enable_cache[];
···

 static void bcm6345_l1_irq_handle(struct irq_desc *desc)
 {
-	struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
-	struct bcm6345_l1_cpu *cpu;
+	struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
+	struct bcm6345_l1_chip *intc = cpu->intc;
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned int idx;
-
-#ifdef CONFIG_SMP
-	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
-#else
-	cpu = intc->cpus[0];
-#endif

 	chained_irq_enter(chip, desc);

···
 	if (!cpu)
 		return -ENOMEM;

+	cpu->intc = intc;
 	cpu->map_base = ioremap(res.start, sz);
 	if (!cpu->map_base)
 		return -ENOMEM;
···
 		return -EINVAL;
 	}
 	irq_set_chained_handler_and_data(cpu->parent_irq,
-					 bcm6345_l1_irq_handle, intc);
+					 bcm6345_l1_irq_handle, cpu);

 	return 0;
 }

drivers/irqchip/irq-gic-v3-its.c (+48 -30)
···
 	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
 }

+static struct irq_chip its_vpe_irq_chip;
+
 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
 {
-	struct its_vlpi_map *map = get_vlpi_map(d);
+	struct its_vpe *vpe = NULL;
 	int cpu;

-	if (map) {
-		cpu = vpe_to_cpuid_lock(map->vpe, flags);
+	if (d->chip == &its_vpe_irq_chip) {
+		vpe = irq_data_get_irq_chip_data(d);
+	} else {
+		struct its_vlpi_map *map = get_vlpi_map(d);
+		if (map)
+			vpe = map->vpe;
+	}
+
+	if (vpe) {
+		cpu = vpe_to_cpuid_lock(vpe, flags);
 	} else {
 		/* Physical LPIs are already locked via the irq_desc lock */
 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
···

 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
 {
-	struct its_vlpi_map *map = get_vlpi_map(d);
+	struct its_vpe *vpe = NULL;

-	if (map)
-		vpe_to_cpuid_unlock(map->vpe, flags);
+	if (d->chip == &its_vpe_irq_chip) {
+		vpe = irq_data_get_irq_chip_data(d);
+	} else {
+		struct its_vlpi_map *map = get_vlpi_map(d);
+		if (map)
+			vpe = map->vpe;
+	}
+
+	if (vpe)
+		vpe_to_cpuid_unlock(vpe, flags);
 }

 static struct its_collection *valid_col(struct its_collection *col)
···
 		cpu_relax();
 }

+static void __direct_lpi_inv(struct irq_data *d, u64 val)
+{
+	void __iomem *rdbase;
+	unsigned long flags;
+	int cpu;
+
+	/* Target the redistributor this LPI is currently routed to */
+	cpu = irq_to_cpuid_lock(d, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+
+	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
+	gic_write_lpir(val, rdbase + GICR_INVLPIR);
+	wait_for_syncr(rdbase);
+
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	irq_to_cpuid_unlock(d, flags);
+}
+
 static void direct_lpi_inv(struct irq_data *d)
 {
 	struct its_vlpi_map *map = get_vlpi_map(d);
-	void __iomem *rdbase;
-	unsigned long flags;
 	u64 val;
-	int cpu;

 	if (map) {
 		struct its_device *its_dev = irq_data_get_irq_chip_data(d);
···
 		val = d->hwirq;
 	}

-	/* Target the redistributor this LPI is currently routed to */
-	cpu = irq_to_cpuid_lock(d, &flags);
-	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
-	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
-	gic_write_lpir(val, rdbase + GICR_INVLPIR);
-
-	wait_for_syncr(rdbase);
-	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
-	irq_to_cpuid_unlock(d, flags);
+	__direct_lpi_inv(d, val);
 }
···
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);

-	if (gic_rdists->has_direct_lpi) {
-		void __iomem *rdbase;
-
-		/* Target the redistributor this VPE is currently known on */
-		raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
-		rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
-		gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
-		wait_for_syncr(rdbase);
-		raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
-	} else {
+	if (gic_rdists->has_direct_lpi)
+		__direct_lpi_inv(d, d->parent_data->hwirq);
+	else
 		its_vpe_send_cmd(vpe, its_send_inv);
-	}
 }

 static void its_vpe_mask_irq(struct irq_data *d)
···
 {
 	struct its_node *its = data;

-	if (!of_machine_is_compatible("rockchip,rk3588"))
+	if (!of_machine_is_compatible("rockchip,rk3588") &&
+	    !of_machine_is_compatible("rockchip,rk3588s"))
 		return false;

 	its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;

drivers/irqchip/irq-gic-v3.c (+61 -1)
···
 static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
 static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);

+static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);
+
 static struct gic_chip_data gic_data __read_mostly;
 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
···
 	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
 }

+static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
+{
+	enum gic_intid_range range;
+
+	if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
+		return false;
+
+	range = get_intid_range(d);
+
+	/*
+	 * The workaround is needed if the IRQ is an SPI and
+	 * the target cpu is different from the one we are
+	 * executing on.
+	 */
+	return (range == SPI_RANGE || range == ESPI_RANGE) &&
+		!cpumask_test_cpu(raw_smp_processor_id(),
+				  irq_data_get_effective_affinity_mask(d));
+}
+
 static void gic_eoi_irq(struct irq_data *d)
 {
 	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
 	isb();
+
+	if (gic_arm64_erratum_2941627_needed(d)) {
+		/*
+		 * Make sure the GIC stream deactivate packet
+		 * issued by ICC_EOIR1_EL1 has completed before
+		 * deactivating through GICD_IACTIVER.
+		 */
+		dsb(sy);
+		gic_poke_irq(d, GICD_ICACTIVER);
+	}
 }

 static void gic_eoimode1_eoi_irq(struct irq_data *d)
···
 	 */
 	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
 		return;
-	gic_write_dir(gic_irq(d));
+
+	if (!gic_arm64_erratum_2941627_needed(d))
+		gic_write_dir(gic_irq(d));
+	else
+		gic_poke_irq(d, GICD_ICACTIVER);
 }

 static int gic_set_type(struct irq_data *d, unsigned int type)
···
 	return true;
 }

+static bool gic_enable_quirk_arm64_2941627(void *data)
+{
+	static_branch_enable(&gic_arm64_2941627_erratum);
+	return true;
+}
+
 static const struct gic_quirk gic_quirks[] = {
 	{
 		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
···
 		.iidr	= 0x0402043b,
 		.mask	= 0xffffffff,
 		.init	= gic_enable_quirk_nvidia_t241,
+	},
+	{
+		/*
+		 * GIC-700: 2941627 workaround - IP variant [0,1]
+		 *
+		 */
+		.desc	= "GICv3: ARM64 erratum 2941627",
+		.iidr	= 0x0400043b,
+		.mask	= 0xff0e0fff,
+		.init	= gic_enable_quirk_arm64_2941627,
+	},
+	{
+		/*
+		 * GIC-700: 2941627 workaround - IP variant [2]
+		 */
+		.desc	= "GICv3: ARM64 erratum 2941627",
+		.iidr	= 0x0402043b,
+		.mask	= 0xff0f0fff,
+		.init	= gic_enable_quirk_arm64_2941627,
 	},
 	{
 	}