Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'pci/controller/xilinx'

- Fix off-by-one error in INTx IRQ handler that caused INTx interrupts to
be lost or delivered as the wrong interrupt (Sean Anderson)

- Rate-limit misc interrupt messages (Sean Anderson)

- Turn off the clock on probe failure and device removal (Sean Anderson)

- Add DT binding and driver support for enabling/disabling PHYs (Sean
Anderson)

- Add PCIe phy bindings for the ZCU102 (Sean Anderson)

- Add support for the Xilinx QDMA Soft IP PCIe Root Port Bridge to the DT
binding and the xilinx-dma-pl driver (Thippeswamy Havalige)

* pci/controller/xilinx:
PCI: xilinx-xdma: Add Xilinx QDMA Root Port driver
dt-bindings: PCI: xilinx-xdma: Add schemas for Xilinx QDMA PCIe Root Port Bridge
arm64: zynqmp: Add PCIe phys property for ZCU102
PCI: xilinx-nwl: Add PHY support
dt-bindings: pci: xilinx-nwl: Add phys property
PCI: xilinx-nwl: Clean up clock on probe failure/removal
PCI: xilinx-nwl: Rate-limit misc interrupt messages
PCI: xilinx-nwl: Fix register misspelling
PCI: xilinx-nwl: Fix off-by-one in INTx IRQ handler

+210 -26
+7
Documentation/devicetree/bindings/pci/xlnx,nwl-pcie.yaml
··· 61 61 interrupt-map: 62 62 maxItems: 4 63 63 64 + phys: 65 + minItems: 1 66 + maxItems: 4 67 + description: One phy per logical lane, in order 68 + 64 69 power-domains: 65 70 maxItems: 1 66 71 ··· 115 110 - | 116 111 #include <dt-bindings/interrupt-controller/arm-gic.h> 117 112 #include <dt-bindings/interrupt-controller/irq.h> 113 + #include <dt-bindings/phy/phy.h> 118 114 #include <dt-bindings/power/xlnx-zynqmp-power.h> 119 115 soc { 120 116 #address-cells = <2>; ··· 144 138 <0x0 0x0 0x0 0x3 &pcie_intc 0x3>, 145 139 <0x0 0x0 0x0 0x4 &pcie_intc 0x4>; 146 140 msi-parent = <&nwl_pcie>; 141 + phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>; 147 142 power-domains = <&zynqmp_firmware PD_PCIE>; 148 143 iommus = <&smmu 0x4d0>; 149 144 pcie_intc: legacy-interrupt-controller {
+34 -2
Documentation/devicetree/bindings/pci/xlnx,xdma-host.yaml
··· 14 14 15 15 properties: 16 16 compatible: 17 - const: xlnx,xdma-host-3.00 17 + enum: 18 + - xlnx,xdma-host-3.00 19 + - xlnx,qdma-host-3.00 18 20 19 21 reg: 20 - maxItems: 1 22 + items: 23 + - description: configuration region and XDMA bridge register. 24 + - description: QDMA bridge register. 25 + minItems: 1 26 + 27 + reg-names: 28 + items: 29 + - const: cfg 30 + - const: breg 31 + minItems: 1 21 32 22 33 ranges: 23 34 maxItems: 2 ··· 86 75 - interrupt-map-mask 87 76 - "#interrupt-cells" 88 77 - interrupt-controller 78 + 79 + if: 80 + properties: 81 + compatible: 82 + contains: 83 + enum: 84 + - xlnx,qdma-host-3.00 85 + then: 86 + properties: 87 + reg: 88 + minItems: 2 89 + reg-names: 90 + minItems: 2 91 + required: 92 + - reg-names 93 + else: 94 + properties: 95 + reg: 96 + maxItems: 1 97 + reg-names: 98 + maxItems: 1 89 99 90 100 unevaluatedProperties: false 91 101
+1
arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
··· 941 941 942 942 &pcie { 943 943 status = "okay"; 944 + phys = <&psgtr 0 PHY_TYPE_PCIE 0 0>; 944 945 }; 945 946 946 947 &psgtr {
+52 -1
drivers/pci/controller/pcie-xilinx-dma-pl.c
··· 71 71 72 72 /* Phy Status/Control Register definitions */ 73 73 #define XILINX_PCIE_DMA_REG_PSCR_LNKUP BIT(11) 74 + #define QDMA_BRIDGE_BASE_OFF 0xcd8 74 75 75 76 /* Number of MSI IRQs */ 76 77 #define XILINX_NUM_MSI_IRQS 64 78 + 79 + enum xilinx_pl_dma_version { 80 + XDMA, 81 + QDMA, 82 + }; 83 + 84 + /** 85 + * struct xilinx_pl_dma_variant - PL DMA PCIe variant information 86 + * @version: DMA version 87 + */ 88 + struct xilinx_pl_dma_variant { 89 + enum xilinx_pl_dma_version version; 90 + }; 77 91 78 92 struct xilinx_msi { 79 93 struct irq_domain *msi_domain; ··· 102 88 * struct pl_dma_pcie - PCIe port information 103 89 * @dev: Device pointer 104 90 * @reg_base: IO Mapped Register Base 91 + * @cfg_base: IO Mapped Configuration Base 105 92 * @irq: Interrupt number 106 93 * @cfg: Holds mappings of config space window 107 94 * @phys_reg_base: Physical address of reg base ··· 112 97 * @msi: MSI information 113 98 * @intx_irq: INTx error interrupt number 114 99 * @lock: Lock protecting shared register access 100 + * @variant: PL DMA PCIe version check pointer 115 101 */ 116 102 struct pl_dma_pcie { 117 103 struct device *dev; 118 104 void __iomem *reg_base; 105 + void __iomem *cfg_base; 119 106 int irq; 120 107 struct pci_config_window *cfg; 121 108 phys_addr_t phys_reg_base; ··· 127 110 struct xilinx_msi msi; 128 111 int intx_irq; 129 112 raw_spinlock_t lock; 113 + const struct xilinx_pl_dma_variant *variant; 130 114 }; 131 115 132 116 static inline u32 pcie_read(struct pl_dma_pcie *port, u32 reg) 133 117 { 118 + if (port->variant->version == QDMA) 119 + return readl(port->reg_base + reg + QDMA_BRIDGE_BASE_OFF); 120 + 134 121 return readl(port->reg_base + reg); 135 122 } 136 123 137 124 static inline void pcie_write(struct pl_dma_pcie *port, u32 val, u32 reg) 138 125 { 139 - writel(val, port->reg_base + reg); 126 + if (port->variant->version == QDMA) 127 + writel(val, port->reg_base + reg + QDMA_BRIDGE_BASE_OFF); 128 + else 129 + writel(val, port->reg_base + reg); 140 130 } 141 131 142 132 static inline bool xilinx_pl_dma_pcie_link_up(struct pl_dma_pcie *port) ··· 196 172 197 173 if (!xilinx_pl_dma_pcie_valid_device(bus, devfn)) 198 174 return NULL; 175 + 176 + if (port->variant->version == QDMA) 177 + return port->cfg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); 199 178 200 179 return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); 201 180 } ··· 751 724 752 725 port->reg_base = port->cfg->win; 753 726 727 + if (port->variant->version == QDMA) { 728 + port->cfg_base = port->cfg->win; 729 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); 730 + port->reg_base = devm_ioremap_resource(dev, res); 731 + if (IS_ERR(port->reg_base)) 732 + return PTR_ERR(port->reg_base); 733 + port->phys_reg_base = res->start; 734 + } 735 + 754 736 err = xilinx_request_msi_irq(port); 755 737 if (err) { 756 738 pci_ecam_free(port->cfg); ··· 788 752 bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); 789 753 if (!bus) 790 754 return -ENODEV; 755 + 756 + port->variant = of_device_get_match_data(dev); 791 757 792 758 err = xilinx_pl_dma_pcie_parse_dt(port, bus->res); 793 759 if (err) { ··· 822 784 return err; 823 785 } 824 786 787 + static const struct xilinx_pl_dma_variant xdma_host = { 788 + .version = XDMA, 789 + }; 790 + 791 + static const struct xilinx_pl_dma_variant qdma_host = { 792 + .version = QDMA, 793 + }; 794 + 825 795 static const struct of_device_id xilinx_pl_dma_pcie_of_match[] = { 826 796 { 827 797 .compatible = "xlnx,xdma-host-3.00", 798 + .data = &xdma_host, 799 + }, 800 + { 801 + .compatible = "xlnx,qdma-host-3.00", 802 + .data = &qdma_host, 828 803 }, 829 804 {} 830 805 };
+116 -23
drivers/pci/controller/pcie-xilinx-nwl.c
··· 19 19 #include <linux/of_platform.h> 20 20 #include <linux/pci.h> 21 21 #include <linux/pci-ecam.h> 22 + #include <linux/phy/phy.h> 22 23 #include <linux/platform_device.h> 23 24 #include <linux/irqchip/chained_irq.h> 24 25 ··· 81 80 #define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) 82 81 #define MSGF_MISC_SR_FATAL_DEV BIT(23) 83 82 #define MSGF_MISC_SR_LINK_DOWN BIT(24) 84 - #define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) 85 - #define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) 83 + #define MSGF_MISC_SR_LINK_AUTO_BWIDTH BIT(25) 84 + #define MSGF_MISC_SR_LINK_BWIDTH BIT(26) 86 85 87 86 #define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ 88 87 MSGF_MISC_SR_RXMSG_OVER | \ ··· 97 96 MSGF_MISC_SR_NON_FATAL_DEV | \ 98 97 MSGF_MISC_SR_FATAL_DEV | \ 99 98 MSGF_MISC_SR_LINK_DOWN | \ 100 - MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ 101 - MSGF_MSIC_SR_LINK_BWIDTH) 99 + MSGF_MISC_SR_LINK_AUTO_BWIDTH | \ 100 + MSGF_MISC_SR_LINK_BWIDTH) 102 101 103 102 /* Legacy interrupt status mask bits */ 104 103 #define MSGF_LEG_SR_INTA BIT(0) ··· 158 157 void __iomem *breg_base; 159 158 void __iomem *pcireg_base; 160 159 void __iomem *ecam_base; 160 + struct phy *phy[4]; 161 161 phys_addr_t phys_breg_base; /* Physical Bridge Register Base */ 162 162 phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */ 163 163 phys_addr_t phys_ecam_base; /* Physical Configuration Base */ ··· 269 267 return IRQ_NONE; 270 268 271 269 if (misc_stat & MSGF_MISC_SR_RXMSG_OVER) 272 - dev_err(dev, "Received Message FIFO Overflow\n"); 270 + dev_err_ratelimited(dev, "Received Message FIFO Overflow\n"); 273 271 274 272 if (misc_stat & MSGF_MISC_SR_SLAVE_ERR) 275 - dev_err(dev, "Slave error\n"); 273 + dev_err_ratelimited(dev, "Slave error\n"); 276 274 277 275 if (misc_stat & MSGF_MISC_SR_MASTER_ERR) 278 - dev_err(dev, "Master error\n"); 276 + dev_err_ratelimited(dev, "Master error\n"); 279 277 280 278 if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR) 281 - dev_err(dev, "In Misc Ingress address translation error\n"); 279 + dev_err_ratelimited(dev, "In Misc Ingress address translation error\n"); 282 280 283 281 if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR) 284 - dev_err(dev, "In Misc Egress address translation error\n"); 282 + dev_err_ratelimited(dev, "In Misc Egress address translation error\n"); 285 283 286 284 if (misc_stat & MSGF_MISC_SR_FATAL_AER) 287 - dev_err(dev, "Fatal Error in AER Capability\n"); 285 + dev_err_ratelimited(dev, "Fatal Error in AER Capability\n"); 288 286 289 287 if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER) 290 - dev_err(dev, "Non-Fatal Error in AER Capability\n"); 288 + dev_err_ratelimited(dev, "Non-Fatal Error in AER Capability\n"); 291 289 292 290 if (misc_stat & MSGF_MISC_SR_CORR_AER) 293 - dev_err(dev, "Correctable Error in AER Capability\n"); 291 + dev_err_ratelimited(dev, "Correctable Error in AER Capability\n"); 294 292 295 293 if (misc_stat & MSGF_MISC_SR_UR_DETECT) 296 - dev_err(dev, "Unsupported request Detected\n"); 294 + dev_err_ratelimited(dev, "Unsupported request Detected\n"); 297 295 298 296 if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV) 299 - dev_err(dev, "Non-Fatal Error Detected\n"); 297 + dev_err_ratelimited(dev, "Non-Fatal Error Detected\n"); 300 298 301 299 if (misc_stat & MSGF_MISC_SR_FATAL_DEV) 302 - dev_err(dev, "Fatal Error Detected\n"); 300 + dev_err_ratelimited(dev, "Fatal Error Detected\n"); 303 301 304 - if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) 302 + if (misc_stat & MSGF_MISC_SR_LINK_AUTO_BWIDTH) 305 303 dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n") 306 304 307 - if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) 305 + if (misc_stat & MSGF_MISC_SR_LINK_BWIDTH) 308 306 dev_info(dev, "Link Bandwidth Management Status bit set\n"); 309 307 310 308 /* Clear misc interrupt status */ ··· 373 371 u32 mask; 374 372 u32 val; 375 373 376 - mask = 1 << (data->hwirq - 1); 374 + mask = 1 << data->hwirq; 377 375 raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); 378 376 val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); 379 377 
nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); ··· 387 385 u32 mask; 388 386 u32 val; 389 387 390 - mask = 1 << (data->hwirq - 1); 388 + mask = 1 << data->hwirq; 391 389 raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); 392 390 val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); 393 391 nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); ··· 514 512 } 515 513 #endif 516 514 return 0; 515 + } 516 + 517 + static void nwl_pcie_phy_power_off(struct nwl_pcie *pcie, int i) 518 + { 519 + int err = phy_power_off(pcie->phy[i]); 520 + 521 + if (err) 522 + dev_err(pcie->dev, "could not power off phy %d (err=%d)\n", i, 523 + err); 524 + } 525 + 526 + static void nwl_pcie_phy_exit(struct nwl_pcie *pcie, int i) 527 + { 528 + int err = phy_exit(pcie->phy[i]); 529 + 530 + if (err) 531 + dev_err(pcie->dev, "could not exit phy %d (err=%d)\n", i, err); 532 + } 533 + 534 + static int nwl_pcie_phy_enable(struct nwl_pcie *pcie) 535 + { 536 + int i, ret; 537 + 538 + for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) { 539 + ret = phy_init(pcie->phy[i]); 540 + if (ret) 541 + goto err; 542 + 543 + ret = phy_power_on(pcie->phy[i]); 544 + if (ret) { 545 + nwl_pcie_phy_exit(pcie, i); 546 + goto err; 547 + } 548 + } 549 + 550 + return 0; 551 + 552 + err: 553 + while (i--) { 554 + nwl_pcie_phy_power_off(pcie, i); 555 + nwl_pcie_phy_exit(pcie, i); 556 + } 557 + 558 + return ret; 559 + } 560 + 561 + static void nwl_pcie_phy_disable(struct nwl_pcie *pcie) 562 + { 563 + int i; 564 + 565 + for (i = ARRAY_SIZE(pcie->phy); i--;) { 566 + nwl_pcie_phy_power_off(pcie, i); 567 + nwl_pcie_phy_exit(pcie, i); 568 + } 517 569 } 518 570 519 571 static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) ··· 781 725 { 782 726 struct device *dev = pcie->dev; 783 727 struct resource *res; 728 + int i; 784 729 785 730 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); 786 731 pcie->breg_base = devm_ioremap_resource(dev, res); ··· 809 752 irq_set_chained_handler_and_data(pcie->irq_intx, 810 753 
nwl_pcie_leg_handler, pcie); 811 754 755 + 756 + for (i = 0; i < ARRAY_SIZE(pcie->phy); i++) { 757 + pcie->phy[i] = devm_of_phy_get_by_index(dev, dev->of_node, i); 758 + if (PTR_ERR(pcie->phy[i]) == -ENODEV) { 759 + pcie->phy[i] = NULL; 760 + break; 761 + } 762 + 763 + if (IS_ERR(pcie->phy[i])) 764 + return PTR_ERR(pcie->phy[i]); 765 + } 766 + 812 767 return 0; 813 768 } 814 769 ··· 841 772 return -ENODEV; 842 773 843 774 pcie = pci_host_bridge_priv(bridge); 775 + platform_set_drvdata(pdev, pcie); 844 776 845 777 pcie->dev = dev; 846 778 ··· 861 791 return err; 862 792 } 863 793 794 + err = nwl_pcie_phy_enable(pcie); 795 + if (err) { 796 + dev_err(dev, "could not enable PHYs\n"); 797 + goto err_clk; 798 + } 799 + 864 800 err = nwl_pcie_bridge_init(pcie); 865 801 if (err) { 866 802 dev_err(dev, "HW Initialization failed\n"); 867 - return err; 803 + goto err_phy; 868 804 } 869 805 870 806 err = nwl_pcie_init_irq_domain(pcie); 871 807 if (err) { 872 808 dev_err(dev, "Failed creating IRQ Domain\n"); 873 - return err; 809 + goto err_phy; 874 810 } 875 811 876 812 bridge->sysdata = pcie; ··· 886 810 err = nwl_pcie_enable_msi(pcie); 887 811 if (err < 0) { 888 812 dev_err(dev, "failed to enable MSI support: %d\n", err); 889 - return err; 813 + goto err_phy; 890 814 } 891 815 } 892 816 893 - return pci_host_probe(bridge); 817 + err = pci_host_probe(bridge); 818 + if (!err) 819 + return 0; 820 + 821 + err_phy: 822 + nwl_pcie_phy_disable(pcie); 823 + err_clk: 824 + clk_disable_unprepare(pcie->clk); 825 + return err; 826 + } 827 + 828 + static void nwl_pcie_remove(struct platform_device *pdev) 829 + { 830 + struct nwl_pcie *pcie = platform_get_drvdata(pdev); 831 + 832 + nwl_pcie_phy_disable(pcie); 833 + clk_disable_unprepare(pcie->clk); 894 834 } 895 835 896 836 static struct platform_driver nwl_pcie_driver = { ··· 916 824 .of_match_table = nwl_pcie_of_match, 917 825 }, 918 826 .probe = nwl_pcie_probe, 827 + .remove_new = nwl_pcie_remove, 919 828 }; 920 829 
builtin_platform_driver(nwl_pcie_driver);