Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'pci/host-designware', 'pci/host-designware-common', 'pci/host-generic', 'pci/host-imx6', 'pci/host-iproc' and 'pci/host-xgene' into next

* pci/host-designware:
PCI: designware: Use iATU0 for cfg and IO, iATU1 for MEM
PCI: designware: Consolidate outbound iATU programming functions
PCI: designware: Add support for x8 links

* pci/host-designware-common:
PCI: designware: Wait for link to come up with consistent style
PCI: layerscape: Factor out ls_pcie_establish_link()
PCI: layerscape: Use dw_pcie_link_up() consistently
PCI: dra7xx: Use dw_pcie_link_up() consistently
PCI: imx6: Rename imx6_pcie_start_link() to imx6_pcie_establish_link()

* pci/host-generic:
of/pci: Fix pci_address_to_pio() conversion of CPU address to I/O port

* pci/host-imx6:
PCI: imx6: Add #define PCIE_RC_LCSR
PCI: imx6: Use "u32", not "uint32_t"
PCI: imx6: Add speed change timeout message

* pci/host-iproc:
PCI: iproc: Free resource list after registration
PCI: iproc: Directly add PCI resources
PCI: iproc: Add BCMA PCIe driver
PCI: iproc: Allow override of device tree IRQ mapping function

* pci/host-xgene:
arm64: dts: Add APM X-Gene PCIe MSI nodes
PCI: xgene: Add APM X-Gene v1 PCIe MSI/MSIX termination driver

+1033 -192
+68
Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
··· 1 + * AppliedMicro X-Gene v1 PCIe MSI controller 2 + 3 + Required properties: 4 + 5 + - compatible: should be "apm,xgene1-msi" to identify 6 + X-Gene v1 PCIe MSI controller block. 7 + - msi-controller: indicates that this is X-Gene v1 PCIe MSI controller node 8 + - reg: physical base address (0x79000000) and length (0x900000) for controller 9 + registers. These registers include the MSI termination address and data 10 + registers as well as the MSI interrupt status registers. 11 + - reg-names: not required 12 + - interrupts: A list of 16 interrupt outputs of the controller, starting from 13 + interrupt number 0x10 to 0x1f. 14 + - interrupt-names: not required 15 + 16 + Each PCIe node needs to have property msi-parent that points to msi controller node 17 + 18 + Examples: 19 + 20 + SoC DTSI: 21 + 22 + + MSI node: 23 + msi@79000000 { 24 + compatible = "apm,xgene1-msi"; 25 + msi-controller; 26 + reg = <0x00 0x79000000 0x0 0x900000>; 27 + interrupts = <0x0 0x10 0x4> 28 + <0x0 0x11 0x4> 29 + <0x0 0x12 0x4> 30 + <0x0 0x13 0x4> 31 + <0x0 0x14 0x4> 32 + <0x0 0x15 0x4> 33 + <0x0 0x16 0x4> 34 + <0x0 0x17 0x4> 35 + <0x0 0x18 0x4> 36 + <0x0 0x19 0x4> 37 + <0x0 0x1a 0x4> 38 + <0x0 0x1b 0x4> 39 + <0x0 0x1c 0x4> 40 + <0x0 0x1d 0x4> 41 + <0x0 0x1e 0x4> 42 + <0x0 0x1f 0x4>; 43 + }; 44 + 45 + + PCIe controller node with msi-parent property pointing to MSI node: 46 + pcie0: pcie@1f2b0000 { 47 + status = "disabled"; 48 + device_type = "pci"; 49 + compatible = "apm,xgene-storm-pcie", "apm,xgene-pcie"; 50 + #interrupt-cells = <1>; 51 + #size-cells = <2>; 52 + #address-cells = <3>; 53 + reg = < 0x00 0x1f2b0000 0x0 0x00010000 /* Controller registers */ 54 + 0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */ 55 + reg-names = "csr", "cfg"; 56 + ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */ 57 + 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */ 58 + dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000 59 + 
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>; 60 + interrupt-map-mask = <0x0 0x0 0x0 0x7>; 61 + interrupt-map = <0x0 0x0 0x0 0x1 &gic 0x0 0xc2 0x1 62 + 0x0 0x0 0x0 0x2 &gic 0x0 0xc3 0x1 63 + 0x0 0x0 0x0 0x3 &gic 0x0 0xc4 0x1 64 + 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>; 65 + dma-coherent; 66 + clocks = <&pcie0clk 0>; 67 + msi-parent = <&msi>; 68 + };
+8
MAINTAINERS
··· 7564 7564 S: Orphan 7565 7565 F: drivers/pci/host/*spear* 7566 7566 7567 + PCI MSI DRIVER FOR APPLIEDMICRO XGENE 7568 + M: Duc Dang <dhdang@apm.com> 7569 + L: linux-pci@vger.kernel.org 7570 + L: linux-arm-kernel@lists.infradead.org 7571 + S: Maintained 7572 + F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt 7573 + F: drivers/pci/host/pci-xgene-msi.c 7574 + 7567 7575 PCMCIA SUBSYSTEM 7568 7576 P: Linux PCMCIA Team 7569 7577 L: linux-pcmcia@lists.infradead.org
+27
arch/arm64/boot/dts/apm/apm-storm.dtsi
··· 374 374 }; 375 375 }; 376 376 377 + msi: msi@79000000 { 378 + compatible = "apm,xgene1-msi"; 379 + msi-controller; 380 + reg = <0x00 0x79000000 0x0 0x900000>; 381 + interrupts = < 0x0 0x10 0x4 382 + 0x0 0x11 0x4 383 + 0x0 0x12 0x4 384 + 0x0 0x13 0x4 385 + 0x0 0x14 0x4 386 + 0x0 0x15 0x4 387 + 0x0 0x16 0x4 388 + 0x0 0x17 0x4 389 + 0x0 0x18 0x4 390 + 0x0 0x19 0x4 391 + 0x0 0x1a 0x4 392 + 0x0 0x1b 0x4 393 + 0x0 0x1c 0x4 394 + 0x0 0x1d 0x4 395 + 0x0 0x1e 0x4 396 + 0x0 0x1f 0x4>; 397 + }; 398 + 377 399 pcie0: pcie@1f2b0000 { 378 400 status = "disabled"; 379 401 device_type = "pci"; ··· 417 395 0x0 0x0 0x0 0x4 &gic 0x0 0xc5 0x1>; 418 396 dma-coherent; 419 397 clocks = <&pcie0clk 0>; 398 + msi-parent = <&msi>; 420 399 }; 421 400 422 401 pcie1: pcie@1f2c0000 { ··· 441 418 0x0 0x0 0x0 0x4 &gic 0x0 0xcb 0x1>; 442 419 dma-coherent; 443 420 clocks = <&pcie1clk 0>; 421 + msi-parent = <&msi>; 444 422 }; 445 423 446 424 pcie2: pcie@1f2d0000 { ··· 465 441 0x0 0x0 0x0 0x4 &gic 0x0 0xd1 0x1>; 466 442 dma-coherent; 467 443 clocks = <&pcie2clk 0>; 444 + msi-parent = <&msi>; 468 445 }; 469 446 470 447 pcie3: pcie@1f500000 { ··· 489 464 0x0 0x0 0x0 0x4 &gic 0x0 0xd7 0x1>; 490 465 dma-coherent; 491 466 clocks = <&pcie3clk 0>; 467 + msi-parent = <&msi>; 492 468 }; 493 469 494 470 pcie4: pcie@1f510000 { ··· 513 487 0x0 0x0 0x0 0x4 &gic 0x0 0xdd 0x1>; 514 488 dma-coherent; 515 489 clocks = <&pcie4clk 0>; 490 + msi-parent = <&msi>; 516 491 }; 517 492 518 493 serial0: serial@1c020000 {
+1 -1
drivers/of/address.c
··· 765 765 spin_lock(&io_range_lock); 766 766 list_for_each_entry(res, &io_range_list, list) { 767 767 if (address >= res->start && address < res->start + res->size) { 768 - addr = res->start - address + offset; 768 + addr = address - res->start + offset; 769 769 break; 770 770 } 771 771 offset += res->size;
+20
drivers/pci/host/Kconfig
··· 89 89 depends on ARCH_XGENE 90 90 depends on OF 91 91 select PCIEPORTBUS 92 + select PCI_MSI_IRQ_DOMAIN if PCI_MSI 92 93 help 93 94 Say Y here if you want internal PCI support on APM X-Gene SoC. 94 95 There are 5 internal PCIe ports available. Each port is GEN3 capable 95 96 and have varied lanes from x1 to x8. 97 + 98 + config PCI_XGENE_MSI 99 + bool "X-Gene v1 PCIe MSI feature" 100 + depends on PCI_XGENE && PCI_MSI 101 + default y 102 + help 103 + Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. 104 + This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. 96 105 97 106 config PCI_LAYERSCAPE 98 107 bool "Freescale Layerscape PCIe controller" ··· 133 124 help 134 125 Say Y here if you want to use the Broadcom iProc PCIe controller 135 126 through the generic platform bus interface 127 + 128 + config PCIE_IPROC_BCMA 129 + bool "Broadcom iProc PCIe BCMA bus driver" 130 + depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST) 131 + select PCIE_IPROC 132 + select BCMA 133 + select PCI_DOMAINS 134 + default ARCH_BCM_5301X 135 + help 136 + Say Y here if you want to use the Broadcom iProc PCIe controller 137 + through the BCMA bus interface 136 138 137 139 endmenu
+2
drivers/pci/host/Makefile
··· 11 11 obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o 12 12 obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o 13 13 obj-$(CONFIG_PCI_XGENE) += pci-xgene.o 14 + obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o 14 15 obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o 15 16 obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o 16 17 obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o 17 18 obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o 19 + obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
+7 -12
drivers/pci/host/pci-dra7xx.c
··· 93 93 94 94 static int dra7xx_pcie_establish_link(struct pcie_port *pp) 95 95 { 96 - u32 reg; 97 - unsigned int retries = 1000; 98 96 struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pp); 97 + u32 reg; 98 + unsigned int retries; 99 99 100 100 if (dw_pcie_link_up(pp)) { 101 101 dev_err(pp->dev, "link is already up\n"); ··· 106 106 reg |= LTSSM_EN; 107 107 dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); 108 108 109 - while (retries--) { 110 - reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); 111 - if (reg & LINK_UP) 112 - break; 109 + for (retries = 0; retries < 1000; retries++) { 110 + if (dw_pcie_link_up(pp)) 111 + return 0; 113 112 usleep_range(10, 20); 114 113 } 115 114 116 - if (retries == 0) { 117 - dev_err(pp->dev, "link is not up\n"); 118 - return -ETIMEDOUT; 119 - } 120 - 121 - return 0; 115 + dev_err(pp->dev, "link is not up\n"); 116 + return -EINVAL; 122 117 } 123 118 124 119 static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp)
+15 -19
drivers/pci/host/pci-exynos.c
··· 316 316 317 317 static int exynos_pcie_establish_link(struct pcie_port *pp) 318 318 { 319 - u32 val; 320 - int count = 0; 321 319 struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp); 320 + u32 val; 321 + unsigned int retries; 322 322 323 323 if (dw_pcie_link_up(pp)) { 324 324 dev_err(pp->dev, "Link already up\n"); ··· 357 357 PCIE_APP_LTSSM_ENABLE); 358 358 359 359 /* check if the link is up or not */ 360 - while (!dw_pcie_link_up(pp)) { 361 - mdelay(100); 362 - count++; 363 - if (count == 10) { 364 - while (exynos_phy_readl(exynos_pcie, 365 - PCIE_PHY_PLL_LOCKED) == 0) { 366 - val = exynos_blk_readl(exynos_pcie, 367 - PCIE_PHY_PLL_LOCKED); 368 - dev_info(pp->dev, "PLL Locked: 0x%x\n", val); 369 - } 370 - /* power off phy */ 371 - exynos_pcie_power_off_phy(pp); 372 - 373 - dev_err(pp->dev, "PCIe Link Fail\n"); 374 - return -EINVAL; 360 + for (retries = 0; retries < 10; retries++) { 361 + if (dw_pcie_link_up(pp)) { 362 + dev_info(pp->dev, "Link up\n"); 363 + return 0; 375 364 } 365 + mdelay(100); 376 366 } 377 367 378 - dev_info(pp->dev, "Link up\n"); 368 + while (exynos_phy_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED) == 0) { 369 + val = exynos_blk_readl(exynos_pcie, PCIE_PHY_PLL_LOCKED); 370 + dev_info(pp->dev, "PLL Locked: 0x%x\n", val); 371 + } 372 + /* power off phy */ 373 + exynos_pcie_power_off_phy(pp); 379 374 380 - return 0; 375 + dev_err(pp->dev, "PCIe Link Fail\n"); 376 + return -EINVAL; 381 377 } 382 378 383 379 static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+49 -39
drivers/pci/host/pci-imx6.c
··· 47 47 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 48 48 #define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf 49 49 50 + #define PCIE_RC_LCSR 0x80 51 + 50 52 /* PCIe Port Logic registers (memory-mapped) */ 51 53 #define PL_OFFSET 0x700 52 54 #define PCIE_PL_PFLR (PL_OFFSET + 0x08) ··· 337 335 338 336 static int imx6_pcie_wait_for_link(struct pcie_port *pp) 339 337 { 340 - int count = 200; 338 + unsigned int retries; 341 339 342 - while (!dw_pcie_link_up(pp)) { 340 + for (retries = 0; retries < 200; retries++) { 341 + if (dw_pcie_link_up(pp)) 342 + return 0; 343 343 usleep_range(100, 1000); 344 - if (--count) 345 - continue; 346 - 347 - dev_err(pp->dev, "phy link never came up\n"); 348 - dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", 349 - readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), 350 - readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); 351 - return -EINVAL; 352 344 } 353 345 354 - return 0; 346 + dev_err(pp->dev, "phy link never came up\n"); 347 + dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", 348 + readl(pp->dbi_base + PCIE_PHY_DEBUG_R0), 349 + readl(pp->dbi_base + PCIE_PHY_DEBUG_R1)); 350 + return -EINVAL; 351 + } 352 + 353 + static int imx6_pcie_wait_for_speed_change(struct pcie_port *pp) 354 + { 355 + u32 tmp; 356 + unsigned int retries; 357 + 358 + for (retries = 0; retries < 200; retries++) { 359 + tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); 360 + /* Test if the speed change finished. 
*/ 361 + if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) 362 + return 0; 363 + usleep_range(100, 1000); 364 + } 365 + 366 + dev_err(pp->dev, "Speed change timeout\n"); 367 + return -EINVAL; 355 368 } 356 369 357 370 static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) ··· 376 359 return dw_handle_msi_irq(pp); 377 360 } 378 361 379 - static int imx6_pcie_start_link(struct pcie_port *pp) 362 + static int imx6_pcie_establish_link(struct pcie_port *pp) 380 363 { 381 364 struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); 382 - uint32_t tmp; 383 - int ret, count; 365 + u32 tmp; 366 + int ret; 384 367 385 368 /* 386 369 * Force Gen1 operation when starting the link. In case the link is ··· 414 397 tmp |= PORT_LOGIC_SPEED_CHANGE; 415 398 writel(tmp, pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); 416 399 417 - count = 200; 418 - while (count--) { 419 - tmp = readl(pp->dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL); 420 - /* Test if the speed change finished. */ 421 - if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) 422 - break; 423 - usleep_range(100, 1000); 400 + ret = imx6_pcie_wait_for_speed_change(pp); 401 + if (ret) { 402 + dev_err(pp->dev, "Failed to bring link up!\n"); 403 + return ret; 424 404 } 425 405 426 406 /* Make sure link training is finished as well! 
*/ 427 - if (count) 428 - ret = imx6_pcie_wait_for_link(pp); 429 - else 430 - ret = -EINVAL; 431 - 407 + ret = imx6_pcie_wait_for_link(pp); 432 408 if (ret) { 433 409 dev_err(pp->dev, "Failed to bring link up!\n"); 434 - } else { 435 - tmp = readl(pp->dbi_base + 0x80); 436 - dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); 410 + return ret; 437 411 } 438 412 439 - return ret; 413 + tmp = readl(pp->dbi_base + PCIE_RC_LCSR); 414 + dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf); 415 + return 0; 440 416 } 441 417 442 418 static void imx6_pcie_host_init(struct pcie_port *pp) ··· 442 432 443 433 dw_pcie_setup_rc(pp); 444 434 445 - imx6_pcie_start_link(pp); 435 + imx6_pcie_establish_link(pp); 446 436 447 437 if (IS_ENABLED(CONFIG_PCI_MSI)) 448 438 dw_pcie_msi_init(pp); ··· 450 440 451 441 static void imx6_pcie_reset_phy(struct pcie_port *pp) 452 442 { 453 - uint32_t temp; 443 + u32 tmp; 454 444 455 - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); 456 - temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | 457 - PHY_RX_OVRD_IN_LO_RX_PLL_EN); 458 - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); 445 + pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); 446 + tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | 447 + PHY_RX_OVRD_IN_LO_RX_PLL_EN); 448 + pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); 459 449 460 450 usleep_range(2000, 3000); 461 451 462 - pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &temp); 463 - temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | 452 + pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp); 453 + tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | 464 454 PHY_RX_OVRD_IN_LO_RX_PLL_EN); 465 - pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, temp); 455 + pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp); 466 456 } 467 457 468 458 static int imx6_pcie_link_up(struct pcie_port *pp)
+7 -9
drivers/pci/host/pci-keystone.c
··· 88 88 static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) 89 89 { 90 90 struct pcie_port *pp = &ks_pcie->pp; 91 - int count = 200; 91 + unsigned int retries; 92 92 93 93 dw_pcie_setup_rc(pp); 94 94 ··· 99 99 100 100 ks_dw_pcie_initiate_link_train(ks_pcie); 101 101 /* check if the link is up or not */ 102 - while (!dw_pcie_link_up(pp)) { 102 + for (retries = 0; retries < 200; retries++) { 103 + if (dw_pcie_link_up(pp)) 104 + return 0; 103 105 usleep_range(100, 1000); 104 - if (--count) { 105 - ks_dw_pcie_initiate_link_train(ks_pcie); 106 - continue; 107 - } 108 - dev_err(pp->dev, "phy link never came up\n"); 109 - return -EINVAL; 106 + ks_dw_pcie_initiate_link_train(ks_pcie); 110 107 } 111 108 112 - return 0; 109 + dev_err(pp->dev, "phy link never came up\n"); 110 + return -EINVAL; 113 111 } 114 112 115 113 static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+15 -10
drivers/pci/host/pci-layerscape.c
··· 62 62 return 1; 63 63 } 64 64 65 + static int ls_pcie_establish_link(struct pcie_port *pp) 66 + { 67 + unsigned int retries; 68 + 69 + for (retries = 0; retries < 200; retries++) { 70 + if (dw_pcie_link_up(pp)) 71 + return 0; 72 + usleep_range(100, 1000); 73 + } 74 + 75 + dev_err(pp->dev, "phy link never came up\n"); 76 + return -EINVAL; 77 + } 78 + 65 79 static void ls_pcie_host_init(struct pcie_port *pp) 66 80 { 67 81 struct ls_pcie *pcie = to_ls_pcie(pp); 68 - int count = 0; 69 82 u32 val; 70 83 71 84 dw_pcie_setup_rc(pp); 72 - 73 - while (!ls_pcie_link_up(pp)) { 74 - usleep_range(100, 1000); 75 - count++; 76 - if (count >= 200) { 77 - dev_err(pp->dev, "phy link never came up\n"); 78 - return; 79 - } 80 - } 85 + ls_pcie_establish_link(pp); 81 86 82 87 /* 83 88 * LS1021A Workaround for internal TKT228622
+596
drivers/pci/host/pci-xgene-msi.c
··· 1 + /* 2 + * APM X-Gene MSI Driver 3 + * 4 + * Copyright (c) 2014, Applied Micro Circuits Corporation 5 + * Author: Tanmay Inamdar <tinamdar@apm.com> 6 + * Duc Dang <dhdang@apm.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the 10 + * Free Software Foundation; either version 2 of the License, or (at your 11 + * option) any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, 14 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 + * GNU General Public License for more details. 17 + */ 18 + #include <linux/cpu.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/module.h> 21 + #include <linux/msi.h> 22 + #include <linux/of_irq.h> 23 + #include <linux/irqchip/chained_irq.h> 24 + #include <linux/pci.h> 25 + #include <linux/platform_device.h> 26 + #include <linux/of_pci.h> 27 + 28 + #define MSI_IR0 0x000000 29 + #define MSI_INT0 0x800000 30 + #define IDX_PER_GROUP 8 31 + #define IRQS_PER_IDX 16 32 + #define NR_HW_IRQS 16 33 + #define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) 34 + 35 + struct xgene_msi_group { 36 + struct xgene_msi *msi; 37 + int gic_irq; 38 + u32 msi_grp; 39 + }; 40 + 41 + struct xgene_msi { 42 + struct device_node *node; 43 + struct msi_controller mchip; 44 + struct irq_domain *domain; 45 + u64 msi_addr; 46 + void __iomem *msi_regs; 47 + unsigned long *bitmap; 48 + struct mutex bitmap_lock; 49 + struct xgene_msi_group *msi_groups; 50 + int num_cpus; 51 + }; 52 + 53 + /* Global data */ 54 + static struct xgene_msi xgene_msi_ctrl; 55 + 56 + static struct irq_chip xgene_msi_top_irq_chip = { 57 + .name = "X-Gene1 MSI", 58 + .irq_enable = pci_msi_unmask_irq, 59 + .irq_disable = pci_msi_mask_irq, 60 + .irq_mask = pci_msi_mask_irq, 61 + .irq_unmask = pci_msi_unmask_irq, 62 + }; 63 + 64 + static struct 
msi_domain_info xgene_msi_domain_info = { 65 + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | 66 + MSI_FLAG_PCI_MSIX), 67 + .chip = &xgene_msi_top_irq_chip, 68 + }; 69 + 70 + /* 71 + * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where 72 + * n is group number (0..F), x is index of registers in each group (0..7) 73 + * The register layout is as follows: 74 + * MSI0IR0 base_addr 75 + * MSI0IR1 base_addr + 0x10000 76 + * ... ... 77 + * MSI0IR6 base_addr + 0x60000 78 + * MSI0IR7 base_addr + 0x70000 79 + * MSI1IR0 base_addr + 0x80000 80 + * MSI1IR1 base_addr + 0x90000 81 + * ... ... 82 + * MSI1IR7 base_addr + 0xF0000 83 + * MSI2IR0 base_addr + 0x100000 84 + * ... ... 85 + * MSIFIR0 base_addr + 0x780000 86 + * MSIFIR1 base_addr + 0x790000 87 + * ... ... 88 + * MSIFIR7 base_addr + 0x7F0000 89 + * MSIINT0 base_addr + 0x800000 90 + * MSIINT1 base_addr + 0x810000 91 + * ... ... 92 + * MSIINTF base_addr + 0x8F0000 93 + * 94 + * Each index register supports 16 MSI vectors (0..15) to generate interrupt. 95 + * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination 96 + * registers. 97 + * 98 + * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate 99 + * the MSI pending status caused by 1 of its 8 index registers. 100 + */ 101 + 102 + /* MSInIRx read helper */ 103 + static u32 xgene_msi_ir_read(struct xgene_msi *msi, 104 + u32 msi_grp, u32 msir_idx) 105 + { 106 + return readl_relaxed(msi->msi_regs + MSI_IR0 + 107 + (msi_grp << 19) + (msir_idx << 16)); 108 + } 109 + 110 + /* MSIINTn read helper */ 111 + static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) 112 + { 113 + return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); 114 + } 115 + 116 + /* 117 + * With 2048 MSI vectors supported, the MSI message can be constructed using 118 + * following scheme: 119 + * - Divide into 8 256-vector groups 120 + * Group 0: 0-255 121 + * Group 1: 256-511 122 + * Group 2: 512-767 123 + * ... 
124 + Group 7: 1792-2047 125 + - Each 256-vector group is divided into 16 16-vector groups 126 + As an example: 16 16-vector groups for 256-vector group 0-255 is 127 + Group 0: 0-15 128 + Group 1: 16-31 129 + ... 130 + Group 15: 240-255 131 + - The termination address of MSI vector in 256-vector group n and 16-vector 132 + group x is the address of MSIxIRn 133 + - The data for MSI vector in 16-vector group x is x 134 + */ 135 + static u32 hwirq_to_reg_set(unsigned long hwirq) 136 + { 137 + return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); 138 + } 139 + 140 + static u32 hwirq_to_group(unsigned long hwirq) 141 + { 142 + return (hwirq % NR_HW_IRQS); 143 + } 144 + 145 + static u32 hwirq_to_msi_data(unsigned long hwirq) 146 + { 147 + return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); 148 + } 149 + 150 + static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) 151 + { 152 + struct xgene_msi *msi = irq_data_get_irq_chip_data(data); 153 + u32 reg_set = hwirq_to_reg_set(data->hwirq); 154 + u32 group = hwirq_to_group(data->hwirq); 155 + u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); 156 + 157 + msg->address_hi = upper_32_bits(target_addr); 158 + msg->address_lo = lower_32_bits(target_addr); 159 + msg->data = hwirq_to_msi_data(data->hwirq); 160 + } 161 + 162 + /* 163 + * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain 164 + * the expected behaviour of .set_affinity for each MSI interrupt, the 16 165 + * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs 166 + * for each core). The MSI vector is moved from 1 MSI GIC IRQ to another 167 + * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a 168 + * consequence, the total MSI vectors that X-Gene v1 supports will be 169 + * reduced to 256 (2048/8) vectors. 
170 + */ 171 + static int hwirq_to_cpu(unsigned long hwirq) 172 + { 173 + return (hwirq % xgene_msi_ctrl.num_cpus); 174 + } 175 + 176 + static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) 177 + { 178 + return (hwirq - hwirq_to_cpu(hwirq)); 179 + } 180 + 181 + static int xgene_msi_set_affinity(struct irq_data *irqdata, 182 + const struct cpumask *mask, bool force) 183 + { 184 + int target_cpu = cpumask_first(mask); 185 + int curr_cpu; 186 + 187 + curr_cpu = hwirq_to_cpu(irqdata->hwirq); 188 + if (curr_cpu == target_cpu) 189 + return IRQ_SET_MASK_OK_DONE; 190 + 191 + /* Update MSI number to target the new CPU */ 192 + irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; 193 + 194 + return IRQ_SET_MASK_OK; 195 + } 196 + 197 + static struct irq_chip xgene_msi_bottom_irq_chip = { 198 + .name = "MSI", 199 + .irq_set_affinity = xgene_msi_set_affinity, 200 + .irq_compose_msi_msg = xgene_compose_msi_msg, 201 + }; 202 + 203 + static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 204 + unsigned int nr_irqs, void *args) 205 + { 206 + struct xgene_msi *msi = domain->host_data; 207 + int msi_irq; 208 + 209 + mutex_lock(&msi->bitmap_lock); 210 + 211 + msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, 212 + msi->num_cpus, 0); 213 + if (msi_irq < NR_MSI_VEC) 214 + bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); 215 + else 216 + msi_irq = -ENOSPC; 217 + 218 + mutex_unlock(&msi->bitmap_lock); 219 + 220 + if (msi_irq < 0) 221 + return msi_irq; 222 + 223 + irq_domain_set_info(domain, virq, msi_irq, 224 + &xgene_msi_bottom_irq_chip, domain->host_data, 225 + handle_simple_irq, NULL, NULL); 226 + set_irq_flags(virq, IRQF_VALID); 227 + 228 + return 0; 229 + } 230 + 231 + static void xgene_irq_domain_free(struct irq_domain *domain, 232 + unsigned int virq, unsigned int nr_irqs) 233 + { 234 + struct irq_data *d = irq_domain_get_irq_data(domain, virq); 235 + struct xgene_msi *msi = irq_data_get_irq_chip_data(d); 236 + 
u32 hwirq; 237 + 238 + mutex_lock(&msi->bitmap_lock); 239 + 240 + hwirq = hwirq_to_canonical_hwirq(d->hwirq); 241 + bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); 242 + 243 + mutex_unlock(&msi->bitmap_lock); 244 + 245 + irq_domain_free_irqs_parent(domain, virq, nr_irqs); 246 + } 247 + 248 + static const struct irq_domain_ops msi_domain_ops = { 249 + .alloc = xgene_irq_domain_alloc, 250 + .free = xgene_irq_domain_free, 251 + }; 252 + 253 + static int xgene_allocate_domains(struct xgene_msi *msi) 254 + { 255 + msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC, 256 + &msi_domain_ops, msi); 257 + if (!msi->domain) 258 + return -ENOMEM; 259 + 260 + msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node, 261 + &xgene_msi_domain_info, 262 + msi->domain); 263 + 264 + if (!msi->mchip.domain) { 265 + irq_domain_remove(msi->domain); 266 + return -ENOMEM; 267 + } 268 + 269 + return 0; 270 + } 271 + 272 + static void xgene_free_domains(struct xgene_msi *msi) 273 + { 274 + if (msi->mchip.domain) 275 + irq_domain_remove(msi->mchip.domain); 276 + if (msi->domain) 277 + irq_domain_remove(msi->domain); 278 + } 279 + 280 + static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) 281 + { 282 + int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); 283 + 284 + xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); 285 + if (!xgene_msi->bitmap) 286 + return -ENOMEM; 287 + 288 + mutex_init(&xgene_msi->bitmap_lock); 289 + 290 + xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, 291 + sizeof(struct xgene_msi_group), 292 + GFP_KERNEL); 293 + if (!xgene_msi->msi_groups) 294 + return -ENOMEM; 295 + 296 + return 0; 297 + } 298 + 299 + static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) 300 + { 301 + struct irq_chip *chip = irq_desc_get_chip(desc); 302 + struct xgene_msi_group *msi_groups; 303 + struct xgene_msi *xgene_msi; 304 + unsigned int virq; 305 + int msir_index, msir_val, hw_irq; 306 + u32 intr_index, grp_select, msi_grp; 307 + 308 + chained_irq_enter(chip, desc); 
309 + 310 + msi_groups = irq_desc_get_handler_data(desc); 311 + xgene_msi = msi_groups->msi; 312 + msi_grp = msi_groups->msi_grp; 313 + 314 + /* 315 + * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt 316 + * If bit x of this register is set (x is 0..7), one or more interrupts 317 + * corresponding to MSInIRx is set. 318 + */ 319 + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); 320 + while (grp_select) { 321 + msir_index = ffs(grp_select) - 1; 322 + /* 323 + * Calculate MSInIRx address to read to check for interrupts 324 + * (refer to termination address and data assignment 325 + * described in xgene_compose_msi_msg() ) 326 + */ 327 + msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); 328 + while (msir_val) { 329 + intr_index = ffs(msir_val) - 1; 330 + /* 331 + * Calculate MSI vector number (refer to the termination 332 + * address and data assignment described in 333 + * xgene_compose_msi_msg function) 334 + */ 335 + hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * 336 + NR_HW_IRQS) + msi_grp; 337 + /* 338 + * As we have multiple hw_irq that maps to single MSI, 339 + * always look up the virq using the hw_irq as seen from 340 + * CPU0 341 + */ 342 + hw_irq = hwirq_to_canonical_hwirq(hw_irq); 343 + virq = irq_find_mapping(xgene_msi->domain, hw_irq); 344 + WARN_ON(!virq); 345 + if (virq != 0) 346 + generic_handle_irq(virq); 347 + msir_val &= ~(1 << intr_index); 348 + } 349 + grp_select &= ~(1 << msir_index); 350 + 351 + if (!grp_select) { 352 + /* 353 + * We handled all interrupts happened in this group, 354 + * resample this group MSI_INTx register in case 355 + * something else has been made pending in the meantime 356 + */ 357 + grp_select = xgene_msi_int_read(xgene_msi, msi_grp); 358 + } 359 + } 360 + 361 + chained_irq_exit(chip, desc); 362 + } 363 + 364 + static int xgene_msi_remove(struct platform_device *pdev) 365 + { 366 + int virq, i; 367 + struct xgene_msi *msi = platform_get_drvdata(pdev); 368 + 369 + for (i = 0; i 
< NR_HW_IRQS; i++) { 370 + virq = msi->msi_groups[i].gic_irq; 371 + if (virq != 0) { 372 + irq_set_chained_handler(virq, NULL); 373 + irq_set_handler_data(virq, NULL); 374 + } 375 + } 376 + kfree(msi->msi_groups); 377 + 378 + kfree(msi->bitmap); 379 + msi->bitmap = NULL; 380 + 381 + xgene_free_domains(msi); 382 + 383 + return 0; 384 + } 385 + 386 + static int xgene_msi_hwirq_alloc(unsigned int cpu) 387 + { 388 + struct xgene_msi *msi = &xgene_msi_ctrl; 389 + struct xgene_msi_group *msi_group; 390 + cpumask_var_t mask; 391 + int i; 392 + int err; 393 + 394 + for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { 395 + msi_group = &msi->msi_groups[i]; 396 + if (!msi_group->gic_irq) 397 + continue; 398 + 399 + irq_set_chained_handler(msi_group->gic_irq, 400 + xgene_msi_isr); 401 + err = irq_set_handler_data(msi_group->gic_irq, msi_group); 402 + if (err) { 403 + pr_err("failed to register GIC IRQ handler\n"); 404 + return -EINVAL; 405 + } 406 + /* 407 + * Statically allocate MSI GIC IRQs to each CPU core. 408 + * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated 409 + * to each core. 
410 + */ 411 + if (alloc_cpumask_var(&mask, GFP_KERNEL)) { 412 + cpumask_clear(mask); 413 + cpumask_set_cpu(cpu, mask); 414 + err = irq_set_affinity(msi_group->gic_irq, mask); 415 + if (err) 416 + pr_err("failed to set affinity for GIC IRQ"); 417 + free_cpumask_var(mask); 418 + } else { 419 + pr_err("failed to alloc CPU mask for affinity\n"); 420 + err = -EINVAL; 421 + } 422 + 423 + if (err) { 424 + irq_set_chained_handler(msi_group->gic_irq, NULL); 425 + irq_set_handler_data(msi_group->gic_irq, NULL); 426 + return err; 427 + } 428 + } 429 + 430 + return 0; 431 + } 432 + 433 + static void xgene_msi_hwirq_free(unsigned int cpu) 434 + { 435 + struct xgene_msi *msi = &xgene_msi_ctrl; 436 + struct xgene_msi_group *msi_group; 437 + int i; 438 + 439 + for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { 440 + msi_group = &msi->msi_groups[i]; 441 + if (!msi_group->gic_irq) 442 + continue; 443 + 444 + irq_set_chained_handler(msi_group->gic_irq, NULL); 445 + irq_set_handler_data(msi_group->gic_irq, NULL); 446 + } 447 + } 448 + 449 + static int xgene_msi_cpu_callback(struct notifier_block *nfb, 450 + unsigned long action, void *hcpu) 451 + { 452 + unsigned cpu = (unsigned long)hcpu; 453 + 454 + switch (action) { 455 + case CPU_ONLINE: 456 + case CPU_ONLINE_FROZEN: 457 + xgene_msi_hwirq_alloc(cpu); 458 + break; 459 + case CPU_DEAD: 460 + case CPU_DEAD_FROZEN: 461 + xgene_msi_hwirq_free(cpu); 462 + break; 463 + default: 464 + break; 465 + } 466 + 467 + return NOTIFY_OK; 468 + } 469 + 470 + static struct notifier_block xgene_msi_cpu_notifier = { 471 + .notifier_call = xgene_msi_cpu_callback, 472 + }; 473 + 474 + static const struct of_device_id xgene_msi_match_table[] = { 475 + {.compatible = "apm,xgene1-msi"}, 476 + {}, 477 + }; 478 + 479 + static int xgene_msi_probe(struct platform_device *pdev) 480 + { 481 + struct resource *res; 482 + int rc, irq_index; 483 + struct xgene_msi *xgene_msi; 484 + unsigned int cpu; 485 + int virt_msir; 486 + u32 msi_val, msi_idx; 487 + 488 + 
xgene_msi = &xgene_msi_ctrl; 489 + 490 + platform_set_drvdata(pdev, xgene_msi); 491 + 492 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 493 + xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); 494 + if (IS_ERR(xgene_msi->msi_regs)) { 495 + dev_err(&pdev->dev, "no reg space\n"); 496 + rc = -EINVAL; 497 + goto error; 498 + } 499 + xgene_msi->msi_addr = res->start; 500 + 501 + xgene_msi->num_cpus = num_possible_cpus(); 502 + 503 + rc = xgene_msi_init_allocator(xgene_msi); 504 + if (rc) { 505 + dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); 506 + goto error; 507 + } 508 + 509 + rc = xgene_allocate_domains(xgene_msi); 510 + if (rc) { 511 + dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); 512 + goto error; 513 + } 514 + 515 + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { 516 + virt_msir = platform_get_irq(pdev, irq_index); 517 + if (virt_msir < 0) { 518 + dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", 519 + irq_index); 520 + rc = -EINVAL; 521 + goto error; 522 + } 523 + xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; 524 + xgene_msi->msi_groups[irq_index].msi_grp = irq_index; 525 + xgene_msi->msi_groups[irq_index].msi = xgene_msi; 526 + } 527 + 528 + /* 529 + * MSInIRx registers are read-to-clear; before registering 530 + * interrupt handlers, read all of them to clear spurious 531 + * interrupts that may occur before the driver is probed. 
532 + */ 533 + for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { 534 + for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) 535 + msi_val = xgene_msi_ir_read(xgene_msi, irq_index, 536 + msi_idx); 537 + /* Read MSIINTn to confirm */ 538 + msi_val = xgene_msi_int_read(xgene_msi, irq_index); 539 + if (msi_val) { 540 + dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); 541 + rc = -EINVAL; 542 + goto error; 543 + } 544 + } 545 + 546 + cpu_notifier_register_begin(); 547 + 548 + for_each_online_cpu(cpu) 549 + if (xgene_msi_hwirq_alloc(cpu)) { 550 + dev_err(&pdev->dev, "failed to register MSI handlers\n"); 551 + cpu_notifier_register_done(); 552 + goto error; 553 + } 554 + 555 + rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier); 556 + if (rc) { 557 + dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); 558 + cpu_notifier_register_done(); 559 + goto error; 560 + } 561 + 562 + cpu_notifier_register_done(); 563 + 564 + xgene_msi->mchip.of_node = pdev->dev.of_node; 565 + rc = of_pci_msi_chip_add(&xgene_msi->mchip); 566 + if (rc) { 567 + dev_err(&pdev->dev, "failed to add MSI controller chip\n"); 568 + goto error_notifier; 569 + } 570 + 571 + dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); 572 + 573 + return 0; 574 + 575 + error_notifier: 576 + unregister_hotcpu_notifier(&xgene_msi_cpu_notifier); 577 + error: 578 + xgene_msi_remove(pdev); 579 + return rc; 580 + } 581 + 582 + static struct platform_driver xgene_msi_driver = { 583 + .driver = { 584 + .name = "xgene-msi", 585 + .owner = THIS_MODULE, 586 + .of_match_table = xgene_msi_match_table, 587 + }, 588 + .probe = xgene_msi_probe, 589 + .remove = xgene_msi_remove, 590 + }; 591 + 592 + static int __init xgene_pcie_msi_init(void) 593 + { 594 + return platform_driver_register(&xgene_msi_driver); 595 + } 596 + subsys_initcall(xgene_pcie_msi_init);
+21
drivers/pci/host/pci-xgene.c
··· 468 468 return 0; 469 469 } 470 470 471 + static int xgene_pcie_msi_enable(struct pci_bus *bus) 472 + { 473 + struct device_node *msi_node; 474 + 475 + msi_node = of_parse_phandle(bus->dev.of_node, 476 + "msi-parent", 0); 477 + if (!msi_node) 478 + return -ENODEV; 479 + 480 + bus->msi = of_pci_find_msi_chip_by_node(msi_node); 481 + if (!bus->msi) 482 + return -ENODEV; 483 + 484 + bus->msi->dev = &bus->dev; 485 + return 0; 486 + } 487 + 471 488 static int xgene_pcie_probe_bridge(struct platform_device *pdev) 472 489 { 473 490 struct device_node *dn = pdev->dev.of_node; ··· 520 503 &xgene_pcie_ops, port, &res); 521 504 if (!bus) 522 505 return -ENOMEM; 506 + 507 + if (IS_ENABLED(CONFIG_PCI_MSI)) 508 + if (xgene_pcie_msi_enable(bus)) 509 + dev_info(port->dev, "failed to enable MSI\n"); 523 510 524 511 pci_scan_child_bus(bus); 525 512 pci_assign_unassigned_bus_resources(bus);
+68 -82
drivers/pci/host/pcie-designware.c
··· 31 31 #define PORT_LINK_MODE_1_LANES (0x1 << 16) 32 32 #define PORT_LINK_MODE_2_LANES (0x3 << 16) 33 33 #define PORT_LINK_MODE_4_LANES (0x7 << 16) 34 + #define PORT_LINK_MODE_8_LANES (0xf << 16) 34 35 35 36 #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C 36 37 #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) ··· 39 38 #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) 40 39 #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) 41 40 #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) 41 + #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) 42 42 43 43 #define PCIE_MSI_ADDR_LO 0x820 44 44 #define PCIE_MSI_ADDR_HI 0x824 ··· 150 148 size, val); 151 149 152 150 return ret; 151 + } 152 + 153 + static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, 154 + int type, u64 cpu_addr, u64 pci_addr, u32 size) 155 + { 156 + dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, 157 + PCIE_ATU_VIEWPORT); 158 + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); 159 + dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE); 160 + dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), 161 + PCIE_ATU_LIMIT); 162 + dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET); 163 + dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); 164 + dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); 165 + dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 153 166 } 154 167 155 168 static struct irq_chip dw_msi_irq_chip = { ··· 510 493 if (pp->ops->host_init) 511 494 pp->ops->host_init(pp); 512 495 496 + if (!pp->ops->rd_other_conf) 497 + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, 498 + PCIE_ATU_TYPE_MEM, pp->mem_mod_base, 499 + pp->mem_bus_addr, pp->mem_size); 500 + 513 501 dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); 514 502 515 503 /* program correct class for RC */ ··· 537 515 return 0; 538 516 } 539 517 540 - static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) 541 - { 542 - /* Program viewport 0 : 
OUTBOUND : CFG0 */ 543 - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, 544 - PCIE_ATU_VIEWPORT); 545 - dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); 546 - dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); 547 - dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1, 548 - PCIE_ATU_LIMIT); 549 - dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); 550 - dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); 551 - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1); 552 - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 553 - } 554 - 555 - static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) 556 - { 557 - /* Program viewport 1 : OUTBOUND : CFG1 */ 558 - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, 559 - PCIE_ATU_VIEWPORT); 560 - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); 561 - dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); 562 - dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); 563 - dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1, 564 - PCIE_ATU_LIMIT); 565 - dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); 566 - dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); 567 - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 568 - } 569 - 570 - static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) 571 - { 572 - /* Program viewport 0 : OUTBOUND : MEM */ 573 - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, 574 - PCIE_ATU_VIEWPORT); 575 - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); 576 - dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); 577 - dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); 578 - dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1, 579 - PCIE_ATU_LIMIT); 580 - dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET); 581 - dw_pcie_writel_rc(pp, 
upper_32_bits(pp->mem_bus_addr), 582 - PCIE_ATU_UPPER_TARGET); 583 - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 584 - } 585 - 586 - static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) 587 - { 588 - /* Program viewport 1 : OUTBOUND : IO */ 589 - dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, 590 - PCIE_ATU_VIEWPORT); 591 - dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); 592 - dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); 593 - dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); 594 - dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1, 595 - PCIE_ATU_LIMIT); 596 - dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET); 597 - dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr), 598 - PCIE_ATU_UPPER_TARGET); 599 - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); 600 - } 601 - 602 518 static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, 603 519 u32 devfn, int where, int size, u32 *val) 604 520 { 605 - int ret = PCIBIOS_SUCCESSFUL; 606 - u32 address, busdev; 521 + int ret, type; 522 + u32 address, busdev, cfg_size; 523 + u64 cpu_addr; 524 + void __iomem *va_cfg_base; 607 525 608 526 busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | 609 527 PCIE_ATU_FUNC(PCI_FUNC(devfn)); 610 528 address = where & ~0x3; 611 529 612 530 if (bus->parent->number == pp->root_bus_nr) { 613 - dw_pcie_prog_viewport_cfg0(pp, busdev); 614 - ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, 615 - val); 616 - dw_pcie_prog_viewport_mem_outbound(pp); 531 + type = PCIE_ATU_TYPE_CFG0; 532 + cpu_addr = pp->cfg0_mod_base; 533 + cfg_size = pp->cfg0_size; 534 + va_cfg_base = pp->va_cfg0_base; 617 535 } else { 618 - dw_pcie_prog_viewport_cfg1(pp, busdev); 619 - ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, 620 - val); 621 - dw_pcie_prog_viewport_io_outbound(pp); 536 + type = PCIE_ATU_TYPE_CFG1; 537 + cpu_addr = 
pp->cfg1_mod_base; 538 + cfg_size = pp->cfg1_size; 539 + va_cfg_base = pp->va_cfg1_base; 622 540 } 541 + 542 + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, 543 + type, cpu_addr, 544 + busdev, cfg_size); 545 + ret = dw_pcie_cfg_read(va_cfg_base + address, where, size, val); 546 + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, 547 + PCIE_ATU_TYPE_IO, pp->io_mod_base, 548 + pp->io_bus_addr, pp->io_size); 623 549 624 550 return ret; 625 551 } ··· 575 605 static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, 576 606 u32 devfn, int where, int size, u32 val) 577 607 { 578 - int ret = PCIBIOS_SUCCESSFUL; 579 - u32 address, busdev; 608 + int ret, type; 609 + u32 address, busdev, cfg_size; 610 + u64 cpu_addr; 611 + void __iomem *va_cfg_base; 580 612 581 613 busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | 582 614 PCIE_ATU_FUNC(PCI_FUNC(devfn)); 583 615 address = where & ~0x3; 584 616 585 617 if (bus->parent->number == pp->root_bus_nr) { 586 - dw_pcie_prog_viewport_cfg0(pp, busdev); 587 - ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, 588 - val); 589 - dw_pcie_prog_viewport_mem_outbound(pp); 618 + type = PCIE_ATU_TYPE_CFG0; 619 + cpu_addr = pp->cfg0_mod_base; 620 + cfg_size = pp->cfg0_size; 621 + va_cfg_base = pp->va_cfg0_base; 590 622 } else { 591 - dw_pcie_prog_viewport_cfg1(pp, busdev); 592 - ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, 593 - val); 594 - dw_pcie_prog_viewport_io_outbound(pp); 623 + type = PCIE_ATU_TYPE_CFG1; 624 + cpu_addr = pp->cfg1_mod_base; 625 + cfg_size = pp->cfg1_size; 626 + va_cfg_base = pp->va_cfg1_base; 595 627 } 628 + 629 + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, 630 + type, cpu_addr, 631 + busdev, cfg_size); 632 + ret = dw_pcie_cfg_write(va_cfg_base + address, where, size, val); 633 + dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, 634 + PCIE_ATU_TYPE_IO, pp->io_mod_base, 635 + pp->io_bus_addr, pp->io_size); 596 636 597 637 return 
ret; 598 638 } ··· 756 776 case 4: 757 777 val |= PORT_LINK_MODE_4_LANES; 758 778 break; 779 + case 8: 780 + val |= PORT_LINK_MODE_8_LANES; 781 + break; 759 782 } 760 783 dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); 761 784 ··· 774 791 break; 775 792 case 4: 776 793 val |= PORT_LOGIC_LINK_WIDTH_4_LANES; 794 + break; 795 + case 8: 796 + val |= PORT_LOGIC_LINK_WIDTH_8_LANES; 777 797 break; 778 798 } 779 799 dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
+110
drivers/pci/host/pcie-iproc-bcma.c
··· 1 + /* 2 + * Copyright (C) 2015 Broadcom Corporation 3 + * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de> 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation version 2. 8 + * 9 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 10 + * kind, whether express or implied; without even the implied warranty 11 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + */ 14 + 15 + #include <linux/kernel.h> 16 + #include <linux/pci.h> 17 + #include <linux/module.h> 18 + #include <linux/slab.h> 19 + #include <linux/phy/phy.h> 20 + #include <linux/bcma/bcma.h> 21 + #include <linux/ioport.h> 22 + 23 + #include "pcie-iproc.h" 24 + 25 + 26 + /* NS: CLASS field is R/O, and set to wrong 0x200 value */ 27 + static void bcma_pcie2_fixup_class(struct pci_dev *dev) 28 + { 29 + dev->class = PCI_CLASS_BRIDGE_PCI << 8; 30 + } 31 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); 32 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); 33 + 34 + static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 35 + { 36 + struct pci_sys_data *sys = dev->sysdata; 37 + struct iproc_pcie *pcie = sys->private_data; 38 + struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); 39 + 40 + return bcma_core_irq(bdev, 5); 41 + } 42 + 43 + static int iproc_pcie_bcma_probe(struct bcma_device *bdev) 44 + { 45 + struct iproc_pcie *pcie; 46 + LIST_HEAD(res); 47 + struct resource res_mem; 48 + int ret; 49 + 50 + pcie = devm_kzalloc(&bdev->dev, sizeof(*pcie), GFP_KERNEL); 51 + if (!pcie) 52 + return -ENOMEM; 53 + 54 + pcie->dev = &bdev->dev; 55 + bcma_set_drvdata(bdev, pcie); 56 + 57 + pcie->base = bdev->io_addr; 58 + 59 + res_mem.start = bdev->addr_s[0]; 60 + res_mem.end = 
bdev->addr_s[0] + SZ_128M - 1; 61 + res_mem.name = "PCIe MEM space"; 62 + res_mem.flags = IORESOURCE_MEM; 63 + pci_add_resource(&res, &res_mem); 64 + 65 + pcie->map_irq = iproc_pcie_bcma_map_irq; 66 + 67 + ret = iproc_pcie_setup(pcie, &res); 68 + if (ret) 69 + dev_err(pcie->dev, "PCIe controller setup failed\n"); 70 + 71 + pci_free_resource_list(&res); 72 + 73 + return ret; 74 + } 75 + 76 + static void iproc_pcie_bcma_remove(struct bcma_device *bdev) 77 + { 78 + struct iproc_pcie *pcie = bcma_get_drvdata(bdev); 79 + 80 + iproc_pcie_remove(pcie); 81 + } 82 + 83 + static const struct bcma_device_id iproc_pcie_bcma_table[] = { 84 + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), 85 + {}, 86 + }; 87 + MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); 88 + 89 + static struct bcma_driver iproc_pcie_bcma_driver = { 90 + .name = KBUILD_MODNAME, 91 + .id_table = iproc_pcie_bcma_table, 92 + .probe = iproc_pcie_bcma_probe, 93 + .remove = iproc_pcie_bcma_remove, 94 + }; 95 + 96 + static int __init iproc_pcie_bcma_init(void) 97 + { 98 + return bcma_driver_register(&iproc_pcie_bcma_driver); 99 + } 100 + module_init(iproc_pcie_bcma_init); 101 + 102 + static void __exit iproc_pcie_bcma_exit(void) 103 + { 104 + bcma_driver_unregister(&iproc_pcie_bcma_driver); 105 + } 106 + module_exit(iproc_pcie_bcma_exit); 107 + 108 + MODULE_AUTHOR("Hauke Mehrtens"); 109 + MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); 110 + MODULE_LICENSE("GPL v2");
+6 -6
drivers/pci/host/pcie-iproc-platform.c
··· 69 69 return ret; 70 70 } 71 71 72 - pcie->resources = &res; 72 + pcie->map_irq = of_irq_parse_and_map_pci; 73 73 74 - ret = iproc_pcie_setup(pcie); 75 - if (ret) { 74 + ret = iproc_pcie_setup(pcie, &res); 75 + if (ret) 76 76 dev_err(pcie->dev, "PCIe controller setup failed\n"); 77 - return ret; 78 - } 79 77 80 - return 0; 78 + pci_free_resource_list(&res); 79 + 80 + return ret; 81 81 } 82 82 83 83 static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
+3 -3
drivers/pci/host/pcie-iproc.c
··· 183 183 writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN); 184 184 } 185 185 186 - int iproc_pcie_setup(struct iproc_pcie *pcie) 186 + int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) 187 187 { 188 188 int ret; 189 189 struct pci_bus *bus; ··· 211 211 pcie->sysdata.private_data = pcie; 212 212 213 213 bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, 214 - &pcie->sysdata, pcie->resources); 214 + &pcie->sysdata, res); 215 215 if (!bus) { 216 216 dev_err(pcie->dev, "unable to create PCI root bus\n"); 217 217 ret = -ENOMEM; ··· 229 229 230 230 pci_scan_child_bus(bus); 231 231 pci_assign_unassigned_bus_resources(bus); 232 - pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); 232 + pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); 233 233 pci_bus_add_devices(bus); 234 234 235 235 return 0;
+2 -2
drivers/pci/host/pcie-iproc.h
··· 29 29 struct iproc_pcie { 30 30 struct device *dev; 31 31 void __iomem *base; 32 - struct list_head *resources; 33 32 struct pci_sys_data sysdata; 34 33 struct pci_bus *root_bus; 35 34 struct phy *phy; 36 35 int irqs[IPROC_PCIE_MAX_NUM_IRQS]; 36 + int (*map_irq)(const struct pci_dev *, u8, u8); 37 37 }; 38 38 39 - int iproc_pcie_setup(struct iproc_pcie *pcie); 39 + int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); 40 40 int iproc_pcie_remove(struct iproc_pcie *pcie); 41 41 42 42 #endif /* _PCIE_IPROC_H */
+8 -9
drivers/pci/host/pcie-spear13xx.c
··· 146 146 static int spear13xx_pcie_establish_link(struct pcie_port *pp) 147 147 { 148 148 u32 val; 149 - int count = 0; 150 149 struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pp); 151 150 struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; 152 151 u32 exp_cap_off = EXP_CAP_ID_OFFSET; 152 + unsigned int retries; 153 153 154 154 if (dw_pcie_link_up(pp)) { 155 155 dev_err(pp->dev, "link already up\n"); ··· 201 201 &app_reg->app_ctrl_0); 202 202 203 203 /* check if the link is up or not */ 204 - while (!dw_pcie_link_up(pp)) { 205 - mdelay(100); 206 - count++; 207 - if (count == 10) { 208 - dev_err(pp->dev, "link Fail\n"); 209 - return -EINVAL; 204 + for (retries = 0; retries < 10; retries++) { 205 + if (dw_pcie_link_up(pp)) { 206 + dev_info(pp->dev, "link up\n"); 207 + return 0; 210 208 } 209 + mdelay(100); 211 210 } 212 - dev_info(pp->dev, "link up\n"); 213 211 214 - return 0; 212 + dev_err(pp->dev, "link Fail\n"); 213 + return -EINVAL; 215 214 } 216 215 217 216 static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)