Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.10-rc5, 902 lines

/*
 * Synopsys Designware PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/delay.h>

#include "pcie-designware.h"

/* Parameters for the waiting for link up routine */
#define LINK_WAIT_MAX_RETRIES 10
#define LINK_WAIT_USLEEP_MIN 90000
#define LINK_WAIT_USLEEP_MAX 100000

/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
#define LINK_WAIT_IATU_MIN 9000
#define LINK_WAIT_IATU_MAX 10000

/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_MODE_MASK (0x3f << 16)
#define PORT_LINK_MODE_1_LANES (0x1 << 16)
#define PORT_LINK_MODE_2_LANES (0x3 << 16)
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
#define PORT_LINK_MODE_8_LANES (0xf << 16)

#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8)

#define PCIE_MSI_ADDR_LO 0x820
#define PCIE_MSI_ADDR_HI 0x824
#define PCIE_MSI_INTR0_ENABLE 0x828
#define PCIE_MSI_INTR0_MASK 0x82C
#define PCIE_MSI_INTR0_STATUS 0x830

#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
#define PCIE_ATU_CR1 0x904
#define PCIE_ATU_TYPE_MEM (0x0 << 0)
#define PCIE_ATU_TYPE_IO (0x2 << 0)
#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
#define PCIE_ATU_CR2 0x908
#define PCIE_ATU_ENABLE (0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
#define PCIE_ATU_LOWER_BASE 0x90C
#define PCIE_ATU_UPPER_BASE 0x910
#define PCIE_ATU_LIMIT 0x914
#define PCIE_ATU_LOWER_TARGET 0x918
#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C

/*
 * iATU Unroll-specific register definitions
 * From core version 4.80, address translation is done through these
 * unrolled (per-region) registers instead of the viewport.
 */
#define PCIE_ATU_UNR_REGION_CTRL1 0x00
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
#define PCIE_ATU_UNR_LOWER_BASE 0x08
#define PCIE_ATU_UNR_UPPER_BASE 0x0C
#define PCIE_ATU_UNR_LIMIT 0x10
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
#define PCIE_ATU_UNR_UPPER_TARGET 0x18

/* Register address builder */
#define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) ((0x3 << 20) | (region << 9))

/* PCIe Port Logic registers */
#define PLR_OFFSET 0x700
#define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29)

static struct pci_ops dw_pcie_ops;

int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val)
{
	if ((uintptr_t)addr & (size - 1)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	if (size == 4)
		*val = readl(addr);
	else if (size == 2)
		*val = readw(addr);
	else if (size == 1)
		*val = readb(addr);
	else {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val)
{
	if ((uintptr_t)addr & (size - 1))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	if (size == 4)
		writel(val, addr);
	else if (size == 2)
		writew(val, addr);
	else if (size == 1)
		writeb(val, addr);
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	return PCIBIOS_SUCCESSFUL;
}

u32 dw_pcie_readl_rc(struct pcie_port *pp, u32 reg)
{
	if (pp->ops->readl_rc)
		return pp->ops->readl_rc(pp, reg);

	return readl(pp->dbi_base + reg);
}

void dw_pcie_writel_rc(struct pcie_port *pp, u32 reg, u32 val)
{
	if (pp->ops->writel_rc)
		pp->ops->writel_rc(pp, reg, val);
	else
		writel(val, pp->dbi_base + reg);
}

static u32 dw_pcie_readl_unroll(struct pcie_port *pp, u32 index, u32 reg)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	return dw_pcie_readl_rc(pp, offset + reg);
}

static void dw_pcie_writel_unroll(struct pcie_port *pp, u32 index, u32 reg,
				  u32 val)
{
	u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index);

	dw_pcie_writel_rc(pp, offset + reg, val);
}

static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	return dw_pcie_cfg_read(pp->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	return dw_pcie_cfg_write(pp->dbi_base + where, size, val);
}

static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index,
		int type, u64 cpu_addr, u64 pci_addr, u32 size)
{
	u32 retries, val;

	if (pp->iatu_unroll_enabled) {
		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_BASE,
				      lower_32_bits(cpu_addr));
		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_BASE,
				      upper_32_bits(cpu_addr));
		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LIMIT,
				      lower_32_bits(cpu_addr + size - 1));
		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_LOWER_TARGET,
				      lower_32_bits(pci_addr));
		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_UPPER_TARGET,
				      upper_32_bits(pci_addr));
		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL1,
				      type);
		dw_pcie_writel_unroll(pp, index, PCIE_ATU_UNR_REGION_CTRL2,
				      PCIE_ATU_ENABLE);
	} else {
		dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
				  PCIE_ATU_REGION_OUTBOUND | index);
		dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_BASE,
				  lower_32_bits(cpu_addr));
		dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_BASE,
				  upper_32_bits(cpu_addr));
		dw_pcie_writel_rc(pp, PCIE_ATU_LIMIT,
				  lower_32_bits(cpu_addr + size - 1));
		dw_pcie_writel_rc(pp, PCIE_ATU_LOWER_TARGET,
				  lower_32_bits(pci_addr));
		dw_pcie_writel_rc(pp, PCIE_ATU_UPPER_TARGET,
				  upper_32_bits(pci_addr));
		dw_pcie_writel_rc(pp, PCIE_ATU_CR1, type);
		dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
	}

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		if (pp->iatu_unroll_enabled)
			val = dw_pcie_readl_unroll(pp, index,
						   PCIE_ATU_UNR_REGION_CTRL2);
		else
			val = dw_pcie_readl_rc(pp, PCIE_ATU_CR2);

		if (val == PCIE_ATU_ENABLE)
			return;

		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
	}
	dev_err(pp->dev, "iATU is not being enabled\n");
}

static struct irq_chip dw_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	unsigned long val;
	int i, pos, irq;
	irqreturn_t ret = IRQ_NONE;

	for (i = 0; i < MAX_MSI_CTRLS; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4,
				    (u32 *)&val);
		if (val) {
			ret = IRQ_HANDLED;
			pos = 0;
			while ((pos = find_next_bit(&val, 32, pos)) != 32) {
				irq = irq_find_mapping(pp->irq_domain,
						       i * 32 + pos);
				dw_pcie_wr_own_conf(pp,
						PCIE_MSI_INTR0_STATUS + i * 12,
						4, 1 << pos);
				generic_handle_irq(irq);
				pos++;
			}
		}
	}

	return ret;
}

void dw_pcie_msi_init(struct pcie_port *pp)
{
	u64 msi_target;

	pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
	msi_target = virt_to_phys((void *)pp->msi_data);

	/* program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    (u32)(msi_target & 0xffffffff));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    (u32)(msi_target >> 32 & 0xffffffff));
}

static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val &= ~(1 << bit);
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base,
			    unsigned int nvec, unsigned int pos)
{
	unsigned int i;

	for (i = 0; i < nvec; i++) {
		irq_set_msi_desc_off(irq_base, i, NULL);
		/* Disable corresponding interrupt on MSI controller */
		if (pp->ops->msi_clear_irq)
			pp->ops->msi_clear_irq(pp, pos + i);
		else
			dw_pcie_msi_clear_irq(pp, pos + i);
	}

	bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec));
}

static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
{
	unsigned int res, bit, val;

	res = (irq / 32) * 12;
	bit = irq % 32;
	dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val);
	val |= 1 << bit;
	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val);
}

static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos)
{
	int irq, pos0, i;
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(desc);

	pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS,
				       order_base_2(no_irqs));
	if (pos0 < 0)
		goto no_valid_irq;

	irq = irq_find_mapping(pp->irq_domain, pos0);
	if (!irq)
		goto no_valid_irq;

	/*
	 * irq_create_mapping (called from dw_pcie_host_init) pre-allocates
	 * descs so there is no need to allocate descs here. We can therefore
	 * assume that if irq_find_mapping above returns non-zero, then the
	 * descs are also successfully allocated.
	 */

	for (i = 0; i < no_irqs; i++) {
		if (irq_set_msi_desc_off(irq, i, desc) != 0) {
			clear_irq_range(pp, irq, i, pos0);
			goto no_valid_irq;
		}
		/* Enable corresponding interrupt in MSI interrupt controller */
		if (pp->ops->msi_set_irq)
			pp->ops->msi_set_irq(pp, pos0 + i);
		else
			dw_pcie_msi_set_irq(pp, pos0 + i);
	}

	*pos = pos0;
	desc->nvec_used = no_irqs;
	desc->msi_attrib.multiple = order_base_2(no_irqs);

	return irq;

no_valid_irq:
	*pos = pos0;
	return -ENOSPC;
}

static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
{
	struct msi_msg msg;
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = virt_to_phys((void *)pp->msi_data);

	msg.address_lo = (u32)(msi_target & 0xffffffff);
	msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

	if (pp->ops->get_msi_data)
		msg.data = pp->ops->get_msi_data(pp, pos);
	else
		msg.data = pos;

	pci_write_msi_msg(irq, &msg);
}

static int dw_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
			    struct msi_desc *desc)
{
	int irq, pos;
	struct pcie_port *pp = pdev->bus->sysdata;

	if (desc->msi_attrib.is_msix)
		return -EINVAL;

	irq = assign_irq(1, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
}

static int dw_msi_setup_irqs(struct msi_controller *chip, struct pci_dev *pdev,
			     int nvec, int type)
{
#ifdef CONFIG_PCI_MSI
	int irq, pos;
	struct msi_desc *desc;
	struct pcie_port *pp = pdev->bus->sysdata;

	/* MSI-X interrupts are not supported */
	if (type == PCI_CAP_ID_MSIX)
		return -EINVAL;

	WARN_ON(!list_is_singular(&pdev->dev.msi_list));
	desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list);

	irq = assign_irq(nvec, desc, &pos);
	if (irq < 0)
		return irq;

	dw_msi_setup_msg(pp, irq, pos);

	return 0;
#else
	return -EINVAL;
#endif
}

static void dw_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct msi_desc *msi = irq_data_get_msi_desc(data);
	struct pcie_port *pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);

	clear_irq_range(pp, irq, 1, data->hwirq);
}

static struct msi_controller dw_pcie_msi_chip = {
	.setup_irq = dw_msi_setup_irq,
	.setup_irqs = dw_msi_setup_irqs,
	.teardown_irq = dw_msi_teardown_irq,
};

int dw_pcie_wait_for_link(struct pcie_port *pp)
{
	int retries;

	/* check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pp)) {
			dev_info(pp->dev, "link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	dev_err(pp->dev, "phy link never came up\n");

	return -ETIMEDOUT;
}

int dw_pcie_link_up(struct pcie_port *pp)
{
	u32 val;

	if (pp->ops->link_up)
		return pp->ops->link_up(pp);

	val = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
	return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) &&
		(!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING)));
}

static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
			   irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = dw_pcie_msi_map,
};

static u8 dw_pcie_iatu_unroll_enabled(struct pcie_port *pp)
{
	u32 val;

	val = dw_pcie_readl_rc(pp, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}

int dw_pcie_host_init(struct pcie_port *pp)
{
	struct device_node *np = pp->dev->of_node;
	struct platform_device *pdev = to_platform_device(pp->dev);
	struct pci_bus *bus, *child;
	struct resource *cfg_res;
	int i, ret;
	LIST_HEAD(res);
	struct resource_entry *win, *tmp;

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res)/2;
		pp->cfg1_size = resource_size(cfg_res)/2;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(pp->dev, "missing *config* reg space\n");
	}

	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(&pdev->dev, &res);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &res) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = pci_remap_iospace(win->res, pp->io_base);
			if (ret) {
				dev_warn(pp->dev, "error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg)/2;
			pp->cfg1_size = resource_size(pp->cfg)/2;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pp->dbi_base) {
		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start,
					    resource_size(pp->cfg));
		if (!pp->dbi_base) {
			dev_err(pp->dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
						pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(pp->dev, "error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
						pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(pp->dev, "error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-lanes", &pp->lanes);
	if (ret)
		pp->lanes = 0;

	ret = of_property_read_u32(np, "num-viewport", &pp->num_viewport);
	if (ret)
		pp->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		if (!pp->ops->msi_host_init) {
			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
						MAX_MSI_IRQS, &msi_domain_ops,
						&dw_pcie_msi_chip);
			if (!pp->irq_domain) {
				dev_err(pp->dev, "irq domain init failed\n");
				ret = -ENXIO;
				goto error;
			}

			for (i = 0; i < MAX_MSI_IRQS; i++)
				irq_create_mapping(pp->irq_domain, i);
		} else {
			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init)
		pp->ops->host_init(pp);

	pp->root_bus_nr = pp->busn->start;
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		bus = pci_scan_root_bus_msi(pp->dev, pp->root_bus_nr,
					    &dw_pcie_ops, pp, &res,
					    &dw_pcie_msi_chip);
		dw_pcie_msi_chip.dev = pp->dev;
	} else
		bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops,
					pp, &res);
	if (!bus) {
		ret = -ENOMEM;
		goto error;
	}

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

#ifdef CONFIG_ARM
	/* support old dtbs that incorrectly describe IRQs */
	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
#endif

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_resource_list(&res);
	return ret;
}

static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_read(va_cfg_base + where, size, val);
	if (pp->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;

	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	ret = dw_pcie_cfg_write(va_cfg_base + where, size, val);
	if (pp->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pp))
			return 0;
	}

	/* access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};

void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val;

	/* set the number of lanes */
	val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_MODE_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		val |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		val |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		val |= PORT_LINK_MODE_8_LANES;
		break;
	default:
		dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes);
		return;
	}
	dw_pcie_writel_rc(pp, PCIE_PORT_LINK_CONTROL, val);

	/* set link width speed control register */
	val = dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL);
	val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	switch (pp->lanes) {
	case 1:
		val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
		break;
	case 2:
		val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
		break;
	case 4:
		val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
		break;
	case 8:
		val |= PORT_LOGIC_LINK_WIDTH_8_LANES;
		break;
	}
	dw_pcie_writel_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

	/* setup RC BARs */
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_rc(pp, PCI_BASE_ADDRESS_1, 0x00000000);

	/* setup interrupt pins */
	val = dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_rc(pp, PCI_INTERRUPT_LINE, val);

	/* setup bus numbers */
	val = dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00010100;
	dw_pcie_writel_rc(pp, PCI_PRIMARY_BUS, val);

	/* setup command register */
	val = dw_pcie_readl_rc(pp, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
		PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_rc(pp, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* get iATU unroll support */
		pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
		dev_dbg(pp->dev, "iATU unroll: %s\n",
			pp->iatu_unroll_enabled ? "enabled" : "disabled");

		dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pp->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
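
For context, this core is normally instantiated by a SoC-specific glue driver: it fills in a struct pcie_port, points pp->ops at its callback table, and calls dw_pcie_host_init(). The sketch below is a minimal, hypothetical illustration of that wiring, not code from the kernel tree: the "foo" names are invented, and it assumes the ops structure is the struct pcie_host_ops declared in pcie-designware.h for this kernel version, using only the callbacks and exported helpers visible in the listing above.

/*
 * Hypothetical glue-driver sketch (illustration only).  "foo" identifiers
 * are placeholders; struct pcie_host_ops is assumed to match the
 * declaration in pcie-designware.h at v4.10-rc5.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "pcie-designware.h"

static void foo_pcie_host_init(struct pcie_port *pp)
{
	/* Program the DesignWare root-complex registers shown above. */
	dw_pcie_setup_rc(pp);

	/* Set up the MSI capture address written to PCIE_MSI_ADDR_LO/HI. */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	/* Poll PCIE_PHY_DEBUG_R1 (via dw_pcie_link_up) until the link trains. */
	dw_pcie_wait_for_link(pp);
}

static struct pcie_host_ops foo_pcie_host_ops = {
	.host_init = foo_pcie_host_init,
};

static int foo_pcie_probe(struct platform_device *pdev)
{
	struct pcie_port *pp;

	pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	pp->dev = &pdev->dev;
	pp->ops = &foo_pcie_host_ops;

	/*
	 * Real glue drivers usually ioremap a dedicated "dbi" register
	 * resource into pp->dbi_base here; if it is left NULL, the core
	 * falls back to mapping the config resource (see dw_pcie_host_init).
	 *
	 * dw_pcie_host_init() then picks up the "config" reg resource, the
	 * DT ranges, num-lanes and num-viewport, sets up MSI, and scans the
	 * root bus using dw_pcie_ops.
	 */
	return dw_pcie_host_init(pp);
}

static const struct of_device_id foo_pcie_of_match[] = {
	{ .compatible = "foo,foo-pcie" },
	{ },
};

static struct platform_driver foo_pcie_driver = {
	.driver = {
		.name = "foo-pcie",
		.of_match_table = foo_pcie_of_match,
	},
	.probe = foo_pcie_probe,
};
builtin_platform_driver(foo_pcie_driver);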