Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'remotes/lorenzo/pci/aardvark'

- Define macros for PCI_EXP_DEVCTL_PAYLOAD_* (Pali Rohár)

- Set Max Payload Size to 512 bytes per Marvell spec (Pali Rohár)

- Downgrade PIO Response Status messages to debug level (Marek Behún)

- Preserve CRS SV (Configuration Request Retry Status Software Visibility)
bit in emulated Root Control register (Pali Rohár)

- Fix issue in configuring reference clock (Pali Rohár)

- Don't clear status bits for masked interrupts (Pali Rohár)

- Don't mask unused interrupts (Pali Rohár)

- Avoid code repetition in advk_pcie_rd_conf() (Marek Behún)

- Retry config accesses on CRS response (Pali Rohár)

- Simplify emulated Root Capabilities initialization (Pali Rohár)

- Fix several link training issues (Pali Rohár)

- Fix link-up checking via LTSSM (Pali Rohár)

- Fix reporting of Data Link Layer Link Active (Pali Rohár)

- Fix emulation of W1C bits (Marek Behún)

- Fix MSI domain .alloc() method to return zero on success (Marek Behún)

- Read entire 16-bit MSI vector in MSI handler, not just low 8 bits (Marek
Behún)

- Clear Root Port I/O Space, Memory Space, and Bus Master Enable bits at
startup; PCI core will set those as necessary (Pali Rohár)

- When operating as a Root Port, set class code to "PCI Bridge" instead of
the default "Mass Storage Controller" (Pali Rohár)

- Add emulation for PCI_BRIDGE_CTL_BUS_RESET since aardvark doesn't
implement this per spec (Pali Rohár)

- Add emulation of option ROM BAR since aardvark doesn't implement this per
spec (Pali Rohár)

* remotes/lorenzo/pci/aardvark:
PCI: aardvark: Fix support for PCI_ROM_ADDRESS1 on emulated bridge
PCI: aardvark: Fix support for PCI_BRIDGE_CTL_BUS_RESET on emulated bridge
PCI: aardvark: Set PCI Bridge Class Code to PCI Bridge
PCI: aardvark: Fix support for bus mastering and PCI_COMMAND on emulated bridge
PCI: aardvark: Read all 16-bits from PCIE_MSI_PAYLOAD_REG
PCI: aardvark: Fix return value of MSI domain .alloc() method
PCI: pci-bridge-emul: Fix emulation of W1C bits
PCI: aardvark: Fix reporting Data Link Layer Link Active
PCI: aardvark: Fix checking for link up via LTSSM state
PCI: aardvark: Fix link training
PCI: aardvark: Simplify initialization of rootcap on virtual bridge
PCI: aardvark: Implement re-issuing config requests on CRS response
PCI: aardvark: Deduplicate code in advk_pcie_rd_conf()
PCI: aardvark: Do not unmask unused interrupts
PCI: aardvark: Do not clear status bits of masked interrupts
PCI: aardvark: Fix configuring Reference clock
PCI: aardvark: Fix preserving PCI_EXP_RTCTL_CRSSVE flag on emulated bridge
PCI: aardvark: Don't spam about PIO Response Status
PCI: aardvark: Fix PCIe Max Payload Size setting
PCI: Add PCI_EXP_DEVCTL_PAYLOAD_* macros

+332 -180
+313 -180
drivers/pci/controller/pci-aardvark.c
··· 31 31 /* PCIe core registers */ 32 32 #define PCIE_CORE_DEV_ID_REG 0x0 33 33 #define PCIE_CORE_CMD_STATUS_REG 0x4 34 - #define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0) 35 - #define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1) 36 - #define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2) 37 34 #define PCIE_CORE_DEV_REV_REG 0x8 35 + #define PCIE_CORE_EXP_ROM_BAR_REG 0x30 38 36 #define PCIE_CORE_PCIEXP_CAP 0xc0 39 37 #define PCIE_CORE_ERR_CAPCTL_REG 0x118 40 38 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) ··· 97 99 #define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10) 98 100 #define PCIE_CORE_REF_CLK_REG (CONTROL_BASE_ADDR + 0x14) 99 101 #define PCIE_CORE_REF_CLK_TX_ENABLE BIT(1) 102 + #define PCIE_CORE_REF_CLK_RX_ENABLE BIT(2) 100 103 #define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30) 101 104 #define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40) 102 105 #define PCIE_MSG_PM_PME_MASK BIT(7) ··· 105 106 #define PCIE_ISR0_MSI_INT_PENDING BIT(24) 106 107 #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) 107 108 #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) 108 - #define PCIE_ISR0_ALL_MASK GENMASK(26, 0) 109 + #define PCIE_ISR0_ALL_MASK GENMASK(31, 0) 109 110 #define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48) 110 111 #define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) 111 112 #define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) 112 113 #define PCIE_ISR1_FLUSH BIT(5) 113 114 #define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val)) 114 - #define PCIE_ISR1_ALL_MASK GENMASK(11, 4) 115 + #define PCIE_ISR1_ALL_MASK GENMASK(31, 0) 115 116 #define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) 116 117 #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) 117 118 #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) 118 119 #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) 119 120 #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) 121 + #define PCIE_MSI_DATA_MASK GENMASK(15, 0) 120 122 121 123 /* PCIe window configuration */ 122 124 #define OB_WIN_BASE_ADDR 0x4c00 ··· 164 164 #define CFG_REG (LMI_BASE_ADDR + 0x0) 
165 165 #define LTSSM_SHIFT 24 166 166 #define LTSSM_MASK 0x3f 167 - #define LTSSM_L0 0x10 168 167 #define RC_BAR_CONFIG 0x300 168 + 169 + /* LTSSM values in CFG_REG */ 170 + enum { 171 + LTSSM_DETECT_QUIET = 0x0, 172 + LTSSM_DETECT_ACTIVE = 0x1, 173 + LTSSM_POLLING_ACTIVE = 0x2, 174 + LTSSM_POLLING_COMPLIANCE = 0x3, 175 + LTSSM_POLLING_CONFIGURATION = 0x4, 176 + LTSSM_CONFIG_LINKWIDTH_START = 0x5, 177 + LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6, 178 + LTSSM_CONFIG_LANENUM_ACCEPT = 0x7, 179 + LTSSM_CONFIG_LANENUM_WAIT = 0x8, 180 + LTSSM_CONFIG_COMPLETE = 0x9, 181 + LTSSM_CONFIG_IDLE = 0xa, 182 + LTSSM_RECOVERY_RCVR_LOCK = 0xb, 183 + LTSSM_RECOVERY_SPEED = 0xc, 184 + LTSSM_RECOVERY_RCVR_CFG = 0xd, 185 + LTSSM_RECOVERY_IDLE = 0xe, 186 + LTSSM_L0 = 0x10, 187 + LTSSM_RX_L0S_ENTRY = 0x11, 188 + LTSSM_RX_L0S_IDLE = 0x12, 189 + LTSSM_RX_L0S_FTS = 0x13, 190 + LTSSM_TX_L0S_ENTRY = 0x14, 191 + LTSSM_TX_L0S_IDLE = 0x15, 192 + LTSSM_TX_L0S_FTS = 0x16, 193 + LTSSM_L1_ENTRY = 0x17, 194 + LTSSM_L1_IDLE = 0x18, 195 + LTSSM_L2_IDLE = 0x19, 196 + LTSSM_L2_TRANSMIT_WAKE = 0x1a, 197 + LTSSM_DISABLED = 0x20, 198 + LTSSM_LOOPBACK_ENTRY_MASTER = 0x21, 199 + LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22, 200 + LTSSM_LOOPBACK_EXIT_MASTER = 0x23, 201 + LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24, 202 + LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25, 203 + LTSSM_LOOPBACK_EXIT_SLAVE = 0x26, 204 + LTSSM_HOT_RESET = 0x27, 205 + LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28, 206 + LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29, 207 + LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a, 208 + LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b, 209 + }; 210 + 169 211 #define VENDOR_ID_REG (LMI_BASE_ADDR + 0x44) 170 212 171 213 /* PCIe core controller registers */ ··· 240 198 #define PCIE_IRQ_MSI_INT2_DET BIT(21) 241 199 #define PCIE_IRQ_RC_DBELL_DET BIT(22) 242 200 #define PCIE_IRQ_EP_STATUS BIT(23) 243 - #define PCIE_IRQ_ALL_MASK 0xfff0fb 201 + #define PCIE_IRQ_ALL_MASK GENMASK(31, 0) 244 202 #define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT 245 203 246 204 
/* Transaction types */ ··· 299 257 return readl(pcie->base + reg); 300 258 } 301 259 302 - static inline u16 advk_read16(struct advk_pcie *pcie, u64 reg) 260 + static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie) 303 261 { 304 - return advk_readl(pcie, (reg & ~0x3)) >> ((reg & 0x3) * 8); 305 - } 306 - 307 - static int advk_pcie_link_up(struct advk_pcie *pcie) 308 - { 309 - u32 val, ltssm_state; 262 + u32 val; 263 + u8 ltssm_state; 310 264 311 265 val = advk_readl(pcie, CFG_REG); 312 266 ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK; 313 - return ltssm_state >= LTSSM_L0; 267 + return ltssm_state; 268 + } 269 + 270 + static inline bool advk_pcie_link_up(struct advk_pcie *pcie) 271 + { 272 + /* check if LTSSM is in normal operation - some L* state */ 273 + u8 ltssm_state = advk_pcie_ltssm_state(pcie); 274 + return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED; 275 + } 276 + 277 + static inline bool advk_pcie_link_active(struct advk_pcie *pcie) 278 + { 279 + /* 280 + * According to PCIe Base specification 3.0, Table 4-14: Link 281 + * Status Mapped to the LTSSM, and 4.2.6.3.6 Configuration.Idle 282 + * is Link Up mapped to LTSSM Configuration.Idle, Recovery, L0, 283 + * L0s, L1 and L2 states. And according to 3.2.1. Data Link 284 + * Control and Management State Machine Rules is DL Up status 285 + * reported in DL Active state. 286 + */ 287 + u8 ltssm_state = advk_pcie_ltssm_state(pcie); 288 + return ltssm_state >= LTSSM_CONFIG_IDLE && ltssm_state < LTSSM_DISABLED; 289 + } 290 + 291 + static inline bool advk_pcie_link_training(struct advk_pcie *pcie) 292 + { 293 + /* 294 + * According to PCIe Base specification 3.0, Table 4-14: Link 295 + * Status Mapped to the LTSSM is Link Training mapped to LTSSM 296 + * Configuration and Recovery states. 
297 + */ 298 + u8 ltssm_state = advk_pcie_ltssm_state(pcie); 299 + return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START && 300 + ltssm_state < LTSSM_L0) || 301 + (ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 && 302 + ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3)); 314 303 } 315 304 316 305 static int advk_pcie_wait_for_link(struct advk_pcie *pcie) ··· 364 291 size_t retries; 365 292 366 293 for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) { 367 - if (!advk_pcie_link_up(pcie)) 294 + if (advk_pcie_link_training(pcie)) 368 295 break; 369 296 udelay(RETRAIN_WAIT_USLEEP_US); 370 297 } ··· 372 299 373 300 static void advk_pcie_issue_perst(struct advk_pcie *pcie) 374 301 { 375 - u32 reg; 376 - 377 302 if (!pcie->reset_gpio) 378 303 return; 379 - 380 - /* 381 - * As required by PCI Express spec (PCI Express Base Specification, REV. 382 - * 4.0 PCI Express, February 19 2014, 6.6.1 Conventional Reset) a delay 383 - * for at least 100ms after de-asserting PERST# signal is needed before 384 - * link training is enabled. So ensure that link training is disabled 385 - * prior de-asserting PERST# signal to fulfill that PCI Express spec 386 - * requirement. 387 - */ 388 - reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); 389 - reg &= ~LINK_TRAINING_EN; 390 - advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); 391 304 392 305 /* 10ms delay is needed for some cards */ 393 306 dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n"); ··· 382 323 gpiod_set_value_cansleep(pcie->reset_gpio, 0); 383 324 } 384 325 385 - static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen) 326 + static void advk_pcie_train_link(struct advk_pcie *pcie) 386 327 { 387 - int ret, neg_gen; 328 + struct device *dev = &pcie->pdev->dev; 388 329 u32 reg; 330 + int ret; 389 331 390 - /* Setup link speed */ 332 + /* 333 + * Setup PCIe rev / gen compliance based on device tree property 334 + * 'max-link-speed' which also forces maximal link speed. 
335 + */ 391 336 reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); 392 337 reg &= ~PCIE_GEN_SEL_MSK; 393 - if (gen == 3) 338 + if (pcie->link_gen == 3) 394 339 reg |= SPEED_GEN_3; 395 - else if (gen == 2) 340 + else if (pcie->link_gen == 2) 396 341 reg |= SPEED_GEN_2; 397 342 else 398 343 reg |= SPEED_GEN_1; 399 344 advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); 400 345 401 346 /* 402 - * Enable link training. This is not needed in every call to this 403 - * function, just once suffices, but it does not break anything either. 347 + * Set maximal link speed value also into PCIe Link Control 2 register. 348 + * Armada 3700 Functional Specification says that default value is based 349 + * on SPEED_GEN but tests showed that default value is always 8.0 GT/s. 404 350 */ 351 + reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2); 352 + reg &= ~PCI_EXP_LNKCTL2_TLS; 353 + if (pcie->link_gen == 3) 354 + reg |= PCI_EXP_LNKCTL2_TLS_8_0GT; 355 + else if (pcie->link_gen == 2) 356 + reg |= PCI_EXP_LNKCTL2_TLS_5_0GT; 357 + else 358 + reg |= PCI_EXP_LNKCTL2_TLS_2_5GT; 359 + advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2); 360 + 361 + /* Enable link training after selecting PCIe generation */ 405 362 reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); 406 363 reg |= LINK_TRAINING_EN; 407 364 advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); 408 - 409 - /* 410 - * Start link training immediately after enabling it. 411 - * This solves problems for some buggy cards. 
412 - */ 413 - reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL); 414 - reg |= PCI_EXP_LNKCTL_RL; 415 - advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL); 416 - 417 - ret = advk_pcie_wait_for_link(pcie); 418 - if (ret) 419 - return ret; 420 - 421 - reg = advk_read16(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKSTA); 422 - neg_gen = reg & PCI_EXP_LNKSTA_CLS; 423 - 424 - return neg_gen; 425 - } 426 - 427 - static void advk_pcie_train_link(struct advk_pcie *pcie) 428 - { 429 - struct device *dev = &pcie->pdev->dev; 430 - int neg_gen = -1, gen; 431 365 432 366 /* 433 367 * Reset PCIe card via PERST# signal. Some cards are not detected ··· 432 380 * PERST# signal could have been asserted by pinctrl subsystem before 433 381 * probe() callback has been called or issued explicitly by reset gpio 434 382 * function advk_pcie_issue_perst(), making the endpoint going into 435 - * fundamental reset. As required by PCI Express spec a delay for at 436 - * least 100ms after such a reset before link training is needed. 383 + * fundamental reset. As required by PCI Express spec (PCI Express 384 + * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1 385 + * Conventional Reset) a delay for at least 100ms after such a reset 386 + * before sending a Configuration Request to the device is needed. 387 + * So wait until PCIe link is up. Function advk_pcie_wait_for_link() 388 + * waits for link at least 900ms. 437 389 */ 438 - msleep(PCI_PM_D3COLD_WAIT); 439 - 440 - /* 441 - * Try link training at link gen specified by device tree property 442 - * 'max-link-speed'. If this fails, iteratively train at lower gen. 443 - */ 444 - for (gen = pcie->link_gen; gen > 0; --gen) { 445 - neg_gen = advk_pcie_train_at_gen(pcie, gen); 446 - if (neg_gen > 0) 447 - break; 448 - } 449 - 450 - if (neg_gen < 0) 451 - goto err; 452 - 453 - /* 454 - * After successful training if negotiated gen is lower than requested, 455 - * train again on negotiated gen. 
This solves some stability issues for 456 - * some buggy gen1 cards. 457 - */ 458 - if (neg_gen < gen) { 459 - gen = neg_gen; 460 - neg_gen = advk_pcie_train_at_gen(pcie, gen); 461 - } 462 - 463 - if (neg_gen == gen) { 464 - dev_info(dev, "link up at gen %i\n", gen); 465 - return; 466 - } 467 - 468 - err: 469 - dev_err(dev, "link never came up\n"); 390 + ret = advk_pcie_wait_for_link(pcie); 391 + if (ret < 0) 392 + dev_err(dev, "link never came up\n"); 393 + else 394 + dev_info(dev, "link up\n"); 470 395 } 471 396 472 397 /* ··· 480 451 u32 reg; 481 452 int i; 482 453 483 - /* Enable TX */ 454 + /* 455 + * Configure PCIe Reference clock. Direction is from the PCIe 456 + * controller to the endpoint card, so enable transmitting of 457 + * Reference clock differential signal off-chip and disable 458 + * receiving off-chip differential signal. 459 + */ 484 460 reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG); 485 461 reg |= PCIE_CORE_REF_CLK_TX_ENABLE; 462 + reg &= ~PCIE_CORE_REF_CLK_RX_ENABLE; 486 463 advk_writel(pcie, reg, PCIE_CORE_REF_CLK_REG); 487 464 488 465 /* Set to Direct mode */ ··· 512 477 reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL; 513 478 advk_writel(pcie, reg, VENDOR_ID_REG); 514 479 480 + /* 481 + * Change Class Code of PCI Bridge device to PCI Bridge (0x600400), 482 + * because the default value is Mass storage controller (0x010400). 483 + * 484 + * Note that this Aardvark PCI Bridge does not have compliant Type 1 485 + * Configuration Space and it even cannot be accessed via Aardvark's 486 + * PCI config space access method. Something like config space is 487 + * available in internal Aardvark registers starting at offset 0x0 488 + * and is reported as Type 0. In range 0x10 - 0x34 it has totally 489 + * different registers. 490 + * 491 + * Therefore driver uses emulation of PCI Bridge which emulates 492 + * access to configuration space via internal Aardvark registers or 493 + * emulated configuration buffer. 
494 + */ 495 + reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG); 496 + reg &= ~0xffffff00; 497 + reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8; 498 + advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG); 499 + 500 + /* Disable Root Bridge I/O space, memory space and bus mastering */ 501 + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); 502 + reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); 503 + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG); 504 + 515 505 /* Set Advanced Error Capabilities and Control PF0 register */ 516 506 reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX | 517 507 PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN | ··· 548 488 reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL); 549 489 reg &= ~PCI_EXP_DEVCTL_RELAX_EN; 550 490 reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN; 491 + reg &= ~PCI_EXP_DEVCTL_PAYLOAD; 551 492 reg &= ~PCI_EXP_DEVCTL_READRQ; 552 - reg |= PCI_EXP_DEVCTL_PAYLOAD; /* Set max payload size */ 493 + reg |= PCI_EXP_DEVCTL_PAYLOAD_512B; 553 494 reg |= PCI_EXP_DEVCTL_READRQ_512B; 554 495 advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL); 555 496 ··· 635 574 advk_pcie_disable_ob_win(pcie, i); 636 575 637 576 advk_pcie_train_link(pcie); 638 - 639 - /* 640 - * FIXME: The following register update is suspicious. This register is 641 - * applicable only when the PCI controller is configured for Endpoint 642 - * mode, not as a Root Complex. But apparently when this code is 643 - * removed, some cards stop working. This should be investigated and 644 - * a comment explaining this should be put here. 
645 - */ 646 - reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); 647 - reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | 648 - PCIE_CORE_CMD_IO_ACCESS_EN | 649 - PCIE_CORE_CMD_MEM_IO_REQ_EN; 650 - advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG); 651 577 } 652 578 653 579 static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val) ··· 643 595 u32 reg; 644 596 unsigned int status; 645 597 char *strcomp_status, *str_posted; 598 + int ret; 646 599 647 600 reg = advk_readl(pcie, PIO_STAT); 648 601 status = (reg & PIO_COMPLETION_STATUS_MASK) >> ··· 668 619 case PIO_COMPLETION_STATUS_OK: 669 620 if (reg & PIO_ERR_STATUS) { 670 621 strcomp_status = "COMP_ERR"; 622 + ret = -EFAULT; 671 623 break; 672 624 } 673 625 /* Get the read result */ ··· 676 626 *val = advk_readl(pcie, PIO_RD_DATA); 677 627 /* No error */ 678 628 strcomp_status = NULL; 629 + ret = 0; 679 630 break; 680 631 case PIO_COMPLETION_STATUS_UR: 681 632 strcomp_status = "UR"; 633 + ret = -EOPNOTSUPP; 682 634 break; 683 635 case PIO_COMPLETION_STATUS_CRS: 684 636 if (allow_crs && val) { ··· 698 646 */ 699 647 *val = CFG_RD_CRS_VAL; 700 648 strcomp_status = NULL; 649 + ret = 0; 701 650 break; 702 651 } 703 652 /* PCIe r4.0, sec 2.3.2, says: ··· 714 661 * Request and taking appropriate action, e.g., complete the 715 662 * Request to the host as a failed transaction. 716 663 * 717 - * To simplify implementation do not re-issue the Configuration 718 - * Request and complete the Request as a failed transaction. 664 + * So return -EAGAIN and caller (pci-aardvark.c driver) will 665 + * re-issue request again up to the PIO_RETRY_CNT retries. 
719 666 */ 720 667 strcomp_status = "CRS"; 668 + ret = -EAGAIN; 721 669 break; 722 670 case PIO_COMPLETION_STATUS_CA: 723 671 strcomp_status = "CA"; 672 + ret = -ECANCELED; 724 673 break; 725 674 default: 726 675 strcomp_status = "Unknown"; 676 + ret = -EINVAL; 727 677 break; 728 678 } 729 679 730 680 if (!strcomp_status) 731 - return 0; 681 + return ret; 732 682 733 683 if (reg & PIO_NON_POSTED_REQ) 734 684 str_posted = "Non-posted"; 735 685 else 736 686 str_posted = "Posted"; 737 687 738 - dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n", 688 + dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n", 739 689 str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS)); 740 690 741 - return -EFAULT; 691 + return ret; 742 692 } 743 693 744 694 static int advk_pcie_wait_pio(struct advk_pcie *pcie) ··· 749 693 struct device *dev = &pcie->pdev->dev; 750 694 int i; 751 695 752 - for (i = 0; i < PIO_RETRY_CNT; i++) { 696 + for (i = 1; i <= PIO_RETRY_CNT; i++) { 753 697 u32 start, isr; 754 698 755 699 start = advk_readl(pcie, PIO_START); 756 700 isr = advk_readl(pcie, PIO_ISR); 757 701 if (!start && isr) 758 - return 0; 702 + return i; 759 703 udelay(PIO_RETRY_DELAY); 760 704 } 761 705 ··· 763 707 return -ETIMEDOUT; 764 708 } 765 709 710 + static pci_bridge_emul_read_status_t 711 + advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge, 712 + int reg, u32 *value) 713 + { 714 + struct advk_pcie *pcie = bridge->data; 715 + 716 + switch (reg) { 717 + case PCI_COMMAND: 718 + *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); 719 + return PCI_BRIDGE_EMUL_HANDLED; 720 + 721 + case PCI_ROM_ADDRESS1: 722 + *value = advk_readl(pcie, PCIE_CORE_EXP_ROM_BAR_REG); 723 + return PCI_BRIDGE_EMUL_HANDLED; 724 + 725 + case PCI_INTERRUPT_LINE: { 726 + /* 727 + * From the whole 32bit register we support reading from HW only 728 + * one bit: PCI_BRIDGE_CTL_BUS_RESET. 729 + * Other bits are retrieved only from emulated config buffer. 
730 + */ 731 + __le32 *cfgspace = (__le32 *)&bridge->conf; 732 + u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]); 733 + if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN) 734 + val |= PCI_BRIDGE_CTL_BUS_RESET << 16; 735 + else 736 + val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16); 737 + *value = val; 738 + return PCI_BRIDGE_EMUL_HANDLED; 739 + } 740 + 741 + default: 742 + return PCI_BRIDGE_EMUL_NOT_HANDLED; 743 + } 744 + } 745 + 746 + static void 747 + advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge, 748 + int reg, u32 old, u32 new, u32 mask) 749 + { 750 + struct advk_pcie *pcie = bridge->data; 751 + 752 + switch (reg) { 753 + case PCI_COMMAND: 754 + advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG); 755 + break; 756 + 757 + case PCI_ROM_ADDRESS1: 758 + advk_writel(pcie, new, PCIE_CORE_EXP_ROM_BAR_REG); 759 + break; 760 + 761 + case PCI_INTERRUPT_LINE: 762 + if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) { 763 + u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG); 764 + if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16)) 765 + val |= HOT_RESET_GEN; 766 + else 767 + val &= ~HOT_RESET_GEN; 768 + advk_writel(pcie, val, PCIE_CORE_CTRL1_REG); 769 + } 770 + break; 771 + 772 + default: 773 + break; 774 + } 775 + } 766 776 767 777 static pci_bridge_emul_read_status_t 768 778 advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, ··· 845 723 case PCI_EXP_RTCTL: { 846 724 u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); 847 725 *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE; 726 + *value |= le16_to_cpu(bridge->pcie_conf.rootctl) & PCI_EXP_RTCTL_CRSSVE; 848 727 *value |= PCI_EXP_RTCAP_CRSVIS << 16; 849 728 return PCI_BRIDGE_EMUL_HANDLED; 850 729 } ··· 857 734 return PCI_BRIDGE_EMUL_HANDLED; 858 735 } 859 736 737 + case PCI_EXP_LNKCAP: { 738 + u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); 739 + /* 740 + * PCI_EXP_LNKCAP_DLLLARC bit is hardwired in aardvark HW to 0. 
741 + * But support for PCI_EXP_LNKSTA_DLLLA is emulated via ltssm 742 + * state so explicitly enable PCI_EXP_LNKCAP_DLLLARC flag. 743 + */ 744 + val |= PCI_EXP_LNKCAP_DLLLARC; 745 + *value = val; 746 + return PCI_BRIDGE_EMUL_HANDLED; 747 + } 748 + 860 749 case PCI_EXP_LNKCTL: { 861 750 /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */ 862 751 u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) & 863 752 ~(PCI_EXP_LNKSTA_LT << 16); 864 - if (!advk_pcie_link_up(pcie)) 753 + if (advk_pcie_link_training(pcie)) 865 754 val |= (PCI_EXP_LNKSTA_LT << 16); 755 + if (advk_pcie_link_active(pcie)) 756 + val |= (PCI_EXP_LNKSTA_DLLLA << 16); 866 757 *value = val; 867 758 return PCI_BRIDGE_EMUL_HANDLED; 868 759 } ··· 884 747 case PCI_CAP_LIST_ID: 885 748 case PCI_EXP_DEVCAP: 886 749 case PCI_EXP_DEVCTL: 887 - case PCI_EXP_LNKCAP: 888 750 *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); 889 751 return PCI_BRIDGE_EMUL_HANDLED; 890 752 default: ··· 930 794 } 931 795 932 796 static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = { 797 + .read_base = advk_pci_bridge_emul_base_conf_read, 798 + .write_base = advk_pci_bridge_emul_base_conf_write, 933 799 .read_pcie = advk_pci_bridge_emul_pcie_conf_read, 934 800 .write_pcie = advk_pci_bridge_emul_pcie_conf_write, 935 801 }; ··· 943 805 static int advk_sw_pci_bridge_init(struct advk_pcie *pcie) 944 806 { 945 807 struct pci_bridge_emul *bridge = &pcie->bridge; 946 - int ret; 947 808 948 809 bridge->conf.vendor = 949 810 cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff); ··· 962 825 /* Support interrupt A for MSI feature */ 963 826 bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE; 964 827 828 + /* Indicates supports for Completion Retry Status */ 829 + bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); 830 + 965 831 bridge->has_pcie = true; 966 832 bridge->data = pcie; 967 833 bridge->ops = &advk_pci_bridge_emul_ops; 968 834 969 - /* PCIe config space can be initialized after 
pci_bridge_emul_init() */ 970 - ret = pci_bridge_emul_init(bridge, 0); 971 - if (ret < 0) 972 - return ret; 973 - 974 - /* Indicates supports for Completion Retry Status */ 975 - bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS); 976 - 977 - return 0; 835 + return pci_bridge_emul_init(bridge, 0); 978 836 } 979 837 980 838 static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, ··· 1021 889 int where, int size, u32 *val) 1022 890 { 1023 891 struct advk_pcie *pcie = bus->sysdata; 892 + int retry_count; 1024 893 bool allow_crs; 1025 894 u32 reg; 1026 895 int ret; ··· 1044 911 (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) & 1045 912 PCI_EXP_RTCTL_CRSSVE); 1046 913 1047 - if (advk_pcie_pio_is_running(pcie)) { 1048 - /* 1049 - * If it is possible return Completion Retry Status so caller 1050 - * tries to issue the request again instead of failing. 1051 - */ 1052 - if (allow_crs) { 1053 - *val = CFG_RD_CRS_VAL; 1054 - return PCIBIOS_SUCCESSFUL; 1055 - } 1056 - *val = 0xffffffff; 1057 - return PCIBIOS_SET_FAILED; 1058 - } 914 + if (advk_pcie_pio_is_running(pcie)) 915 + goto try_crs; 1059 916 1060 917 /* Program the control register */ 1061 918 reg = advk_readl(pcie, PIO_CTRL); ··· 1064 941 /* Program the data strobe */ 1065 942 advk_writel(pcie, 0xf, PIO_WR_DATA_STRB); 1066 943 1067 - /* Clear PIO DONE ISR and start the transfer */ 1068 - advk_writel(pcie, 1, PIO_ISR); 1069 - advk_writel(pcie, 1, PIO_START); 944 + retry_count = 0; 945 + do { 946 + /* Clear PIO DONE ISR and start the transfer */ 947 + advk_writel(pcie, 1, PIO_ISR); 948 + advk_writel(pcie, 1, PIO_START); 1070 949 1071 - ret = advk_pcie_wait_pio(pcie); 1072 - if (ret < 0) { 1073 - /* 1074 - * If it is possible return Completion Retry Status so caller 1075 - * tries to issue the request again instead of failing. 
1076 - */ 1077 - if (allow_crs) { 1078 - *val = CFG_RD_CRS_VAL; 1079 - return PCIBIOS_SUCCESSFUL; 1080 - } 1081 - *val = 0xffffffff; 1082 - return PCIBIOS_SET_FAILED; 1083 - } 950 + ret = advk_pcie_wait_pio(pcie); 951 + if (ret < 0) 952 + goto try_crs; 1084 953 1085 - /* Check PIO status and get the read result */ 1086 - ret = advk_pcie_check_pio_status(pcie, allow_crs, val); 1087 - if (ret < 0) { 1088 - *val = 0xffffffff; 1089 - return PCIBIOS_SET_FAILED; 1090 - } 954 + retry_count += ret; 955 + 956 + /* Check PIO status and get the read result */ 957 + ret = advk_pcie_check_pio_status(pcie, allow_crs, val); 958 + } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT); 959 + 960 + if (ret < 0) 961 + goto fail; 1091 962 1092 963 if (size == 1) 1093 964 *val = (*val >> (8 * (where & 3))) & 0xff; ··· 1089 972 *val = (*val >> (8 * (where & 3))) & 0xffff; 1090 973 1091 974 return PCIBIOS_SUCCESSFUL; 975 + 976 + try_crs: 977 + /* 978 + * If it is possible, return Completion Retry Status so that caller 979 + * tries to issue the request again instead of failing. 
980 + */ 981 + if (allow_crs) { 982 + *val = CFG_RD_CRS_VAL; 983 + return PCIBIOS_SUCCESSFUL; 984 + } 985 + 986 + fail: 987 + *val = 0xffffffff; 988 + return PCIBIOS_SET_FAILED; 1092 989 } 1093 990 1094 991 static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, ··· 1111 980 struct advk_pcie *pcie = bus->sysdata; 1112 981 u32 reg; 1113 982 u32 data_strobe = 0x0; 983 + int retry_count; 1114 984 int offset; 1115 985 int ret; 1116 986 ··· 1153 1021 /* Program the data strobe */ 1154 1022 advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB); 1155 1023 1156 - /* Clear PIO DONE ISR and start the transfer */ 1157 - advk_writel(pcie, 1, PIO_ISR); 1158 - advk_writel(pcie, 1, PIO_START); 1024 + retry_count = 0; 1025 + do { 1026 + /* Clear PIO DONE ISR and start the transfer */ 1027 + advk_writel(pcie, 1, PIO_ISR); 1028 + advk_writel(pcie, 1, PIO_START); 1159 1029 1160 - ret = advk_pcie_wait_pio(pcie); 1161 - if (ret < 0) 1162 - return PCIBIOS_SET_FAILED; 1030 + ret = advk_pcie_wait_pio(pcie); 1031 + if (ret < 0) 1032 + return PCIBIOS_SET_FAILED; 1163 1033 1164 - ret = advk_pcie_check_pio_status(pcie, false, NULL); 1165 - if (ret < 0) 1166 - return PCIBIOS_SET_FAILED; 1034 + retry_count += ret; 1167 1035 1168 - return PCIBIOS_SUCCESSFUL; 1036 + ret = advk_pcie_check_pio_status(pcie, false, NULL); 1037 + } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT); 1038 + 1039 + return ret < 0 ? 
PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL; 1169 1040 } 1170 1041 1171 1042 static struct pci_ops advk_pcie_ops = { ··· 1217 1082 domain->host_data, handle_simple_irq, 1218 1083 NULL, NULL); 1219 1084 1220 - return hwirq; 1085 + return 0; 1221 1086 } 1222 1087 1223 1088 static void advk_msi_irq_domain_free(struct irq_domain *domain, ··· 1398 1263 if (!(BIT(msi_idx) & msi_status)) 1399 1264 continue; 1400 1265 1266 + /* 1267 + * msi_idx contains bits [4:0] of the msi_data and msi_data 1268 + * contains 16bit MSI interrupt number 1269 + */ 1401 1270 advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); 1402 - msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF; 1271 + msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & PCIE_MSI_DATA_MASK; 1403 1272 generic_handle_irq(msi_data); 1404 1273 } 1405 1274 ··· 1424 1285 isr1_val = advk_readl(pcie, PCIE_ISR1_REG); 1425 1286 isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); 1426 1287 isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); 1427 - 1428 - if (!isr0_status && !isr1_status) { 1429 - advk_writel(pcie, isr0_val, PCIE_ISR0_REG); 1430 - advk_writel(pcie, isr1_val, PCIE_ISR1_REG); 1431 - return; 1432 - } 1433 1288 1434 1289 /* Process MSI interrupts */ 1435 1290 if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
+13
drivers/pci/pci-bridge-emul.c
··· 431 431 /* Clear the W1C bits */ 432 432 new &= ~((value << shift) & (behavior[reg / 4].w1c & mask)); 433 433 434 + /* Save the new value with the cleared W1C bits into the cfgspace */ 434 435 cfgspace[reg / 4] = cpu_to_le32(new); 436 + 437 + /* 438 + * Clear the W1C bits not specified by the write mask, so that the 439 + * write_op() does not clear them. 440 + */ 441 + new &= ~(behavior[reg / 4].w1c & ~mask); 442 + 443 + /* 444 + * Set the W1C bits specified by the write mask, so that write_op() 445 + * knows about that they are to be cleared. 446 + */ 447 + new |= (value << shift) & (behavior[reg / 4].w1c & mask); 435 448 436 449 if (write_op) 437 450 write_op(bridge, reg, old, new, mask);
+6
include/uapi/linux/pci_regs.h
··· 504 504 #define PCI_EXP_DEVCTL_URRE 0x0008 /* Unsupported Request Reporting En. */ 505 505 #define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */ 506 506 #define PCI_EXP_DEVCTL_PAYLOAD 0x00e0 /* Max_Payload_Size */ 507 + #define PCI_EXP_DEVCTL_PAYLOAD_128B 0x0000 /* 128 Bytes */ 508 + #define PCI_EXP_DEVCTL_PAYLOAD_256B 0x0020 /* 256 Bytes */ 509 + #define PCI_EXP_DEVCTL_PAYLOAD_512B 0x0040 /* 512 Bytes */ 510 + #define PCI_EXP_DEVCTL_PAYLOAD_1024B 0x0060 /* 1024 Bytes */ 511 + #define PCI_EXP_DEVCTL_PAYLOAD_2048B 0x0080 /* 2048 Bytes */ 512 + #define PCI_EXP_DEVCTL_PAYLOAD_4096B 0x00a0 /* 4096 Bytes */ 507 513 #define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */ 508 514 #define PCI_EXP_DEVCTL_PHANTOM 0x0200 /* Phantom Functions Enable */ 509 515 #define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */