Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Add MSI support for XLP9XX

In XLP9XX, the interrupt routing table for MSI-X has been moved to the
PCIe controller's config space from PIC. There are also 32 MSI-X
interrupts available per link on XLP9XX.

Update XLP MSI/MSI-X code to handle this.

Signed-off-by: Ganesan Ramalingam <ganesanr@broadcom.com>
Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Cc: g@linux-mips.org
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/6912/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Authored by Ganesan Ramalingam and committed by Ralf Baechle.
d66f3f0e 1c983986

+156 -51
+14
arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
··· 69 69 #define PCIE_9XX_BYTE_SWAP_IO_BASE 0x25e 70 70 #define PCIE_9XX_BYTE_SWAP_IO_LIM 0x25f 71 71 72 + #define PCIE_9XX_BRIDGE_MSIX_ADDR_BASE 0x264 73 + #define PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT 0x265 74 + #define PCIE_9XX_MSI_STATUS 0x283 75 + #define PCIE_9XX_MSI_EN 0x284 76 + /* 128 MSIX vectors available in 9xx */ 77 + #define PCIE_9XX_MSIX_STATUS0 0x286 78 + #define PCIE_9XX_MSIX_STATUSX(n) (n + 0x286) 79 + #define PCIE_9XX_MSIX_VEC 0x296 80 + #define PCIE_9XX_MSIX_VECX(n) (n + 0x296) 81 + #define PCIE_9XX_INT_STATUS0 0x397 82 + #define PCIE_9XX_INT_STATUS1 0x398 83 + #define PCIE_9XX_INT_EN0 0x399 84 + #define PCIE_9XX_INT_EN1 0x39a 85 + 72 86 /* other */ 73 87 #define PCIE_NLINKS 4 74 88
+4
arch/mips/include/asm/netlogic/xlp-hal/pic.h
··· 199 199 #define PIC_IRT_PCIE_LINK_3_INDEX 81 200 200 #define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX) 201 201 202 + #define PIC_9XX_IRT_PCIE_LINK_0_INDEX 191 203 + #define PIC_9XX_IRT_PCIE_LINK_INDEX(num) \ 204 + ((num) + PIC_9XX_IRT_PCIE_LINK_0_INDEX) 205 + 202 206 #define PIC_CLOCK_TIMER 7 203 207 204 208 #if !defined(LOCORE) && !defined(__ASSEMBLY__)
+3 -2
arch/mips/include/asm/netlogic/xlp-hal/xlp.h
··· 70 70 #define PIC_PCIE_MSIX_IRQ_BASE 48 /* 48 - 51 MSI-X IRQ */ 71 71 #define PIC_PCIE_MSIX_IRQ(i) (48 + (i)) 72 72 73 - #define NLM_MSIX_VEC_BASE 96 /* 96 - 127 - MSIX mapped */ 74 - #define NLM_MSI_VEC_BASE 128 /* 128 -255 - MSI mapped */ 73 + /* XLP9xx and XLP8xx has 128 and 32 MSIX vectors respectively */ 74 + #define NLM_MSIX_VEC_BASE 96 /* 96 - 223 - MSIX mapped */ 75 + #define NLM_MSI_VEC_BASE 224 /* 224 -351 - MSI mapped */ 75 76 76 77 #define NLM_PIC_INDIRECT_VEC_BASE 512 77 78 #define NLM_GPIO_VEC_BASE 768
+135 -49
arch/mips/pci/msi-xlp.c
··· 56 56 #include <asm/netlogic/xlp-hal/bridge.h> 57 57 58 58 #define XLP_MSIVEC_PER_LINK 32 59 - #define XLP_MSIXVEC_TOTAL 32 60 - #define XLP_MSIXVEC_PER_LINK 8 59 + #define XLP_MSIXVEC_TOTAL (cpu_is_xlp9xx() ? 128 : 32) 60 + #define XLP_MSIXVEC_PER_LINK (cpu_is_xlp9xx() ? 32 : 8) 61 61 62 62 /* 128 MSI irqs per node, mapped starting at NLM_MSI_VEC_BASE */ 63 63 static inline int nlm_link_msiirq(int link, int msivec) ··· 65 65 return NLM_MSI_VEC_BASE + link * XLP_MSIVEC_PER_LINK + msivec; 66 66 } 67 67 68 + /* get the link MSI vector from irq number */ 68 69 static inline int nlm_irq_msivec(int irq) 69 70 { 70 - return irq % XLP_MSIVEC_PER_LINK; 71 + return (irq - NLM_MSI_VEC_BASE) % XLP_MSIVEC_PER_LINK; 71 72 } 72 73 74 + /* get the link from the irq number */ 73 75 static inline int nlm_irq_msilink(int irq) 74 76 { 75 - return (irq % (XLP_MSIVEC_PER_LINK * PCIE_NLINKS)) / 76 - XLP_MSIVEC_PER_LINK; 77 + int total_msivec = XLP_MSIVEC_PER_LINK * PCIE_NLINKS; 78 + 79 + return ((irq - NLM_MSI_VEC_BASE) % total_msivec) / 80 + XLP_MSIVEC_PER_LINK; 77 81 } 78 82 79 83 /* 80 - * Only 32 MSI-X vectors are possible because there are only 32 PIC 81 - * interrupts for MSI. We split them statically and use 8 MSI-X vectors 82 - * per link - this keeps the allocation and lookup simple. 84 + * For XLP 8xx/4xx/3xx/2xx, only 32 MSI-X vectors are possible because 85 + * there are only 32 PIC interrupts for MSI. We split them statically 86 + * and use 8 MSI-X vectors per link - this keeps the allocation and 87 + * lookup simple. 88 + * On XLP 9xx, there are 32 vectors per link, and the interrupts are 89 + * not routed thru PIC, so we can use all 128 MSI-X vectors. 
83 90 */ 84 91 static inline int nlm_link_msixirq(int link, int bit) 85 92 { 86 93 return NLM_MSIX_VEC_BASE + link * XLP_MSIXVEC_PER_LINK + bit; 87 94 } 88 95 96 + /* get the link MSI vector from irq number */ 89 97 static inline int nlm_irq_msixvec(int irq) 90 98 { 91 - return irq % XLP_MSIXVEC_TOTAL; /* works when given xirq */ 99 + return (irq - NLM_MSIX_VEC_BASE) % XLP_MSIXVEC_TOTAL; 92 100 } 93 101 94 - static inline int nlm_irq_msixlink(int irq) 102 + /* get the link from MSIX vec */ 103 + static inline int nlm_irq_msixlink(int msixvec) 95 104 { 96 - return nlm_irq_msixvec(irq) / XLP_MSIXVEC_PER_LINK; 105 + return msixvec / XLP_MSIXVEC_PER_LINK; 97 106 } 98 107 99 108 /* ··· 138 129 vec = nlm_irq_msivec(d->irq); 139 130 spin_lock_irqsave(&md->msi_lock, flags); 140 131 md->msi_enabled_mask |= 1u << vec; 141 - nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 132 + if (cpu_is_xlp9xx()) 133 + nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN, 134 + md->msi_enabled_mask); 135 + else 136 + nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 142 137 spin_unlock_irqrestore(&md->msi_lock, flags); 143 138 } 144 139 ··· 155 142 vec = nlm_irq_msivec(d->irq); 156 143 spin_lock_irqsave(&md->msi_lock, flags); 157 144 md->msi_enabled_mask &= ~(1u << vec); 158 - nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 145 + if (cpu_is_xlp9xx()) 146 + nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN, 147 + md->msi_enabled_mask); 148 + else 149 + nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask); 159 150 spin_unlock_irqrestore(&md->msi_lock, flags); 160 151 } 161 152 ··· 173 156 xlp_msi_disable(d); 174 157 175 158 /* Ack MSI on bridge */ 176 - nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec); 159 + if (cpu_is_xlp9xx()) 160 + nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec); 161 + else 162 + nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec); 177 163 178 164 /* Ack at eirr and PIC */ 179 165 
ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link)); 180 - nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link)); 166 + if (cpu_is_xlp9xx()) 167 + nlm_pic_ack(md->node->picbase, 168 + PIC_9XX_IRT_PCIE_LINK_INDEX(link)); 169 + else 170 + nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link)); 181 171 } 182 172 183 173 static struct irq_chip xlp_msi_chip = { ··· 196 172 }; 197 173 198 174 /* 199 - * The MSI-X interrupt handling is different from MSI, there are 32 200 - * MSI-X interrupts generated by the PIC and each of these correspond 201 - * to a MSI-X vector (0-31) that can be assigned. 175 + * XLP8XX/4XX/3XX/2XX: 176 + * The MSI-X interrupt handling is different from MSI, there are 32 MSI-X 177 + * interrupts generated by the PIC and each of these correspond to a MSI-X 178 + * vector (0-31) that can be assigned. 202 179 * 203 - * We divide the MSI-X vectors to 8 per link and do a per-link 204 - * allocation 180 + * We divide the MSI-X vectors to 8 per link and do a per-link allocation 181 + * 182 + * XLP9XX: 183 + * 32 MSI-X vectors are available per link, and the interrupts are not routed 184 + * thru the PIC. PIC ack not needed. 205 185 * 206 186 * Enable and disable done using standard MSI functions. 
207 187 */ 208 188 static void xlp_msix_mask_ack(struct irq_data *d) 209 189 { 210 - struct xlp_msi_data *md = irq_data_get_irq_handler_data(d); 190 + struct xlp_msi_data *md; 211 191 int link, msixvec; 192 + uint32_t status_reg, bit; 212 193 213 194 msixvec = nlm_irq_msixvec(d->irq); 214 - link = nlm_irq_msixlink(d->irq); 195 + link = nlm_irq_msixlink(msixvec); 215 196 mask_msi_irq(d); 197 + md = irq_data_get_irq_handler_data(d); 216 198 217 199 /* Ack MSI on bridge */ 218 - nlm_write_reg(md->lnkbase, PCIE_MSIX_STATUS, 1u << msixvec); 200 + if (cpu_is_xlp9xx()) { 201 + status_reg = PCIE_9XX_MSIX_STATUSX(link); 202 + bit = msixvec % XLP_MSIXVEC_PER_LINK; 203 + } else { 204 + status_reg = PCIE_MSIX_STATUS; 205 + bit = msixvec; 206 + } 207 + nlm_write_reg(md->lnkbase, status_reg, 1u << bit); 219 208 220 209 /* Ack at eirr and PIC */ 221 210 ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link)); 222 - nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_MSIX_INDEX(msixvec)); 211 + if (!cpu_is_xlp9xx()) 212 + nlm_pic_ack(md->node->picbase, 213 + PIC_IRT_PCIE_MSIX_INDEX(msixvec)); 223 214 } 224 215 225 216 static struct irq_chip xlp_msix_chip = { ··· 264 225 { 265 226 u32 val; 266 227 267 - val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 268 - if ((val & 0x200) == 0) { 269 - val |= 0x200; /* MSI Interrupt enable */ 270 - nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 228 + if (cpu_is_xlp9xx()) { 229 + val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0); 230 + if ((val & 0x200) == 0) { 231 + val |= 0x200; /* MSI Interrupt enable */ 232 + nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val); 233 + } 234 + } else { 235 + val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 236 + if ((val & 0x200) == 0) { 237 + val |= 0x200; 238 + nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 239 + } 271 240 } 272 241 273 242 val = nlm_read_reg(lnkbase, 0x1); /* CMD */ ··· 322 275 323 276 spin_lock_irqsave(&md->msi_lock, flags); 324 277 if (md->msi_alloc_mask == 0) { 325 - /* switch the link IRQ to MSI range */ 326 278 xlp_config_link_msi(lnkbase, 
lirq, msiaddr); 327 - irt = PIC_IRT_PCIE_LINK_INDEX(link); 279 + /* switch the link IRQ to MSI range */ 280 + if (cpu_is_xlp9xx()) 281 + irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link); 282 + else 283 + irt = PIC_IRT_PCIE_LINK_INDEX(link); 328 284 nlm_setup_pic_irq(node, lirq, lirq, irt); 329 285 nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq, 330 286 node * nlm_threads_per_node(), 1 /*en */); ··· 369 319 val |= 0x80000000U; 370 320 nlm_write_reg(lnkbase, 0x2C, val); 371 321 } 372 - val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 373 - if ((val & 0x200) == 0) { 374 - val |= 0x200; /* MSI Interrupt enable */ 375 - nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 322 + 323 + if (cpu_is_xlp9xx()) { 324 + val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0); 325 + if ((val & 0x200) == 0) { 326 + val |= 0x200; /* MSI Interrupt enable */ 327 + nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val); 328 + } 329 + } else { 330 + val = nlm_read_reg(lnkbase, PCIE_INT_EN0); 331 + if ((val & 0x200) == 0) { 332 + val |= 0x200; /* MSI Interrupt enable */ 333 + nlm_write_reg(lnkbase, PCIE_INT_EN0, val); 334 + } 376 335 } 377 336 378 337 val = nlm_read_reg(lnkbase, 0x1); /* CMD */ ··· 396 337 val |= (1 << 8) | lirq; 397 338 nlm_write_pci_reg(lnkbase, 0xf, val); 398 339 399 - /* MSI-X addresses */ 400 - nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE, msixaddr >> 8); 401 - nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT, 402 - (msixaddr + MSI_ADDR_SZ) >> 8); 340 + if (cpu_is_xlp9xx()) { 341 + /* MSI-X addresses */ 342 + nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE, 343 + msixaddr >> 8); 344 + nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT, 345 + (msixaddr + MSI_ADDR_SZ) >> 8); 346 + } else { 347 + /* MSI-X addresses */ 348 + nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE, 349 + msixaddr >> 8); 350 + nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT, 351 + (msixaddr + MSI_ADDR_SZ) >> 8); 352 + } 403 353 } 404 354 405 355 /* ··· 445 377 446 378 xirq += t; 447 379 msixvec = 
nlm_irq_msixvec(xirq); 380 + 448 381 msg.address_hi = msixaddr >> 32; 449 382 msg.address_lo = msixaddr & 0xffffffff; 450 383 msg.data = 0xc00 | msixvec; ··· 486 417 { 487 418 struct nlm_soc_info *nodep; 488 419 struct xlp_msi_data *md; 489 - int irq, i, irt, msixvec; 420 + int irq, i, irt, msixvec, val; 490 421 491 422 pr_info("[%d %d] Init node PCI IRT\n", node, link); 492 423 nodep = nlm_get_node(node); ··· 507 438 irq_set_handler_data(i, md); 508 439 } 509 440 510 - for (i = 0; i < XLP_MSIXVEC_PER_LINK; i++) { 511 - /* Initialize MSI-X irts to generate one interrupt per link */ 512 - msixvec = link * XLP_MSIXVEC_PER_LINK + i; 513 - irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec); 514 - nlm_pic_init_irt(nodep->picbase, irt, PIC_PCIE_MSIX_IRQ(link), 515 - node * nlm_threads_per_node(), 1 /* enable */); 441 + for (i = 0; i < XLP_MSIXVEC_PER_LINK ; i++) { 442 + if (cpu_is_xlp9xx()) { 443 + val = ((node * nlm_threads_per_node()) << 7 | 444 + PIC_PCIE_MSIX_IRQ(link) << 1 | 0 << 0); 445 + nlm_write_pcie_reg(md->lnkbase, PCIE_9XX_MSIX_VECX(i + 446 + (link * XLP_MSIXVEC_PER_LINK)), val); 447 + } else { 448 + /* Initialize MSI-X irts to generate one interrupt 449 + * per link 450 + */ 451 + msixvec = link * XLP_MSIXVEC_PER_LINK + i; 452 + irt = PIC_IRT_PCIE_MSIX_INDEX(msixvec); 453 + nlm_pic_init_irt(nodep->picbase, irt, 454 + PIC_PCIE_MSIX_IRQ(link), 455 + node * nlm_threads_per_node(), 1); 456 + } 516 457 517 458 /* Initialize MSI-X extended irq space for the link */ 518 459 irq = nlm_irq_to_xirq(node, nlm_link_msixirq(link, i)); 519 460 irq_set_chip_and_handler(irq, &xlp_msix_chip, handle_level_irq); 520 461 irq_set_handler_data(irq, md); 521 462 } 522 - 523 463 } 524 464 525 465 void nlm_dispatch_msi(int node, int lirq) ··· 540 462 link = lirq - PIC_PCIE_LINK_MSI_IRQ_BASE; 541 463 irqbase = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0)); 542 464 md = irq_get_handler_data(irqbase); 543 - status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) & 465 + if (cpu_is_xlp9xx()) 466 + 
status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSI_STATUS) & 467 + md->msi_enabled_mask; 468 + else 469 + status = nlm_read_reg(md->lnkbase, PCIE_MSI_STATUS) & 544 470 md->msi_enabled_mask; 545 471 while (status) { 546 472 i = __ffs(status); ··· 562 480 link = lirq - PIC_PCIE_MSIX_IRQ_BASE; 563 481 irqbase = nlm_irq_to_xirq(node, nlm_link_msixirq(link, 0)); 564 482 md = irq_get_handler_data(irqbase); 565 - status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS); 483 + if (cpu_is_xlp9xx()) 484 + status = nlm_read_reg(md->lnkbase, PCIE_9XX_MSIX_STATUSX(link)); 485 + else 486 + status = nlm_read_reg(md->lnkbase, PCIE_MSIX_STATUS); 566 487 567 488 /* narrow it down to the MSI-x vectors for our link */ 568 - status = (status >> (link * XLP_MSIXVEC_PER_LINK)) & 489 + if (!cpu_is_xlp9xx()) 490 + status = (status >> (link * XLP_MSIXVEC_PER_LINK)) & 569 491 ((1 << XLP_MSIXVEC_PER_LINK) - 1); 570 492 571 493 while (status) {