Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/pci/host/pcie-iproc-msi.c at v4.15-rc1 · 679 lines · 18 kB
/*
 * Copyright (C) 2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/pci.h>

#include "pcie-iproc.h"

#define IPROC_MSI_INTR_EN_SHIFT        11
#define IPROC_MSI_INTR_EN              BIT(IPROC_MSI_INTR_EN_SHIFT)
#define IPROC_MSI_INT_N_EVENT_SHIFT    1
#define IPROC_MSI_INT_N_EVENT          BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
#define IPROC_MSI_EQ_EN_SHIFT          0
#define IPROC_MSI_EQ_EN                BIT(IPROC_MSI_EQ_EN_SHIFT)

#define IPROC_MSI_EQ_MASK              0x3f

/* Max number of GIC interrupts */
#define NR_HW_IRQS                     6

/* Number of entries in each event queue */
#define EQ_LEN                         64

/* Size of each event queue memory region */
#define EQ_MEM_REGION_SIZE             SZ_4K

/* Size of each MSI address region */
#define MSI_MEM_REGION_SIZE            SZ_4K

enum iproc_msi_reg {
        IPROC_MSI_EQ_PAGE = 0,
        IPROC_MSI_EQ_PAGE_UPPER,
        IPROC_MSI_PAGE,
        IPROC_MSI_PAGE_UPPER,
        IPROC_MSI_CTRL,
        IPROC_MSI_EQ_HEAD,
        IPROC_MSI_EQ_TAIL,
        IPROC_MSI_INTS_EN,
        IPROC_MSI_REG_SIZE,
};

struct iproc_msi;

/**
 * struct iproc_msi_grp - iProc MSI group
 *
 * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
 * event queue.
 *
 * @msi: pointer to iProc MSI data
 * @gic_irq: GIC interrupt
 * @eq: event queue number
 */
struct iproc_msi_grp {
        struct iproc_msi *msi;
        int gic_irq;
        unsigned int eq;
};

/**
 * struct iproc_msi - iProc event queue based MSI
 *
 * Only meant to be used on platforms without MSI support integrated into the
 * GIC.
 *
 * @pcie: pointer to iProc PCIe data
 * @reg_offsets: MSI register offsets
 * @grps: MSI groups
 * @nr_irqs: number of total interrupts connected to GIC
 * @nr_cpus: number of total CPUs
 * @has_inten_reg: indicates the MSI interrupt enable register needs to be
 * set explicitly (required for some legacy platforms)
 * @bitmap: MSI vector bitmap
 * @bitmap_lock: lock to protect access to the MSI bitmap
 * @nr_msi_vecs: total number of MSI vectors
 * @inner_domain: inner IRQ domain
 * @msi_domain: MSI IRQ domain
 * @nr_eq_region: required number of 4K aligned memory regions for MSI event
 * queues
 * @nr_msi_region: required number of 4K aligned address regions for MSI
 * posted writes
 * @eq_cpu: pointer to allocated memory region for MSI event queues
 * @eq_dma: DMA address of MSI event queues
 * @msi_addr: MSI address
 */
struct iproc_msi {
        struct iproc_pcie *pcie;
        const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
        struct iproc_msi_grp *grps;
        int nr_irqs;
        int nr_cpus;
        bool has_inten_reg;
        unsigned long *bitmap;
        struct mutex bitmap_lock;
        unsigned int nr_msi_vecs;
        struct irq_domain *inner_domain;
        struct irq_domain *msi_domain;
        unsigned int nr_eq_region;
        unsigned int nr_msi_region;
        void *eq_cpu;
        dma_addr_t eq_dma;
        phys_addr_t msi_addr;
};

static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
        { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
};

static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
        { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
        { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
        { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
        { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
};
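/*
 * For example: the tables above are indexed as reg_offsets[eq][reg],
 * so on PAXB the head pointer register of event queue 2 is read from
 * pcie->base + 0x260 via the helper below:
 *
 *   head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD, 2);
 *
 * PAXB shares the EQ page and MSI page registers across all groups,
 * whereas PAXC uses fully per-group registers; entries omitted from
 * the PAXC initializer are zero-filled.
 */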
static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
                                     enum iproc_msi_reg reg,
                                     unsigned int eq)
{
        struct iproc_pcie *pcie = msi->pcie;

        return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
}

static inline void iproc_msi_write_reg(struct iproc_msi *msi,
                                       enum iproc_msi_reg reg,
                                       int eq, u32 val)
{
        struct iproc_pcie *pcie = msi->pcie;

        writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
}

static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_irqs);
}

static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
                                                 unsigned long hwirq)
{
        if (msi->nr_msi_region > 1)
                return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
        else
                return hwirq_to_group(msi, hwirq) * sizeof(u32);
}

static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
{
        if (msi->nr_eq_region > 1)
                return eq * EQ_MEM_REGION_SIZE;
        else
                return eq * EQ_LEN * sizeof(u32);
}
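/*
 * For example: iproc_msi_init() configures PAXB with nr_eq_region == 1,
 * so all event queues share one DMA region and queue 2 starts at byte
 * offset 2 * EQ_LEN * sizeof(u32) = 512.  PAXC is configured with
 * nr_eq_region == nr_irqs, so each queue gets its own 4 KB page and
 * queue 2 starts at 2 * SZ_4K = 8192.  iproc_msi_addr_offset() follows
 * the same pattern: 4-byte slots within one shared MSI page on PAXB,
 * a full 4 KB page per group on PAXC.
 */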
static struct irq_chip iproc_msi_irq_chip = {
        .name = "iProc-MSI",
};

static struct msi_domain_info iproc_msi_domain_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
        .chip = &iproc_msi_irq_chip,
};

/*
 * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and a
 * dedicated event queue. Each MSI group can support up to 64 MSI vectors.
 *
 * The number of MSI groups varies between different iProc SoCs. The total
 * number of CPU cores also varies. To support MSI IRQ affinity, we
 * distribute GIC interrupts across all available CPUs. An MSI vector is moved
 * from one GIC interrupt to another to steer it to the target CPU.
 *
 * Assuming:
 * - the number of MSI groups is M
 * - the number of CPU cores is N
 * - M is always a multiple of N
 *
 * Total number of raw MSI vectors = M * 64
 * Total number of supported MSI vectors = (M * 64) / N
 */
static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
{
        return (hwirq % msi->nr_cpus);
}

static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
                                                     unsigned long hwirq)
{
        return (hwirq - hwirq_to_cpu(msi, hwirq));
}
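/*
 * Worked example of the math above: with M = 6 GIC interrupts and
 * N = 2 CPUs, there are 6 * 64 = 384 raw vectors and 384 / 2 = 192
 * usable MSI vectors.  hwirq 5 is steered to CPU 5 % 2 = 1, and its
 * canonical (CPU0) hwirq is 5 - 1 = 4.
 */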
static int iproc_msi_irq_set_affinity(struct irq_data *data,
                                      const struct cpumask *mask, bool force)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        int target_cpu = cpumask_first(mask);
        int curr_cpu;

        curr_cpu = hwirq_to_cpu(msi, data->hwirq);
        if (curr_cpu == target_cpu)
                return IRQ_SET_MASK_OK_DONE;

        /* steer MSI to the target CPU */
        data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;

        return IRQ_SET_MASK_OK;
}

static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
                                          struct msi_msg *msg)
{
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        dma_addr_t addr;

        addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
        msg->address_lo = lower_32_bits(addr);
        msg->address_hi = upper_32_bits(addr);
        msg->data = data->hwirq << 5;
}

static struct irq_chip iproc_msi_bottom_irq_chip = {
        .name = "MSI",
        .irq_set_affinity = iproc_msi_irq_set_affinity,
        .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
};

static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs,
                                      void *args)
{
        struct iproc_msi *msi = domain->host_data;
        int hwirq, i;

        mutex_lock(&msi->bitmap_lock);

        /* Allocate 'nr_cpus' number of MSI vectors each time */
        hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
                                           msi->nr_cpus, 0);
        if (hwirq < msi->nr_msi_vecs) {
                bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
        } else {
                mutex_unlock(&msi->bitmap_lock);
                return -ENOSPC;
        }

        mutex_unlock(&msi->bitmap_lock);

        for (i = 0; i < nr_irqs; i++) {
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &iproc_msi_bottom_irq_chip,
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);
        }

        return hwirq;
}

static void iproc_msi_irq_domain_free(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs)
{
        struct irq_data *data = irq_domain_get_irq_data(domain, virq);
        struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
        unsigned int hwirq;

        mutex_lock(&msi->bitmap_lock);

        hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
        bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);

        mutex_unlock(&msi->bitmap_lock);

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
        .alloc = iproc_msi_irq_domain_alloc,
        .free = iproc_msi_irq_domain_free,
};

static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
{
        u32 *msg, hwirq;
        unsigned int offs;

        offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
        msg = (u32 *)(msi->eq_cpu + offs);
        hwirq = readl(msg);
        hwirq = (hwirq >> 5) + (hwirq & 0x1f);

        /*
         * Since we have multiple hwirqs mapped to a single MSI vector,
         * we now need to derive the hwirq at CPU0. It can then be used to
         * map back to the virq.
         */
        return hwirq_to_canonical_hwirq(msi, hwirq);
}
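/*
 * Example of the encoding used by iproc_msi_irq_compose_msi_msg() and
 * decode_msi_hwirq() above: a vector with hwirq 3 is programmed with
 * MSI data 3 << 5 = 0x60.  For multi-message MSI the endpoint ORs the
 * vector index into the low 5 bits, so a received value of 0x62
 * decodes to (0x62 >> 5) + (0x62 & 0x1f) = 3 + 2 = 5, which is then
 * reduced to its canonical CPU0 hwirq for the virq lookup.
 */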
static void iproc_msi_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct iproc_msi_grp *grp;
        struct iproc_msi *msi;
        u32 eq, head, tail, nr_events;
        unsigned long hwirq;
        int virq;

        chained_irq_enter(chip, desc);

        grp = irq_desc_get_handler_data(desc);
        msi = grp->msi;
        eq = grp->eq;

        /*
         * The iProc MSI event queue is tracked by head and tail pointers.
         * The head pointer indicates the next entry (MSI data) to be
         * consumed by SW in the queue and needs to be updated by SW. The
         * iProc MSI core uses the tail pointer as the next data insertion
         * point.
         *
         * Entries between head and tail pointers contain valid MSI data.
         * MSI data is guaranteed to be in the event queue memory before the
         * tail pointer is updated by the iProc MSI core.
         */
        head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
                                  eq) & IPROC_MSI_EQ_MASK;
        do {
                tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
                                          eq) & IPROC_MSI_EQ_MASK;

                /*
                 * Figure out the total number of events (MSI data) to be
                 * processed.
                 */
                nr_events = (tail < head) ?
                        (EQ_LEN - (head - tail)) : (tail - head);
                if (!nr_events)
                        break;

                /* process all outstanding events */
                while (nr_events--) {
                        hwirq = decode_msi_hwirq(msi, eq, head);
                        virq = irq_find_mapping(msi->inner_domain, hwirq);
                        generic_handle_irq(virq);

                        head++;
                        head %= EQ_LEN;
                }

                /*
                 * Now all outstanding events have been processed. Update the
                 * head pointer.
                 */
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);

                /*
                 * Now go read the tail pointer again to see if there are new
                 * outstanding events that came in during the above window.
                 */
        } while (true);

        chained_irq_exit(chip, desc);
}
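/*
 * Worked example of the event accounting above: with EQ_LEN = 64,
 * head = 60 and tail = 3 give nr_events = 64 - (60 - 3) = 7, i.e.
 * entries 60..63 followed by 0..2 after the wrap-around.
 */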
static void iproc_msi_enable(struct iproc_msi *msi)
{
        int i, eq;
        u32 val;

        /* Program memory region for each event queue */
        for (i = 0; i < msi->nr_eq_region; i++) {
                dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        /* Program address region for MSI posted writes */
        for (i = 0; i < msi->nr_msi_region; i++) {
                phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);

                iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
                                    lower_32_bits(addr));
                iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
                                    upper_32_bits(addr));
        }

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                /* Enable MSI event queue */
                val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                        IPROC_MSI_EQ_EN;
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);

                /*
                 * Some legacy platforms require the MSI interrupt enable
                 * register to be set explicitly.
                 */
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val |= BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }
        }
}

static void iproc_msi_disable(struct iproc_msi *msi)
{
        u32 eq, val;

        for (eq = 0; eq < msi->nr_irqs; eq++) {
                if (msi->has_inten_reg) {
                        val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
                        val &= ~BIT(eq);
                        iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
                }

                val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
                val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
                         IPROC_MSI_EQ_EN);
                iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
        }
}

static int iproc_msi_alloc_domains(struct device_node *node,
                                   struct iproc_msi *msi)
{
        msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
                                                  &msi_domain_ops, msi);
        if (!msi->inner_domain)
                return -ENOMEM;

        msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                                    &iproc_msi_domain_info,
                                                    msi->inner_domain);
        if (!msi->msi_domain) {
                irq_domain_remove(msi->inner_domain);
                return -ENOMEM;
        }

        return 0;
}

static void iproc_msi_free_domains(struct iproc_msi *msi)
{
        if (msi->msi_domain)
                irq_domain_remove(msi->msi_domain);

        if (msi->inner_domain)
                irq_domain_remove(msi->inner_domain);
}

static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
{
        int i;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 NULL, NULL);
        }
}

static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
{
        int i, ret;
        cpumask_var_t mask;
        struct iproc_pcie *pcie = msi->pcie;

        for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
                irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
                                                 iproc_msi_handler,
                                                 &msi->grps[i]);
                /* Dedicate GIC interrupt to each CPU core */
                if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
                        cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
                        if (ret)
                                dev_err(pcie->dev,
                                        "failed to set affinity for IRQ%d\n",
                                        msi->grps[i].gic_irq);
                        free_cpumask_var(mask);
                } else {
                        dev_err(pcie->dev, "failed to alloc CPU mask\n");
                        ret = -EINVAL;
                }

                if (ret) {
                        /* Free all configured/unconfigured IRQs */
                        iproc_msi_irq_free(msi, cpu);
                        return ret;
                }
        }

        return 0;
}

int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
{
        struct iproc_msi *msi;
        int i, ret;
        unsigned int cpu;

        if (!of_device_is_compatible(node, "brcm,iproc-msi"))
                return -ENODEV;

        if (!of_find_property(node, "msi-controller", NULL))
                return -ENODEV;

        if (pcie->msi)
                return -EBUSY;

        msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
        if (!msi)
                return -ENOMEM;

        msi->pcie = pcie;
        pcie->msi = msi;
        msi->msi_addr = pcie->base_addr;
        mutex_init(&msi->bitmap_lock);
        msi->nr_cpus = num_possible_cpus();

        msi->nr_irqs = of_irq_count(node);
        if (!msi->nr_irqs) {
                dev_err(pcie->dev, "found no MSI GIC interrupt\n");
                return -ENODEV;
        }

        if (msi->nr_irqs > NR_HW_IRQS) {
                dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
                         msi->nr_irqs);
                msi->nr_irqs = NR_HW_IRQS;
        }

        if (msi->nr_irqs < msi->nr_cpus) {
                dev_err(pcie->dev,
                        "not enough GIC interrupts for MSI affinity\n");
                return -EINVAL;
        }

        if (msi->nr_irqs % msi->nr_cpus != 0) {
                msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
                dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
                         msi->nr_irqs);
        }

        switch (pcie->type) {
        case IPROC_PCIE_PAXB_BCMA:
        case IPROC_PCIE_PAXB:
                msi->reg_offsets = iproc_msi_reg_paxb;
                msi->nr_eq_region = 1;
                msi->nr_msi_region = 1;
                break;
        case IPROC_PCIE_PAXC:
                msi->reg_offsets = iproc_msi_reg_paxc;
                msi->nr_eq_region = msi->nr_irqs;
                msi->nr_msi_region = msi->nr_irqs;
                break;
        default:
                dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
                return -EINVAL;
        }

        if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
                msi->has_inten_reg = true;

        msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
        msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
                                   sizeof(*msi->bitmap), GFP_KERNEL);
        if (!msi->bitmap)
                return -ENOMEM;

        msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
                                 GFP_KERNEL);
        if (!msi->grps)
                return -ENOMEM;

        for (i = 0; i < msi->nr_irqs; i++) {
                unsigned int irq = irq_of_parse_and_map(node, i);

                if (!irq) {
                        dev_err(pcie->dev, "unable to parse/map interrupt\n");
                        ret = -ENODEV;
                        goto free_irqs;
                }
                msi->grps[i].gic_irq = irq;
                msi->grps[i].msi = msi;
                msi->grps[i].eq = i;
        }

        /* Reserve memory for the event queues and make sure it is zeroed */
        msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
                                          msi->nr_eq_region *
                                          EQ_MEM_REGION_SIZE,
                                          &msi->eq_dma, GFP_KERNEL);
        if (!msi->eq_cpu) {
                ret = -ENOMEM;
                goto free_irqs;
        }

        ret = iproc_msi_alloc_domains(node, msi);
        if (ret) {
                dev_err(pcie->dev, "failed to create MSI domains\n");
                goto free_eq_dma;
        }

        for_each_online_cpu(cpu) {
                ret = iproc_msi_irq_setup(msi, cpu);
                if (ret)
                        goto free_msi_irq;
        }

        iproc_msi_enable(msi);

        return 0;

free_msi_irq:
        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);
        iproc_msi_free_domains(msi);

free_eq_dma:
        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

free_irqs:
        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
        pcie->msi = NULL;
        return ret;
}
EXPORT_SYMBOL(iproc_msi_init);

void iproc_msi_exit(struct iproc_pcie *pcie)
{
        struct iproc_msi *msi = pcie->msi;
        unsigned int i, cpu;

        if (!msi)
                return;

        iproc_msi_disable(msi);

        for_each_online_cpu(cpu)
                iproc_msi_irq_free(msi, cpu);

        iproc_msi_free_domains(msi);

        dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
                          msi->eq_cpu, msi->eq_dma);

        for (i = 0; i < msi->nr_irqs; i++) {
                if (msi->grps[i].gic_irq)
                        irq_dispose_mapping(msi->grps[i].gic_irq);
        }
}
EXPORT_SYMBOL(iproc_msi_exit);
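For context, a host driver hands iproc_msi_init() the MSI device tree node and tears things down with iproc_msi_exit(). The sketch below shows one plausible way to wire that up from a probe path; the iproc_pcie_msi_enable() name and the use of the "msi-parent" phandle are illustrative assumptions, not part of this file.

/*
 * Illustrative caller (assumed, not from this file): resolve the
 * "msi-parent" phandle of the PCIe controller node and initialize
 * event-queue-based MSI support if the node matches this driver.
 */
static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
{
        struct device_node *msi_node;
        int ret;

        msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
        if (!msi_node)
                return -ENODEV; /* no MSI support wired up in DT */

        /* Fails with -ENODEV if the node is not a "brcm,iproc-msi" node */
        ret = iproc_msi_init(pcie, msi_node);
        of_node_put(msi_node);
        return ret;
}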