Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.30, 721 lines, 19 kB
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};
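
/*
 * Added note on the constants above: one EQ entry (EQE) is
 * MLX4_EQ_ENTRY_SIZE = 0x20 = 32 bytes, so with 4 KB pages each page of
 * an EQ buffer holds 128 entries.  MLX4_NUM_ASYNC_EQE (256) sizes the
 * single asynchronous EQ and MLX4_NUM_SPARE_EQE (128) is extra slack
 * added to every EQ; both are used in mlx4_eq_int() and
 * mlx4_init_eq_table() below.
 */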

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32			flags;
	u16			reserved1[3];
	__be16			page_offset;
	u8			log_eq_size;
	u8			reserved2[4];
	u8			eq_period;
	u8			reserved3;
	u8			eq_max_count;
	u8			reserved4[3];
	u8			intr;
	u8			log_page_size;
	u8			reserved5[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	u32			reserved6[2];
	__be32			consumer_index;
	__be32			producer_index;
	u32			reserved7[4];
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))

struct mlx4_eqe {
	u8			reserved1;
	u8			type;
	u8			reserved2;
	u8			subtype;
	union {
		u32		raw[6];
		struct {
			__be32	cqn;
		} __attribute__((packed)) comp;
		struct {
			u16	reserved1;
			__be16	token;
			u32	reserved2;
			u8	reserved3[3];
			u8	status;
			__be64	out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32	qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32	srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32	cqn;
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;
		} __attribute__((packed)) port_change;
	} event;
	u8			reserved3[3];
	u8			owner;
} __attribute__((packed));

static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
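
/*
 * Added note on the two helpers above: an EQE at cons_index is treated
 * as software-owned only when bit 7 of its owner byte matches the
 * wrap-parity bit (cons_index & nent).  Since cons_index only ever
 * increases, that parity bit flips each time the queue wraps, so an
 * entry accepted on one lap is rejected on the next until the hardware
 * has rewritten it with the toggled owner bit.  eq_set_ci() reports
 * progress by writing the low 24 bits of cons_index to the EQ
 * doorbell; setting bit 31 (req_not) additionally requests
 * notification, i.e. re-arms the EQ to raise another interrupt.
 */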

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
				       eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
			break;
		};

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
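		/*
		 * Added note: concretely, each EQ is created with
		 * MLX4_NUM_SPARE_EQE (128) entries beyond what its users
		 * strictly need (see mlx4_init_eq_table() below), and the
		 * check here writes the consumer index back after at most
		 * 128 locally processed events, so that spare room is
		 * never exhausted.
		 */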
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq *eq = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
		       int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
		dev->caps.reserved_eqs / 4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
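
/*
 * Added worked example for the two helpers above (the values are
 * illustrative, not taken from real hardware): with
 * dev->caps.reserved_eqs = 2 and num_comp_vectors = 4 the driver uses
 * EQNs 2..6, so mlx4_num_eq_uar() returns (4 + 1 + 2) / 4 - 2 / 4 + 1
 * = 2 UAR pages.  EQN 6 then lands in uar_map index 6 / 4 - 2 / 4 = 1,
 * and (with 4 KB pages) its doorbell sits at offset
 * 0x800 + 8 * (6 % 4) = 0x810 within that page.
 */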

static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}
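
/*
 * Added summary of the sequence above: mlx4_create_eq() rounds the
 * requested size up to a power of two, allocates DMA-coherent pages for
 * the EQE ring, reserves an EQN from the bitmap, maps the matching
 * doorbell UAR, describes the pages to the HCA through an MTT, and
 * finally hands the queue to hardware with SW2HW_EQ.  mlx4_free_eq()
 * below reverses the process, starting with HW2SW_EQ.
 */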

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq)
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 64 bytes of context
	 * memory, or 2 KB total.
	 */
	priv->eq_table.icm_virt = icm_virt;
	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!priv->eq_table.icm_page)
		return -ENOMEM;
	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
		__free_page(priv->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->eq_table.icm_page);
	}

	return ret;
}

void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(priv->eq_table.icm_page);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}
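
/*
 * Added note on the EQ table set up below: one completion EQ is created
 * per completion vector (entries 0 .. num_comp_vectors - 1 of the
 * table), plus one final EQ that receives every event in
 * MLX4_ASYNC_EVENT_MASK via the MAP_EQ command.  With MSI-X each EQ
 * gets its own vector and mlx4_msi_x_interrupt(); otherwise a single
 * shared INTx line runs mlx4_interrupt(), which clears the interrupt
 * and polls all of the EQs.
 */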
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
					 mlx4_num_eq_uar(dev), GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);
	if (err)
		goto err_out_bitmap;

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int  = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err)
			goto err_out_unmap;
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	if (dev->flags & MLX4_FLAG_MSI_X) {
		static const char async_eq_name[] = "mlx4-async";
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names + i * 16, 16,
					 "mlx4-comp-%d", i);
				eq_name = priv->eq_table.irq_names + i * 16;
			} else
				eq_name = async_eq_name;

			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, DRV_NAME, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}