Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.25-rc2 · 970 lines · 25 kB
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 16,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 17,
	.num_mtt	= 1 << 20,
};

static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
		dev->caps.mtu_cap[i]        = dev_cap->max_mtu[i];
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
	}

	dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg          = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg          = dev_cap->max_rq_sg;
	dev->caps.max_wqes           = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.reserved_qps       = dev_cap->reserved_qps;
	dev->caps.max_srq_wqes       = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge        = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
						    MLX4_MTT_ENTRY_PER_SEG);
	dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
	dev->caps.reserved_uars      = dev_cap->reserved_uars;
	dev->caps.reserved_pds       = dev_cap->reserved_pds;
	dev->caps.mtt_entry_sz       = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
	dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
	dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags              = dev_cap->flags;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;

	return 0;
}

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz,
				  roundup_pow_of_two(MLX4_NUM_EQ +
						     dev->caps.reserved_eqs),
				  MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_unmap_eq_icm(dev);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
	mlx4_unmap_eq_icm(dev);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icms(dev);
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}

static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	err = mlx4_QUERY_FW(dev);
	if (err) {
		mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
		return err;
	}

	err = mlx4_load_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to start FW, aborting.\n");
		return err;
	}

	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;

	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mlx4_INIT_HCA(dev, &init_hca);
	if (err) {
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_close_hca(dev);

err_free_icm:
	mlx4_free_icms(dev);

err_stop_fw:
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);

	return err;
}

static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt IRQ %d).\n",
				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	return 0;

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}

static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry entries[MLX4_NUM_EQ];
	int err;
	int i;

	if (msi_x) {
		for (i = 0; i < MLX4_NUM_EQ; ++i)
			entries[i].entry = i;

		err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
		if (err) {
			if (err > 0)
				mlx4_info(dev, "Only %d MSI-X vectors available, "
					  "not using MSI-X\n", err);
			goto no_msi;
		}

		for (i = 0; i < MLX4_NUM_EQ; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;
		return;
	}

no_msi:
	for (i = 0; i < MLX4_NUM_EQ; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}

static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
	 * be present)
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_region(pdev, 0, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
		goto err_disable_pdev;
	}

	err = pci_request_region(pdev, 2, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
		goto err_release_bar0;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_bar2;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_bar2;
		}
	}

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_bar2;
	}

	dev       = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mlx4_reset(dev);
	if (err) {
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_init_hca(dev);
	if (err)
		goto err_cmd;

	mlx4_enable_msi_x(dev);

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_close;

	err = mlx4_register_device(dev);
	if (err)
		goto err_cleanup;

	pci_set_drvdata(pdev, dev);

	return 0;

err_cleanup:
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_free_dev:
	kfree(priv);

err_release_bar2:
	pci_release_region(pdev, 2);

err_release_bar0:
	pci_release_region(pdev, 0);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	static int mlx4_version_printed;

	if (!mlx4_version_printed) {
		printk(KERN_INFO "%s", mlx4_version);
		++mlx4_version_printed;
	}

	return __mlx4_init_one(pdev, id);
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	if (dev) {
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; ++p)
			mlx4_CLOSE_PORT(dev, p);

		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_pd_table(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		kfree(priv);
		pci_release_region(pdev, 2);
		pci_release_region(pdev, 0);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}

static struct pci_device_id mlx4_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};

static int __init mlx4_init(void)
{
	int ret;

	ret = mlx4_catas_init();
	if (ret)
		return ret;

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	mlx4_catas_cleanup();
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);