Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v3.0-rc2 1479 lines 40 kB view raw
1/* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36#include <linux/module.h> 37#include <linux/init.h> 38#include <linux/errno.h> 39#include <linux/pci.h> 40#include <linux/dma-mapping.h> 41#include <linux/slab.h> 42#include <linux/io-mapping.h> 43 44#include <linux/mlx4/device.h> 45#include <linux/mlx4/doorbell.h> 46 47#include "mlx4.h" 48#include "fw.h" 49#include "icm.h" 50 51MODULE_AUTHOR("Roland Dreier"); 52MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 53MODULE_LICENSE("Dual BSD/GPL"); 54MODULE_VERSION(DRV_VERSION); 55 56struct workqueue_struct *mlx4_wq; 57 58#ifdef CONFIG_MLX4_DEBUG 59 60int mlx4_debug_level = 0; 61module_param_named(debug_level, mlx4_debug_level, int, 0644); 62MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 63 64#endif /* CONFIG_MLX4_DEBUG */ 65 66#ifdef CONFIG_PCI_MSI 67 68static int msi_x = 1; 69module_param(msi_x, int, 0444); 70MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 71 72#else /* CONFIG_PCI_MSI */ 73 74#define msi_x (0) 75 76#endif /* CONFIG_PCI_MSI */ 77 78static char mlx4_version[] __devinitdata = 79 DRV_NAME ": Mellanox ConnectX core driver v" 80 DRV_VERSION " (" DRV_RELDATE ")\n"; 81 82static struct mlx4_profile default_profile = { 83 .num_qp = 1 << 17, 84 .num_srq = 1 << 16, 85 .rdmarc_per_qp = 1 << 4, 86 .num_cq = 1 << 16, 87 .num_mcg = 1 << 13, 88 .num_mpt = 1 << 17, 89 .num_mtt = 1 << 20, 90}; 91 92static int log_num_mac = 2; 93module_param_named(log_num_mac, log_num_mac, int, 0444); 94MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 95 96static int log_num_vlan; 97module_param_named(log_num_vlan, log_num_vlan, int, 0444); 98MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 99 100static int use_prio; 101module_param_named(use_prio, use_prio, bool, 0444); 102MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 103 "(0/1, default 0)"); 104 105static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 106module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 107MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 108 109int mlx4_check_port_params(struct mlx4_dev *dev, 110 enum mlx4_port_type *port_type) 111{ 112 int i; 113 114 for (i = 0; i < dev->caps.num_ports - 1; i++) { 115 if (port_type[i] != port_type[i + 1]) { 116 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 117 mlx4_err(dev, "Only same port types supported " 118 "on this HCA, aborting.\n"); 119 return -EINVAL; 120 } 121 if (port_type[i] == MLX4_PORT_TYPE_ETH && 122 port_type[i + 1] == MLX4_PORT_TYPE_IB) 123 return -EINVAL; 124 } 125 } 126 127 for (i = 0; i < dev->caps.num_ports; i++) { 128 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 129 mlx4_err(dev, "Requested port type for port %d is not " 130 "supported on this HCA\n", i + 1); 131 return -EINVAL; 132 } 133 } 134 return 0; 135} 136 137static void mlx4_set_port_mask(struct mlx4_dev *dev) 138{ 139 int i; 140 141 dev->caps.port_mask = 0; 142 for (i = 1; i <= dev->caps.num_ports; ++i) 143 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) 144 dev->caps.port_mask |= 1 << (i - 1); 145} 146static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 147{ 148 int err; 149 int i; 150 151 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 152 if (err) { 153 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 154 return err; 155 } 156 157 if (dev_cap->min_page_sz > PAGE_SIZE) { 158 mlx4_err(dev, "HCA minimum page size of %d bigger than " 159 "kernel PAGE_SIZE of %ld, aborting.\n", 160 dev_cap->min_page_sz, PAGE_SIZE); 161 return -ENODEV; 162 } 163 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 164 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 165 "aborting.\n", 166 dev_cap->num_ports, MLX4_MAX_PORTS); 167 return -ENODEV; 168 } 169 170 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 171 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " 172 "PCI resource 2 size of 0x%llx, aborting.\n", 173 dev_cap->uar_size, 174 (unsigned long long) pci_resource_len(dev->pdev, 2)); 175 return -ENODEV; 176 } 177 178 dev->caps.num_ports = dev_cap->num_ports; 179 for (i = 1; i <= dev->caps.num_ports; ++i) { 180 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 181 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; 182 dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; 183 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; 184 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; 185 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; 186 dev->caps.def_mac[i] = dev_cap->def_mac[i]; 187 dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; 188 dev->caps.trans_type[i] = dev_cap->trans_type[i]; 189 dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; 190 dev->caps.wavelength[i] = dev_cap->wavelength[i]; 191 dev->caps.trans_code[i] = dev_cap->trans_code[i]; 192 } 193 194 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 195 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 196 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 197 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 198 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 199 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 200 dev->caps.max_wqes = dev_cap->max_qp_sz; 201 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 202 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 203 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 204 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 205 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 206 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 207 dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM; 208 /* 209 * Subtract 1 from the limit because we need to allocate a 210 * spare CQE so the HCA HW can tell the difference between an 211 * empty CQ and a full CQ. 212 */ 213 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 214 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 215 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 216 dev->caps.mtts_per_seg = 1 << log_mtts_per_seg; 217 dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts, 218 dev->caps.mtts_per_seg); 219 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 220 dev->caps.reserved_uars = dev_cap->reserved_uars; 221 dev->caps.reserved_pds = dev_cap->reserved_pds; 222 dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz; 223 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 224 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 225 dev->caps.flags = dev_cap->flags; 226 dev->caps.bmme_flags = dev_cap->bmme_flags; 227 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 228 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 229 dev->caps.udp_rss = dev_cap->udp_rss; 230 dev->caps.loopback_support = dev_cap->loopback_support; 231 dev->caps.vep_uc_steering = dev_cap->vep_uc_steering; 232 dev->caps.vep_mc_steering = dev_cap->vep_mc_steering; 233 dev->caps.wol = dev_cap->wol; 234 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 235 236 dev->caps.log_num_macs = log_num_mac; 237 dev->caps.log_num_vlans = log_num_vlan; 238 dev->caps.log_num_prios = use_prio ? 3 : 0; 239 240 for (i = 1; i <= dev->caps.num_ports; ++i) { 241 if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH) 242 dev->caps.port_type[i] = MLX4_PORT_TYPE_IB; 243 else 244 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 245 dev->caps.possible_type[i] = dev->caps.port_type[i]; 246 mlx4_priv(dev)->sense.sense_allowed[i] = 247 dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO; 248 249 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 250 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 251 mlx4_warn(dev, "Requested number of MACs is too much " 252 "for port %d, reducing to %d.\n", 253 i, 1 << dev->caps.log_num_macs); 254 } 255 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { 256 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; 257 mlx4_warn(dev, "Requested number of VLANs is too much " 258 "for port %d, reducing to %d.\n", 259 i, 1 << dev->caps.log_num_vlans); 260 } 261 } 262 263 mlx4_set_port_mask(dev); 264 265 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 266 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 267 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 268 (1 << dev->caps.log_num_macs) * 269 (1 << dev->caps.log_num_vlans) * 270 (1 << dev->caps.log_num_prios) * 271 dev->caps.num_ports; 272 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 273 274 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 275 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 276 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 277 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 278 279 return 0; 280} 281 282/* 283 * Change the port configuration of the device. 284 * Every user of this function must hold the port mutex. 285 */ 286int mlx4_change_port_types(struct mlx4_dev *dev, 287 enum mlx4_port_type *port_types) 288{ 289 int err = 0; 290 int change = 0; 291 int port; 292 293 for (port = 0; port < dev->caps.num_ports; port++) { 294 /* Change the port type only if the new type is different 295 * from the current, and not set to Auto */ 296 if (port_types[port] != dev->caps.port_type[port + 1]) { 297 change = 1; 298 dev->caps.port_type[port + 1] = port_types[port]; 299 } 300 } 301 if (change) { 302 mlx4_unregister_device(dev); 303 for (port = 1; port <= dev->caps.num_ports; port++) { 304 mlx4_CLOSE_PORT(dev, port); 305 err = mlx4_SET_PORT(dev, port); 306 if (err) { 307 mlx4_err(dev, "Failed to set port %d, " 308 "aborting\n", port); 309 goto out; 310 } 311 } 312 mlx4_set_port_mask(dev); 313 err = mlx4_register_device(dev); 314 } 315 316out: 317 return err; 318} 319 320static ssize_t show_port_type(struct device *dev, 321 struct device_attribute *attr, 322 char *buf) 323{ 324 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 325 port_attr); 326 struct mlx4_dev *mdev = info->dev; 327 char type[8]; 328 329 sprintf(type, "%s", 330 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 331 "ib" : "eth"); 332 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 333 sprintf(buf, "auto (%s)\n", type); 334 else 335 sprintf(buf, "%s\n", type); 336 337 return strlen(buf); 338} 339 340static ssize_t set_port_type(struct device *dev, 341 struct device_attribute *attr, 342 const char *buf, size_t count) 343{ 344 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 345 port_attr); 346 struct mlx4_dev *mdev = info->dev; 347 struct mlx4_priv *priv = mlx4_priv(mdev); 348 enum mlx4_port_type types[MLX4_MAX_PORTS]; 349 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 350 int i; 351 int err = 0; 352 353 if (!strcmp(buf, "ib\n")) 354 info->tmp_type = MLX4_PORT_TYPE_IB; 355 else if (!strcmp(buf, "eth\n")) 356 info->tmp_type = MLX4_PORT_TYPE_ETH; 357 else if (!strcmp(buf, "auto\n")) 358 info->tmp_type = MLX4_PORT_TYPE_AUTO; 359 else { 360 mlx4_err(mdev, "%s is not supported port type\n", buf); 361 return -EINVAL; 362 } 363 364 mlx4_stop_sense(mdev); 365 mutex_lock(&priv->port_mutex); 366 /* Possible type is always the one that was delivered */ 367 mdev->caps.possible_type[info->port] = info->tmp_type; 368 369 for (i = 0; i < mdev->caps.num_ports; i++) { 370 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 371 mdev->caps.possible_type[i+1]; 372 if (types[i] == MLX4_PORT_TYPE_AUTO) 373 types[i] = mdev->caps.port_type[i+1]; 374 } 375 376 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 377 for (i = 1; i <= mdev->caps.num_ports; i++) { 378 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 379 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 380 err = -EINVAL; 381 } 382 } 383 } 384 if (err) { 385 mlx4_err(mdev, "Auto sensing is not supported on this HCA. " 386 "Set only 'eth' or 'ib' for both ports " 387 "(should be the same)\n"); 388 goto out; 389 } 390 391 mlx4_do_sense_ports(mdev, new_types, types); 392 393 err = mlx4_check_port_params(mdev, new_types); 394 if (err) 395 goto out; 396 397 /* We are about to apply the changes after the configuration 398 * was verified, no need to remember the temporary types 399 * any more */ 400 for (i = 0; i < mdev->caps.num_ports; i++) 401 priv->port[i + 1].tmp_type = 0; 402 403 err = mlx4_change_port_types(mdev, new_types); 404 405out: 406 mlx4_start_sense(mdev); 407 mutex_unlock(&priv->port_mutex); 408 return err ? err : count; 409} 410 411static int mlx4_load_fw(struct mlx4_dev *dev) 412{ 413 struct mlx4_priv *priv = mlx4_priv(dev); 414 int err; 415 416 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 417 GFP_HIGHUSER | __GFP_NOWARN, 0); 418 if (!priv->fw.fw_icm) { 419 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 420 return -ENOMEM; 421 } 422 423 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 424 if (err) { 425 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 426 goto err_free; 427 } 428 429 err = mlx4_RUN_FW(dev); 430 if (err) { 431 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 432 goto err_unmap_fa; 433 } 434 435 return 0; 436 437err_unmap_fa: 438 mlx4_UNMAP_FA(dev); 439 440err_free: 441 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 442 return err; 443} 444 445static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 446 int cmpt_entry_sz) 447{ 448 struct mlx4_priv *priv = mlx4_priv(dev); 449 int err; 450 451 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 452 cmpt_base + 453 ((u64) (MLX4_CMPT_TYPE_QP * 454 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 455 cmpt_entry_sz, dev->caps.num_qps, 456 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 457 0, 0); 458 if (err) 459 goto err; 460 461 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 462 cmpt_base + 463 ((u64) (MLX4_CMPT_TYPE_SRQ * 464 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 465 cmpt_entry_sz, dev->caps.num_srqs, 466 dev->caps.reserved_srqs, 0, 0); 467 if (err) 468 goto err_qp; 469 470 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 471 cmpt_base + 472 ((u64) (MLX4_CMPT_TYPE_CQ * 473 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 474 cmpt_entry_sz, dev->caps.num_cqs, 475 dev->caps.reserved_cqs, 0, 0); 476 if (err) 477 goto err_srq; 478 479 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 480 cmpt_base + 481 ((u64) (MLX4_CMPT_TYPE_EQ * 482 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 483 cmpt_entry_sz, 484 dev->caps.num_eqs, dev->caps.num_eqs, 0, 0); 485 if (err) 486 goto err_cq; 487 488 return 0; 489 490err_cq: 491 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 492 493err_srq: 494 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 495 496err_qp: 497 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 498 499err: 500 return err; 501} 502 503static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 504 struct mlx4_init_hca_param *init_hca, u64 icm_size) 505{ 506 struct mlx4_priv *priv = mlx4_priv(dev); 507 u64 aux_pages; 508 int err; 509 510 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 511 if (err) { 512 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 513 return err; 514 } 515 516 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 517 (unsigned long long) icm_size >> 10, 518 (unsigned long long) aux_pages << 2); 519 520 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 521 GFP_HIGHUSER | __GFP_NOWARN, 0); 522 if (!priv->fw.aux_icm) { 523 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 524 return -ENOMEM; 525 } 526 527 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 528 if (err) { 529 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 530 goto err_free_aux; 531 } 532 533 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 534 if (err) { 535 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 536 goto err_unmap_aux; 537 } 538 539 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 540 init_hca->eqc_base, dev_cap->eqc_entry_sz, 541 dev->caps.num_eqs, dev->caps.num_eqs, 542 0, 0); 543 if (err) { 544 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 545 goto err_unmap_cmpt; 546 } 547 548 /* 549 * Reserved MTT entries must be aligned up to a cacheline 550 * boundary, since the FW will write to them, while the driver 551 * writes to all other MTT entries. (The variable 552 * dev->caps.mtt_entry_sz below is really the MTT segment 553 * size, not the raw entry size) 554 */ 555 dev->caps.reserved_mtts = 556 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 557 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 558 559 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 560 init_hca->mtt_base, 561 dev->caps.mtt_entry_sz, 562 dev->caps.num_mtt_segs, 563 dev->caps.reserved_mtts, 1, 0); 564 if (err) { 565 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 566 goto err_unmap_eq; 567 } 568 569 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 570 init_hca->dmpt_base, 571 dev_cap->dmpt_entry_sz, 572 dev->caps.num_mpts, 573 dev->caps.reserved_mrws, 1, 1); 574 if (err) { 575 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 576 goto err_unmap_mtt; 577 } 578 579 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 580 init_hca->qpc_base, 581 dev_cap->qpc_entry_sz, 582 dev->caps.num_qps, 583 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 584 0, 0); 585 if (err) { 586 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 587 goto err_unmap_dmpt; 588 } 589 590 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 591 init_hca->auxc_base, 592 dev_cap->aux_entry_sz, 593 dev->caps.num_qps, 594 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 595 0, 0); 596 if (err) { 597 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 598 goto err_unmap_qp; 599 } 600 601 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 602 init_hca->altc_base, 603 dev_cap->altc_entry_sz, 604 dev->caps.num_qps, 605 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 606 0, 0); 607 if (err) { 608 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 609 goto err_unmap_auxc; 610 } 611 612 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 613 init_hca->rdmarc_base, 614 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 615 dev->caps.num_qps, 616 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 617 0, 0); 618 if (err) { 619 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 620 goto err_unmap_altc; 621 } 622 623 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 624 init_hca->cqc_base, 625 dev_cap->cqc_entry_sz, 626 dev->caps.num_cqs, 627 dev->caps.reserved_cqs, 0, 0); 628 if (err) { 629 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 630 goto err_unmap_rdmarc; 631 } 632 633 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 634 init_hca->srqc_base, 635 dev_cap->srq_entry_sz, 636 dev->caps.num_srqs, 637 dev->caps.reserved_srqs, 0, 0); 638 if (err) { 639 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 640 goto err_unmap_cq; 641 } 642 643 /* 644 * It's not strictly required, but for simplicity just map the 645 * whole multicast group table now. The table isn't very big 646 * and it's a lot easier than trying to track ref counts. 647 */ 648 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 649 init_hca->mc_base, MLX4_MGM_ENTRY_SIZE, 650 dev->caps.num_mgms + dev->caps.num_amgms, 651 dev->caps.num_mgms + dev->caps.num_amgms, 652 0, 0); 653 if (err) { 654 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 655 goto err_unmap_srq; 656 } 657 658 return 0; 659 660err_unmap_srq: 661 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 662 663err_unmap_cq: 664 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 665 666err_unmap_rdmarc: 667 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 668 669err_unmap_altc: 670 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 671 672err_unmap_auxc: 673 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 674 675err_unmap_qp: 676 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 677 678err_unmap_dmpt: 679 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 680 681err_unmap_mtt: 682 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 683 684err_unmap_eq: 685 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 686 687err_unmap_cmpt: 688 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 689 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 690 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 691 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 692 693err_unmap_aux: 694 mlx4_UNMAP_ICM_AUX(dev); 695 696err_free_aux: 697 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 698 699 return err; 700} 701 702static void mlx4_free_icms(struct mlx4_dev *dev) 703{ 704 struct mlx4_priv *priv = mlx4_priv(dev); 705 706 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 707 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 708 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 709 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 710 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 711 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 712 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 713 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 714 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 715 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 716 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 717 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 718 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 719 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 720 721 mlx4_UNMAP_ICM_AUX(dev); 722 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 723} 724 725static int map_bf_area(struct mlx4_dev *dev) 726{ 727 struct mlx4_priv *priv = mlx4_priv(dev); 728 resource_size_t bf_start; 729 resource_size_t bf_len; 730 int err = 0; 731 732 bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT); 733 bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT); 734 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 735 if (!priv->bf_mapping) 736 err = -ENOMEM; 737 738 return err; 739} 740 741static void unmap_bf_area(struct mlx4_dev *dev) 742{ 743 if (mlx4_priv(dev)->bf_mapping) 744 io_mapping_free(mlx4_priv(dev)->bf_mapping); 745} 746 747static void mlx4_close_hca(struct mlx4_dev *dev) 748{ 749 unmap_bf_area(dev); 750 mlx4_CLOSE_HCA(dev, 0); 751 mlx4_free_icms(dev); 752 mlx4_UNMAP_FA(dev); 753 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 754} 755 756static int mlx4_init_hca(struct mlx4_dev *dev) 757{ 758 struct mlx4_priv *priv = mlx4_priv(dev); 759 struct mlx4_adapter adapter; 760 struct mlx4_dev_cap dev_cap; 761 struct mlx4_mod_stat_cfg mlx4_cfg; 762 struct mlx4_profile profile; 763 struct mlx4_init_hca_param init_hca; 764 u64 icm_size; 765 int err; 766 767 err = mlx4_QUERY_FW(dev); 768 if (err) { 769 if (err == -EACCES) 770 mlx4_info(dev, "non-primary physical function, skipping.\n"); 771 else 772 mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); 773 return err; 774 } 775 776 err = mlx4_load_fw(dev); 777 if (err) { 778 mlx4_err(dev, "Failed to start FW, aborting.\n"); 779 return err; 780 } 781 782 mlx4_cfg.log_pg_sz_m = 1; 783 mlx4_cfg.log_pg_sz = 0; 784 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 785 if (err) 786 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 787 788 err = mlx4_dev_cap(dev, &dev_cap); 789 if (err) { 790 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 791 goto err_stop_fw; 792 } 793 794 profile = default_profile; 795 796 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca); 797 if ((long long) icm_size < 0) { 798 err = icm_size; 799 goto err_stop_fw; 800 } 801 802 if (map_bf_area(dev)) 803 mlx4_dbg(dev, "Failed to map blue flame area\n"); 804 805 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 806 807 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 808 if (err) 809 goto err_stop_fw; 810 811 err = mlx4_INIT_HCA(dev, &init_hca); 812 if (err) { 813 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 814 goto err_free_icm; 815 } 816 817 err = mlx4_QUERY_ADAPTER(dev, &adapter); 818 if (err) { 819 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 820 goto err_close; 821 } 822 823 priv->eq_table.inta_pin = adapter.inta_pin; 824 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 825 826 return 0; 827 828err_close: 829 mlx4_CLOSE_HCA(dev, 0); 830 831err_free_icm: 832 mlx4_free_icms(dev); 833 834err_stop_fw: 835 unmap_bf_area(dev); 836 mlx4_UNMAP_FA(dev); 837 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 838 839 return err; 840} 841 842static int mlx4_setup_hca(struct mlx4_dev *dev) 843{ 844 struct mlx4_priv *priv = mlx4_priv(dev); 845 int err; 846 int port; 847 __be32 ib_port_default_caps; 848 849 err = mlx4_init_uar_table(dev); 850 if (err) { 851 mlx4_err(dev, "Failed to initialize " 852 "user access region table, aborting.\n"); 853 return err; 854 } 855 856 err = mlx4_uar_alloc(dev, &priv->driver_uar); 857 if (err) { 858 mlx4_err(dev, "Failed to allocate driver access region, " 859 "aborting.\n"); 860 goto err_uar_table_free; 861 } 862 863 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 864 if (!priv->kar) { 865 mlx4_err(dev, "Couldn't map kernel access region, " 866 "aborting.\n"); 867 err = -ENOMEM; 868 goto err_uar_free; 869 } 870 871 err = mlx4_init_pd_table(dev); 872 if (err) { 873 mlx4_err(dev, "Failed to initialize " 874 "protection domain table, aborting.\n"); 875 goto err_kar_unmap; 876 } 877 878 err = mlx4_init_mr_table(dev); 879 if (err) { 880 mlx4_err(dev, "Failed to initialize " 881 "memory region table, aborting.\n"); 882 goto err_pd_table_free; 883 } 884 885 err = mlx4_init_eq_table(dev); 886 if (err) { 887 mlx4_err(dev, "Failed to initialize " 888 "event queue table, aborting.\n"); 889 goto err_mr_table_free; 890 } 891 892 err = mlx4_cmd_use_events(dev); 893 if (err) { 894 mlx4_err(dev, "Failed to switch to event-driven " 895 "firmware commands, aborting.\n"); 896 goto err_eq_table_free; 897 } 898 899 err = mlx4_NOP(dev); 900 if (err) { 901 if (dev->flags & MLX4_FLAG_MSI_X) { 902 mlx4_warn(dev, "NOP command failed to generate MSI-X " 903 "interrupt IRQ %d).\n", 904 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 905 mlx4_warn(dev, "Trying again without MSI-X.\n"); 906 } else { 907 mlx4_err(dev, "NOP command failed to generate interrupt " 908 "(IRQ %d), aborting.\n", 909 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 910 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 911 } 912 913 goto err_cmd_poll; 914 } 915 916 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 917 918 err = mlx4_init_cq_table(dev); 919 if (err) { 920 mlx4_err(dev, "Failed to initialize " 921 "completion queue table, aborting.\n"); 922 goto err_cmd_poll; 923 } 924 925 err = mlx4_init_srq_table(dev); 926 if (err) { 927 mlx4_err(dev, "Failed to initialize " 928 "shared receive queue table, aborting.\n"); 929 goto err_cq_table_free; 930 } 931 932 err = mlx4_init_qp_table(dev); 933 if (err) { 934 mlx4_err(dev, "Failed to initialize " 935 "queue pair table, aborting.\n"); 936 goto err_srq_table_free; 937 } 938 939 err = mlx4_init_mcg_table(dev); 940 if (err) { 941 mlx4_err(dev, "Failed to initialize " 942 "multicast group table, aborting.\n"); 943 goto err_qp_table_free; 944 } 945 946 for (port = 1; port <= dev->caps.num_ports; port++) { 947 enum mlx4_port_type port_type = 0; 948 mlx4_SENSE_PORT(dev, port, &port_type); 949 if (port_type) 950 dev->caps.port_type[port] = port_type; 951 ib_port_default_caps = 0; 952 err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps); 953 if (err) 954 mlx4_warn(dev, "failed to get port %d default " 955 "ib capabilities (%d). Continuing with " 956 "caps = 0\n", port, err); 957 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 958 err = mlx4_SET_PORT(dev, port); 959 if (err) { 960 mlx4_err(dev, "Failed to set port %d, aborting\n", 961 port); 962 goto err_mcg_table_free; 963 } 964 } 965 mlx4_set_port_mask(dev); 966 967 return 0; 968 969err_mcg_table_free: 970 mlx4_cleanup_mcg_table(dev); 971 972err_qp_table_free: 973 mlx4_cleanup_qp_table(dev); 974 975err_srq_table_free: 976 mlx4_cleanup_srq_table(dev); 977 978err_cq_table_free: 979 mlx4_cleanup_cq_table(dev); 980 981err_cmd_poll: 982 mlx4_cmd_use_polling(dev); 983 984err_eq_table_free: 985 mlx4_cleanup_eq_table(dev); 986 987err_mr_table_free: 988 mlx4_cleanup_mr_table(dev); 989 990err_pd_table_free: 991 mlx4_cleanup_pd_table(dev); 992 993err_kar_unmap: 994 iounmap(priv->kar); 995 996err_uar_free: 997 mlx4_uar_free(dev, &priv->driver_uar); 998 999err_uar_table_free: 1000 mlx4_cleanup_uar_table(dev); 1001 return err; 1002} 1003 1004static void mlx4_enable_msi_x(struct mlx4_dev *dev) 1005{ 1006 struct mlx4_priv *priv = mlx4_priv(dev); 1007 struct msix_entry *entries; 1008 int nreq = min_t(int, dev->caps.num_ports * 1009 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) 1010 + MSIX_LEGACY_SZ, MAX_MSIX); 1011 int err; 1012 int i; 1013 1014 if (msi_x) { 1015 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 1016 nreq); 1017 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1018 if (!entries) 1019 goto no_msi; 1020 1021 for (i = 0; i < nreq; ++i) 1022 entries[i].entry = i; 1023 1024 retry: 1025 err = pci_enable_msix(dev->pdev, entries, nreq); 1026 if (err) { 1027 /* Try again if at least 2 vectors are available */ 1028 if (err > 1) { 1029 mlx4_info(dev, "Requested %d vectors, " 1030 "but only %d MSI-X vectors available, " 1031 "trying again\n", nreq, err); 1032 nreq = err; 1033 goto retry; 1034 } 1035 kfree(entries); 1036 goto no_msi; 1037 } 1038 1039 if (nreq < 1040 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { 1041 /*Working in legacy mode , all EQ's shared*/ 1042 dev->caps.comp_pool = 0; 1043 dev->caps.num_comp_vectors = nreq - 1; 1044 } else { 1045 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 1046 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 1047 } 1048 for (i = 0; i < nreq; ++i) 1049 priv->eq_table.eq[i].irq = entries[i].vector; 1050 1051 dev->flags |= MLX4_FLAG_MSI_X; 1052 1053 kfree(entries); 1054 return; 1055 } 1056 1057no_msi: 1058 dev->caps.num_comp_vectors = 1; 1059 dev->caps.comp_pool = 0; 1060 1061 for (i = 0; i < 2; ++i) 1062 priv->eq_table.eq[i].irq = dev->pdev->irq; 1063} 1064 1065static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 1066{ 1067 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 1068 int err = 0; 1069 1070 info->dev = dev; 1071 info->port = port; 1072 mlx4_init_mac_table(dev, &info->mac_table); 1073 mlx4_init_vlan_table(dev, &info->vlan_table); 1074 1075 sprintf(info->dev_name, "mlx4_port%d", port); 1076 info->port_attr.attr.name = info->dev_name; 1077 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 1078 info->port_attr.show = show_port_type; 1079 info->port_attr.store = set_port_type; 1080 sysfs_attr_init(&info->port_attr.attr); 1081 1082 err = device_create_file(&dev->pdev->dev, &info->port_attr); 1083 if (err) { 1084 mlx4_err(dev, "Failed to create file for port %d\n", port); 1085 info->port = -1; 1086 } 1087 1088 return err; 1089} 1090 1091static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 1092{ 1093 if (info->port < 0) 1094 return; 1095 1096 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1097} 1098 1099static int mlx4_init_steering(struct mlx4_dev *dev) 1100{ 1101 struct mlx4_priv *priv = mlx4_priv(dev); 1102 int num_entries = dev->caps.num_ports; 1103 int i, j; 1104 1105 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 1106 if (!priv->steer) 1107 return -ENOMEM; 1108 1109 for (i = 0; i < num_entries; i++) { 1110 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1111 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 1112 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 1113 } 1114 INIT_LIST_HEAD(&priv->steer[i].high_prios); 1115 } 1116 return 0; 1117} 1118 1119static void mlx4_clear_steering(struct mlx4_dev *dev) 1120{ 1121 struct mlx4_priv *priv = mlx4_priv(dev); 1122 struct mlx4_steer_index *entry, *tmp_entry; 1123 struct mlx4_promisc_qp *pqp, *tmp_pqp; 1124 int num_entries = dev->caps.num_ports; 1125 int i, j; 1126 1127 for (i = 0; i < num_entries; i++) { 1128 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1129 list_for_each_entry_safe(pqp, tmp_pqp, 1130 &priv->steer[i].promisc_qps[j], 1131 list) { 1132 list_del(&pqp->list); 1133 kfree(pqp); 1134 } 1135 list_for_each_entry_safe(entry, tmp_entry, 1136 &priv->steer[i].steer_entries[j], 1137 list) { 1138 list_del(&entry->list); 1139 list_for_each_entry_safe(pqp, tmp_pqp, 1140 &entry->duplicates, 1141 list) { 1142 list_del(&pqp->list); 1143 kfree(pqp); 1144 } 1145 kfree(entry); 1146 } 1147 } 1148 } 1149 kfree(priv->steer); 1150} 1151 1152static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1153{ 1154 struct mlx4_priv *priv; 1155 struct mlx4_dev *dev; 1156 int err; 1157 int port; 1158 1159 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 1160 1161 err = pci_enable_device(pdev); 1162 if (err) { 1163 dev_err(&pdev->dev, "Cannot enable PCI device, " 1164 "aborting.\n"); 1165 return err; 1166 } 1167 1168 /* 1169 * Check for BARs. We expect 0: 1MB 1170 */ 1171 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || 1172 pci_resource_len(pdev, 0) != 1 << 20) { 1173 dev_err(&pdev->dev, "Missing DCS, aborting.\n"); 1174 err = -ENODEV; 1175 goto err_disable_pdev; 1176 } 1177 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 1178 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 1179 err = -ENODEV; 1180 goto err_disable_pdev; 1181 } 1182 1183 err = pci_request_regions(pdev, DRV_NAME); 1184 if (err) { 1185 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 1186 goto err_disable_pdev; 1187 } 1188 1189 pci_set_master(pdev); 1190 1191 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1192 if (err) { 1193 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 1194 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1195 if (err) { 1196 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1197 goto err_release_regions; 1198 } 1199 } 1200 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1201 if (err) { 1202 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 1203 "consistent PCI DMA mask.\n"); 1204 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1205 if (err) { 1206 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1207 "aborting.\n"); 1208 goto err_release_regions; 1209 } 1210 } 1211 1212 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 1213 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 1214 1215 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1216 if (!priv) { 1217 dev_err(&pdev->dev, "Device struct alloc failed, " 1218 "aborting.\n"); 1219 err = -ENOMEM; 1220 goto err_release_regions; 1221 } 1222 1223 dev = &priv->dev; 1224 dev->pdev = pdev; 1225 INIT_LIST_HEAD(&priv->ctx_list); 1226 spin_lock_init(&priv->ctx_lock); 1227 1228 mutex_init(&priv->port_mutex); 1229 1230 INIT_LIST_HEAD(&priv->pgdir_list); 1231 mutex_init(&priv->pgdir_mutex); 1232 1233 pci_read_config_byte(pdev, PCI_REVISION_ID, &dev->rev_id); 1234 1235 INIT_LIST_HEAD(&priv->bf_list); 1236 mutex_init(&priv->bf_mutex); 1237 1238 /* 1239 * Now reset the HCA before we touch the PCI capabilities or 1240 * attempt a firmware command, since a boot ROM may have left 1241 * the HCA in an undefined state. 1242 */ 1243 err = mlx4_reset(dev); 1244 if (err) { 1245 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 1246 goto err_free_dev; 1247 } 1248 1249 if (mlx4_cmd_init(dev)) { 1250 mlx4_err(dev, "Failed to init command interface, aborting.\n"); 1251 goto err_free_dev; 1252 } 1253 1254 err = mlx4_init_hca(dev); 1255 if (err) 1256 goto err_cmd; 1257 1258 err = mlx4_alloc_eq_table(dev); 1259 if (err) 1260 goto err_close; 1261 1262 priv->msix_ctl.pool_bm = 0; 1263 spin_lock_init(&priv->msix_ctl.pool_lock); 1264 1265 mlx4_enable_msi_x(dev); 1266 1267 err = mlx4_init_steering(dev); 1268 if (err) 1269 goto err_free_eq; 1270 1271 err = mlx4_setup_hca(dev); 1272 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) { 1273 dev->flags &= ~MLX4_FLAG_MSI_X; 1274 pci_disable_msix(pdev); 1275 err = mlx4_setup_hca(dev); 1276 } 1277 1278 if (err) 1279 goto err_steer; 1280 1281 for (port = 1; port <= dev->caps.num_ports; port++) { 1282 err = mlx4_init_port_info(dev, port); 1283 if (err) 1284 goto err_port; 1285 } 1286 1287 err = mlx4_register_device(dev); 1288 if (err) 1289 goto err_port; 1290 1291 mlx4_sense_init(dev); 1292 mlx4_start_sense(dev); 1293 1294 pci_set_drvdata(pdev, dev); 1295 1296 return 0; 1297 1298err_port: 1299 for (--port; port >= 1; --port) 1300 mlx4_cleanup_port_info(&priv->port[port]); 1301 1302 mlx4_cleanup_mcg_table(dev); 1303 mlx4_cleanup_qp_table(dev); 1304 mlx4_cleanup_srq_table(dev); 1305 mlx4_cleanup_cq_table(dev); 1306 mlx4_cmd_use_polling(dev); 1307 mlx4_cleanup_eq_table(dev); 1308 mlx4_cleanup_mr_table(dev); 1309 mlx4_cleanup_pd_table(dev); 1310 mlx4_cleanup_uar_table(dev); 1311 1312err_steer: 1313 mlx4_clear_steering(dev); 1314 1315err_free_eq: 1316 mlx4_free_eq_table(dev); 1317 1318err_close: 1319 if (dev->flags & MLX4_FLAG_MSI_X) 1320 pci_disable_msix(pdev); 1321 1322 mlx4_close_hca(dev); 1323 1324err_cmd: 1325 mlx4_cmd_cleanup(dev); 1326 1327err_free_dev: 1328 kfree(priv); 1329 1330err_release_regions: 1331 pci_release_regions(pdev); 1332 1333err_disable_pdev: 1334 pci_disable_device(pdev); 1335 pci_set_drvdata(pdev, NULL); 1336 return err; 1337} 1338 1339static int __devinit mlx4_init_one(struct pci_dev *pdev, 1340 const struct pci_device_id *id) 1341{ 1342 printk_once(KERN_INFO "%s", mlx4_version); 1343 1344 return __mlx4_init_one(pdev, id); 1345} 1346 1347static void mlx4_remove_one(struct pci_dev *pdev) 1348{ 1349 struct mlx4_dev *dev = pci_get_drvdata(pdev); 1350 struct mlx4_priv *priv = mlx4_priv(dev); 1351 int p; 1352 1353 if (dev) { 1354 mlx4_stop_sense(dev); 1355 mlx4_unregister_device(dev); 1356 1357 for (p = 1; p <= dev->caps.num_ports; p++) { 1358 mlx4_cleanup_port_info(&priv->port[p]); 1359 mlx4_CLOSE_PORT(dev, p); 1360 } 1361 1362 mlx4_cleanup_mcg_table(dev); 1363 mlx4_cleanup_qp_table(dev); 1364 mlx4_cleanup_srq_table(dev); 1365 mlx4_cleanup_cq_table(dev); 1366 mlx4_cmd_use_polling(dev); 1367 mlx4_cleanup_eq_table(dev); 1368 mlx4_cleanup_mr_table(dev); 1369 mlx4_cleanup_pd_table(dev); 1370 1371 iounmap(priv->kar); 1372 mlx4_uar_free(dev, &priv->driver_uar); 1373 mlx4_cleanup_uar_table(dev); 1374 mlx4_clear_steering(dev); 1375 mlx4_free_eq_table(dev); 1376 mlx4_close_hca(dev); 1377 mlx4_cmd_cleanup(dev); 1378 1379 if (dev->flags & MLX4_FLAG_MSI_X) 1380 pci_disable_msix(pdev); 1381 1382 kfree(priv); 1383 pci_release_regions(pdev); 1384 pci_disable_device(pdev); 1385 pci_set_drvdata(pdev, NULL); 1386 } 1387} 1388 1389int mlx4_restart_one(struct pci_dev *pdev) 1390{ 1391 mlx4_remove_one(pdev); 1392 return __mlx4_init_one(pdev, NULL); 1393} 1394 1395static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { 1396 { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */ 1397 { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */ 1398 { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */ 1399 { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */ 1400 { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */ 1401 { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */ 1402 { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 1403 { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 1404 { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 1405 { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 1406 { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 1407 { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ 1408 { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */ 1409 { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */ 1410 { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */ 1411 { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */ 1412 { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */ 1413 { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */ 1414 { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */ 1415 { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */ 1416 { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */ 1417 { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */ 1418 { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */ 1419 { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */ 1420 { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */ 1421 { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */ 1422 { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */ 1423 { 0, } 1424}; 1425 1426MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 1427 1428static struct pci_driver mlx4_driver = { 1429 .name = DRV_NAME, 1430 .id_table = mlx4_pci_table, 1431 .probe = mlx4_init_one, 1432 .remove = __devexit_p(mlx4_remove_one) 1433}; 1434 1435static int __init mlx4_verify_params(void) 1436{ 1437 if ((log_num_mac < 0) || (log_num_mac > 7)) { 1438 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); 1439 return -1; 1440 } 1441 1442 if ((log_num_vlan < 0) || (log_num_vlan > 7)) { 1443 pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan); 1444 return -1; 1445 } 1446 1447 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 1448 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 1449 return -1; 1450 } 1451 1452 return 0; 1453} 1454 1455static int __init mlx4_init(void) 1456{ 1457 int ret; 1458 1459 if (mlx4_verify_params()) 1460 return -EINVAL; 1461 1462 mlx4_catas_init(); 1463 1464 mlx4_wq = create_singlethread_workqueue("mlx4"); 1465 if (!mlx4_wq) 1466 return -ENOMEM; 1467 1468 ret = pci_register_driver(&mlx4_driver); 1469 return ret < 0 ? ret : 0; 1470} 1471 1472static void __exit mlx4_cleanup(void) 1473{ 1474 pci_unregister_driver(&mlx4_driver); 1475 destroy_workqueue(mlx4_wq); 1476} 1477 1478module_init(mlx4_init); 1479module_exit(mlx4_cleanup);