
Merge branch 'mlx5-next'

Eli Cohen says:

====================
mlx5 driver changes related to PCI handling

The first of these patches changes the PCI device driver from mlx5_ib to
mlx5_core, in a similar manner to what is done in mlx4. This lays the groundwork
for introducing an Ethernet driver for hardware that uses mlx5.

The other two patches contain minor fixes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
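
The heart of the series is a small interface-registration API in mlx5_core: the core driver now owns the PCI device (see the main.c hunks below, which move the profile table, init_one() and remove_one() into mlx5_core), and protocol drivers attach to it through mlx5_register_interface()/mlx5_unregister_interface(). Here is a minimal sketch of a hypothetical client of that API, modeled on the mlx5_ib conversion in this series; my_dev, my_add, my_remove and my_event are illustrative names, and error handling is trimmed to the essentials.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mlx5/driver.h>

struct my_dev {
        struct mlx5_core_dev *mdev;
};

/* Called by mlx5_core once per core device; the returned pointer is
 * stored by the core and handed back as 'context' below. */
static void *my_add(struct mlx5_core_dev *mdev)
{
        struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return NULL;
        dev->mdev = mdev;       /* keep a pointer, not an embedded copy */
        return dev;
}

static void my_remove(struct mlx5_core_dev *mdev, void *context)
{
        kfree(context);
}

/* Optional: asynchronous events; for port events 'param' carries the
 * port number (see the eq.c hunk below). */
static void my_event(struct mlx5_core_dev *mdev, void *context,
                     enum mlx5_dev_event event, unsigned long param)
{
}

static struct mlx5_interface my_interface = {
        .add    = my_add,
        .remove = my_remove,
        .event  = my_event,
};

static int __init my_init(void)
{
        return mlx5_register_interface(&my_interface);
}

static void __exit my_exit(void)
{
        mlx5_unregister_interface(&my_interface);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

The mlx5_ib conversion below follows exactly this shape: the IB driver's PCI probe/remove (init_one()/remove_one()) become mlx5_ib_add()/mlx5_ib_remove(), and struct mlx5_ib_dev now holds a pointer to struct mlx5_core_dev instead of embedding it, which is why the hunks replace &dev->mdev with dev->mdev throughout.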

+525 -339
+24 -24
drivers/infiniband/hw/mlx5/cq.c
··· 180 180 struct mlx5_core_srq *msrq = NULL; 181 181 182 182 if (qp->ibqp.xrcd) { 183 - msrq = mlx5_core_get_srq(&dev->mdev, 183 + msrq = mlx5_core_get_srq(dev->mdev, 184 184 be32_to_cpu(cqe->srqn)); 185 185 srq = to_mibsrq(msrq); 186 186 } else { ··· 348 348 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, 349 349 u16 tail, u16 head) 350 350 { 351 - int idx; 351 + u16 idx; 352 352 353 353 do { 354 354 idx = tail & (qp->sq.wqe_cnt - 1); ··· 364 364 365 365 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf) 366 366 { 367 - mlx5_buf_free(&dev->mdev, &buf->buf); 367 + mlx5_buf_free(dev->mdev, &buf->buf); 368 368 } 369 369 370 370 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, ··· 450 450 * because CQs will be locked while QPs are removed 451 451 * from the table. 452 452 */ 453 - mqp = __mlx5_qp_lookup(&dev->mdev, qpn); 453 + mqp = __mlx5_qp_lookup(dev->mdev, qpn); 454 454 if (unlikely(!mqp)) { 455 455 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n", 456 456 cq->mcq.cqn, qpn); ··· 514 514 case MLX5_CQE_SIG_ERR: 515 515 sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64; 516 516 517 - read_lock(&dev->mdev.priv.mr_table.lock); 518 - mmr = __mlx5_mr_lookup(&dev->mdev, 517 + read_lock(&dev->mdev->priv.mr_table.lock); 518 + mmr = __mlx5_mr_lookup(dev->mdev, 519 519 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); 520 520 if (unlikely(!mmr)) { 521 - read_unlock(&dev->mdev.priv.mr_table.lock); 521 + read_unlock(&dev->mdev->priv.mr_table.lock); 522 522 mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n", 523 523 cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey)); 524 524 return -EINVAL; ··· 536 536 mr->sig->err_item.expected, 537 537 mr->sig->err_item.actual); 538 538 539 - read_unlock(&dev->mdev.priv.mr_table.lock); 539 + read_unlock(&dev->mdev->priv.mr_table.lock); 540 540 goto repoll; 541 541 } 542 542 ··· 575 575 mlx5_cq_arm(&to_mcq(ibcq)->mcq, 576 576 (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? 
577 577 MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, 578 - to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map, 579 - MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock)); 578 + to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map, 579 + MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock)); 580 580 581 581 return 0; 582 582 } ··· 586 586 { 587 587 int err; 588 588 589 - err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size, 589 + err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, 590 590 PAGE_SIZE * 2, &buf->buf); 591 591 if (err) 592 592 return err; ··· 691 691 { 692 692 int err; 693 693 694 - err = mlx5_db_alloc(&dev->mdev, &cq->db); 694 + err = mlx5_db_alloc(dev->mdev, &cq->db); 695 695 if (err) 696 696 return err; 697 697 ··· 716 716 mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas); 717 717 718 718 (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT; 719 - *index = dev->mdev.priv.uuari.uars[0].index; 719 + *index = dev->mdev->priv.uuari.uars[0].index; 720 720 721 721 return 0; 722 722 ··· 724 724 free_cq_buf(dev, &cq->buf); 725 725 726 726 err_db: 727 - mlx5_db_free(&dev->mdev, &cq->db); 727 + mlx5_db_free(dev->mdev, &cq->db); 728 728 return err; 729 729 } 730 730 731 731 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) 732 732 { 733 733 free_cq_buf(dev, &cq->buf); 734 - mlx5_db_free(&dev->mdev, &cq->db); 734 + mlx5_db_free(dev->mdev, &cq->db); 735 735 } 736 736 737 737 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries, ··· 752 752 return ERR_PTR(-EINVAL); 753 753 754 754 entries = roundup_pow_of_two(entries + 1); 755 - if (entries > dev->mdev.caps.max_cqes) 755 + if (entries > dev->mdev->caps.max_cqes) 756 756 return ERR_PTR(-EINVAL); 757 757 758 758 cq = kzalloc(sizeof(*cq), GFP_KERNEL); ··· 789 789 cqb->ctx.c_eqn = cpu_to_be16(eqn); 790 790 cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma); 791 791 792 - err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen); 792 + err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen); 793 793 if (err) 794 794 goto err_cqb; 795 795 ··· 809 809 return &cq->ibcq; 810 810 811 811 err_cmd: 812 - mlx5_core_destroy_cq(&dev->mdev, &cq->mcq); 812 + mlx5_core_destroy_cq(dev->mdev, &cq->mcq); 813 813 814 814 err_cqb: 815 815 mlx5_vfree(cqb); ··· 834 834 if (cq->uobject) 835 835 context = cq->uobject->context; 836 836 837 - mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq); 837 + mlx5_core_destroy_cq(dev->mdev, &mcq->mcq); 838 838 if (context) 839 839 destroy_cq_user(mcq, context); 840 840 else ··· 919 919 int err; 920 920 u32 fsel; 921 921 922 - if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER)) 922 + if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER)) 923 923 return -ENOSYS; 924 924 925 925 in = kzalloc(sizeof(*in), GFP_KERNEL); ··· 931 931 in->ctx.cq_period = cpu_to_be16(cq_period); 932 932 in->ctx.cq_max_count = cpu_to_be16(cq_count); 933 933 in->field_select = cpu_to_be32(fsel); 934 - err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in)); 934 + err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in)); 935 935 kfree(in); 936 936 937 937 if (err) ··· 1074 1074 int uninitialized_var(cqe_size); 1075 1075 unsigned long flags; 1076 1076 1077 - if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) { 1077 + if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) { 1078 1078 pr_info("Firmware does not support resize CQ\n"); 1079 1079 return -ENOSYS; 1080 1080 } ··· 1083 1083 return -EINVAL; 1084 1084 1085 1085 entries = 
roundup_pow_of_two(entries + 1); 1086 - if (entries > dev->mdev.caps.max_cqes + 1) 1086 + if (entries > dev->mdev->caps.max_cqes + 1) 1087 1087 return -EINVAL; 1088 1088 1089 1089 if (entries == ibcq->cqe + 1) ··· 1128 1128 in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE); 1129 1129 in->cqn = cpu_to_be32(cq->mcq.cqn); 1130 1130 1131 - err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen); 1131 + err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); 1132 1132 if (err) 1133 1133 goto ex_alloc; 1134 1134
+3 -3
drivers/infiniband/hw/mlx5/mad.c
··· 41 41 }; 42 42 43 43 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, 44 - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, 44 + u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh, 45 45 void *in_mad, void *response_mad) 46 46 { 47 47 u8 op_modifier = 0; ··· 54 54 if (ignore_bkey || !in_wc) 55 55 op_modifier |= 0x2; 56 56 57 - return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port); 57 + return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port); 58 58 } 59 59 60 60 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, ··· 129 129 130 130 packet_error = be16_to_cpu(out_mad->status); 131 131 132 - dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ? 132 + dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ? 133 133 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0; 134 134 135 135 out:
+98 -197
drivers/infiniband/hw/mlx5/main.c
··· 54 54 MODULE_LICENSE("Dual BSD/GPL"); 55 55 MODULE_VERSION(DRIVER_VERSION); 56 56 57 - static int prof_sel = 2; 58 - module_param_named(prof_sel, prof_sel, int, 0444); 59 - MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); 57 + static int deprecated_prof_sel = 2; 58 + module_param_named(prof_sel, deprecated_prof_sel, int, 0444); 59 + MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core"); 60 60 61 61 static char mlx5_version[] = 62 62 DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" 63 63 DRIVER_VERSION " (" DRIVER_RELDATE ")\n"; 64 64 65 - static struct mlx5_profile profile[] = { 66 - [0] = { 67 - .mask = 0, 68 - }, 69 - [1] = { 70 - .mask = MLX5_PROF_MASK_QP_SIZE, 71 - .log_max_qp = 12, 72 - }, 73 - [2] = { 74 - .mask = MLX5_PROF_MASK_QP_SIZE | 75 - MLX5_PROF_MASK_MR_CACHE, 76 - .log_max_qp = 17, 77 - .mr_cache[0] = { 78 - .size = 500, 79 - .limit = 250 80 - }, 81 - .mr_cache[1] = { 82 - .size = 500, 83 - .limit = 250 84 - }, 85 - .mr_cache[2] = { 86 - .size = 500, 87 - .limit = 250 88 - }, 89 - .mr_cache[3] = { 90 - .size = 500, 91 - .limit = 250 92 - }, 93 - .mr_cache[4] = { 94 - .size = 500, 95 - .limit = 250 96 - }, 97 - .mr_cache[5] = { 98 - .size = 500, 99 - .limit = 250 100 - }, 101 - .mr_cache[6] = { 102 - .size = 500, 103 - .limit = 250 104 - }, 105 - .mr_cache[7] = { 106 - .size = 500, 107 - .limit = 250 108 - }, 109 - .mr_cache[8] = { 110 - .size = 500, 111 - .limit = 250 112 - }, 113 - .mr_cache[9] = { 114 - .size = 500, 115 - .limit = 250 116 - }, 117 - .mr_cache[10] = { 118 - .size = 500, 119 - .limit = 250 120 - }, 121 - .mr_cache[11] = { 122 - .size = 500, 123 - .limit = 250 124 - }, 125 - .mr_cache[12] = { 126 - .size = 64, 127 - .limit = 32 128 - }, 129 - .mr_cache[13] = { 130 - .size = 32, 131 - .limit = 16 132 - }, 133 - .mr_cache[14] = { 134 - .size = 16, 135 - .limit = 8 136 - }, 137 - .mr_cache[15] = { 138 - .size = 8, 139 - .limit = 4 140 - }, 141 - }, 142 - }; 143 - 144 65 int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn) 145 66 { 146 - struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; 67 + struct mlx5_eq_table *table = &dev->mdev->priv.eq_table; 147 68 struct mlx5_eq *eq, *n; 148 69 int err = -ENOENT; 149 70 ··· 84 163 85 164 static int alloc_comp_eqs(struct mlx5_ib_dev *dev) 86 165 { 87 - struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; 166 + struct mlx5_eq_table *table = &dev->mdev->priv.eq_table; 88 167 char name[MLX5_MAX_EQ_NAME]; 89 168 struct mlx5_eq *eq, *n; 90 169 int ncomp_vec; ··· 103 182 } 104 183 105 184 snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i); 106 - err = mlx5_create_map_eq(&dev->mdev, eq, 185 + err = mlx5_create_map_eq(dev->mdev, eq, 107 186 i + MLX5_EQ_VEC_COMP_BASE, nent, 0, 108 - name, &dev->mdev.priv.uuari.uars[0]); 187 + name, &dev->mdev->priv.uuari.uars[0]); 109 188 if (err) { 110 189 kfree(eq); 111 190 goto clean; ··· 125 204 list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { 126 205 list_del(&eq->list); 127 206 spin_unlock(&table->lock); 128 - if (mlx5_destroy_unmap_eq(&dev->mdev, eq)) 207 + if (mlx5_destroy_unmap_eq(dev->mdev, eq)) 129 208 mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); 130 209 kfree(eq); 131 210 spin_lock(&table->lock); ··· 136 215 137 216 static void free_comp_eqs(struct mlx5_ib_dev *dev) 138 217 { 139 - struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; 218 + struct mlx5_eq_table *table = &dev->mdev->priv.eq_table; 140 219 struct mlx5_eq *eq, *n; 141 220 142 221 
spin_lock(&table->lock); 143 222 list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { 144 223 list_del(&eq->list); 145 224 spin_unlock(&table->lock); 146 - if (mlx5_destroy_unmap_eq(&dev->mdev, eq)) 225 + if (mlx5_destroy_unmap_eq(dev->mdev, eq)) 147 226 mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn); 148 227 kfree(eq); 149 228 spin_lock(&table->lock); ··· 176 255 177 256 memset(props, 0, sizeof(*props)); 178 257 179 - props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) | 180 - (fw_rev_min(&dev->mdev) << 16) | 181 - fw_rev_sub(&dev->mdev); 258 + props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) | 259 + (fw_rev_min(dev->mdev) << 16) | 260 + fw_rev_sub(dev->mdev); 182 261 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | 183 262 IB_DEVICE_PORT_ACTIVE_EVENT | 184 263 IB_DEVICE_SYS_IMAGE_GUID | 185 264 IB_DEVICE_RC_RNR_NAK_GEN; 186 - flags = dev->mdev.caps.flags; 265 + flags = dev->mdev->caps.flags; 187 266 if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR) 188 267 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; 189 268 if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR) ··· 213 292 memcpy(&props->sys_image_guid, out_mad->data + 4, 8); 214 293 215 294 props->max_mr_size = ~0ull; 216 - props->page_size_cap = dev->mdev.caps.min_page_sz; 217 - props->max_qp = 1 << dev->mdev.caps.log_max_qp; 218 - props->max_qp_wr = dev->mdev.caps.max_wqes; 219 - max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); 220 - max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / 295 + props->page_size_cap = dev->mdev->caps.min_page_sz; 296 + props->max_qp = 1 << dev->mdev->caps.log_max_qp; 297 + props->max_qp_wr = dev->mdev->caps.max_wqes; 298 + max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg); 299 + max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) / 221 300 sizeof(struct mlx5_wqe_data_seg); 222 301 props->max_sge = min(max_rq_sg, max_sq_sg); 223 - props->max_cq = 1 << dev->mdev.caps.log_max_cq; 224 - props->max_cqe = dev->mdev.caps.max_cqes - 1; 225 - props->max_mr = 1 << dev->mdev.caps.log_max_mkey; 226 - props->max_pd = 1 << dev->mdev.caps.log_max_pd; 227 - props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp; 228 - props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp; 302 + props->max_cq = 1 << dev->mdev->caps.log_max_cq; 303 + props->max_cqe = dev->mdev->caps.max_cqes - 1; 304 + props->max_mr = 1 << dev->mdev->caps.log_max_mkey; 305 + props->max_pd = 1 << dev->mdev->caps.log_max_pd; 306 + props->max_qp_rd_atom = dev->mdev->caps.max_ra_req_qp; 307 + props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp; 229 308 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 230 - props->max_srq = 1 << dev->mdev.caps.log_max_srq; 231 - props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1; 309 + props->max_srq = 1 << dev->mdev->caps.log_max_srq; 310 + props->max_srq_wr = dev->mdev->caps.max_srq_wqes - 1; 232 311 props->max_srq_sge = max_rq_sg - 1; 233 312 props->max_fast_reg_page_list_len = (unsigned int)-1; 234 - props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay; 313 + props->local_ca_ack_delay = dev->mdev->caps.local_ca_ack_delay; 235 314 props->atomic_cap = IB_ATOMIC_NONE; 236 315 props->masked_atomic_cap = IB_ATOMIC_NONE; 237 316 props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28)); 238 - props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg; 239 - props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg; 317 + props->max_mcast_grp = 1 << 
dev->mdev->caps.log_max_mcg; 318 + props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg; 240 319 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 241 320 props->max_mcast_grp; 242 321 props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ ··· 257 336 int ext_active_speed; 258 337 int err = -ENOMEM; 259 338 260 - if (port < 1 || port > dev->mdev.caps.num_ports) { 339 + if (port < 1 || port > dev->mdev->caps.num_ports) { 261 340 mlx5_ib_warn(dev, "invalid port number %d\n", port); 262 341 return -EINVAL; 263 342 } ··· 288 367 props->phys_state = out_mad->data[33] >> 4; 289 368 props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20)); 290 369 props->gid_tbl_len = out_mad->data[50]; 291 - props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg; 292 - props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len; 370 + props->max_msg_sz = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg; 371 + props->pkey_tbl_len = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len; 293 372 props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46)); 294 373 props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48)); 295 374 props->active_width = out_mad->data[31] & 0xf; ··· 316 395 317 396 /* If reported active speed is QDR, check if is FDR-10 */ 318 397 if (props->active_speed == 4) { 319 - if (dev->mdev.caps.ext_port_cap[port - 1] & 398 + if (dev->mdev->caps.ext_port_cap[port - 1] & 320 399 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { 321 400 init_query_mad(in_mad); 322 401 in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; ··· 429 508 * a 144 trap. If cmd fails, just ignore. 430 509 */ 431 510 memcpy(&in, props->node_desc, 64); 432 - err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out, 511 + err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out, 433 512 sizeof(out), MLX5_REG_NODE_DESC, 0, 1); 434 513 if (err) 435 514 return err; ··· 456 535 tmp = (attr.port_cap_flags | props->set_port_cap_mask) & 457 536 ~props->clr_port_cap_mask; 458 537 459 - err = mlx5_set_port_caps(&dev->mdev, port, tmp); 538 + err = mlx5_set_port_caps(dev->mdev, port, tmp); 460 539 461 540 out: 462 541 mutex_unlock(&dev->cap_mask_mutex); ··· 478 557 int uuarn; 479 558 int err; 480 559 int i; 481 - int reqlen; 560 + size_t reqlen; 482 561 483 562 if (!dev->ib_active) 484 563 return ERR_PTR(-EAGAIN); ··· 512 591 513 592 num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; 514 593 gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; 515 - resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp; 516 - resp.bf_reg_size = dev->mdev.caps.bf_reg_size; 594 + resp.qp_tab_size = 1 << dev->mdev->caps.log_max_qp; 595 + resp.bf_reg_size = dev->mdev->caps.bf_reg_size; 517 596 resp.cache_line_size = L1_CACHE_BYTES; 518 - resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz; 519 - resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz; 520 - resp.max_send_wqebb = dev->mdev.caps.max_wqes; 521 - resp.max_recv_wr = dev->mdev.caps.max_wqes; 522 - resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes; 597 + resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz; 598 + resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz; 599 + resp.max_send_wqebb = dev->mdev->caps.max_wqes; 600 + resp.max_recv_wr = dev->mdev->caps.max_wqes; 601 + resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes; 523 602 524 603 context = kzalloc(sizeof(*context), GFP_KERNEL); 525 604 if (!context) ··· 556 635 } 557 636 558 637 for (i = 0; i < num_uars; i++) { 559 - err = mlx5_cmd_alloc_uar(&dev->mdev, 
&uars[i].index); 638 + err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index); 560 639 if (err) 561 640 goto out_count; 562 641 } ··· 565 644 mutex_init(&context->db_page_mutex); 566 645 567 646 resp.tot_uuars = req.total_num_uuars; 568 - resp.num_ports = dev->mdev.caps.num_ports; 647 + resp.num_ports = dev->mdev->caps.num_ports; 569 648 err = ib_copy_to_udata(udata, &resp, 570 649 sizeof(resp) - sizeof(resp.reserved)); 571 650 if (err) ··· 579 658 580 659 out_uars: 581 660 for (i--; i >= 0; i--) 582 - mlx5_cmd_free_uar(&dev->mdev, uars[i].index); 661 + mlx5_cmd_free_uar(dev->mdev, uars[i].index); 583 662 out_count: 584 663 kfree(uuari->count); 585 664 ··· 602 681 int i; 603 682 604 683 for (i = 0; i < uuari->num_uars; i++) { 605 - if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index)) 684 + if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index)) 606 685 mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); 607 686 } 608 687 ··· 616 695 617 696 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) 618 697 { 619 - return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index; 698 + return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index; 620 699 } 621 700 622 701 static int get_command(unsigned long offset) ··· 694 773 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); 695 774 seg->start_addr = 0; 696 775 697 - err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in), 776 + err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in), 698 777 NULL, NULL, NULL); 699 778 if (err) { 700 779 mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); ··· 719 798 720 799 memset(&mr, 0, sizeof(mr)); 721 800 mr.key = key; 722 - err = mlx5_core_destroy_mkey(&dev->mdev, &mr); 801 + err = mlx5_core_destroy_mkey(dev->mdev, &mr); 723 802 if (err) 724 803 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); 725 804 } ··· 736 815 if (!pd) 737 816 return ERR_PTR(-ENOMEM); 738 817 739 - err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn); 818 + err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn); 740 819 if (err) { 741 820 kfree(pd); 742 821 return ERR_PTR(err); ··· 745 824 if (context) { 746 825 resp.pdn = pd->pdn; 747 826 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 748 - mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); 827 + mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); 749 828 kfree(pd); 750 829 return ERR_PTR(-EFAULT); 751 830 } 752 831 } else { 753 832 err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); 754 833 if (err) { 755 - mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); 834 + mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); 756 835 kfree(pd); 757 836 return ERR_PTR(err); 758 837 } ··· 769 848 if (!pd->uobject) 770 849 free_pa_mkey(mdev, mpd->pa_lkey); 771 850 772 - mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn); 851 + mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); 773 852 kfree(mpd); 774 853 775 854 return 0; ··· 780 859 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 781 860 int err; 782 861 783 - err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num); 862 + err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num); 784 863 if (err) 785 864 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", 786 865 ibqp->qp_num, gid->raw); ··· 793 872 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 794 873 int err; 795 874 796 - err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num); 875 + err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num); 797 876 if (err) 798 877 mlx5_ib_warn(dev, "failed detaching 
QPN 0x%x, MGID %pI6\n", 799 878 ibqp->qp_num, gid->raw); ··· 827 906 if (err) 828 907 goto out; 829 908 830 - dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); 909 + dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); 831 910 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); 832 911 833 912 out: ··· 842 921 struct mlx5_ib_dev *dev = 843 922 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 844 923 845 - return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages); 924 + return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages); 846 925 } 847 926 848 927 static ssize_t show_reg_pages(struct device *device, ··· 851 930 struct mlx5_ib_dev *dev = 852 931 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 853 932 854 - return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages); 933 + return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages); 855 934 } 856 935 857 936 static ssize_t show_hca(struct device *device, struct device_attribute *attr, ··· 859 938 { 860 939 struct mlx5_ib_dev *dev = 861 940 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 862 - return sprintf(buf, "MT%d\n", dev->mdev.pdev->device); 941 + return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 863 942 } 864 943 865 944 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, ··· 867 946 { 868 947 struct mlx5_ib_dev *dev = 869 948 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 870 - return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev), 871 - fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev)); 949 + return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev), 950 + fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); 872 951 } 873 952 874 953 static ssize_t show_rev(struct device *device, struct device_attribute *attr, ··· 876 955 { 877 956 struct mlx5_ib_dev *dev = 878 957 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 879 - return sprintf(buf, "%x\n", dev->mdev.rev_id); 958 + return sprintf(buf, "%x\n", dev->mdev->rev_id); 880 959 } 881 960 882 961 static ssize_t show_board(struct device *device, struct device_attribute *attr, ··· 885 964 struct mlx5_ib_dev *dev = 886 965 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 887 966 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 888 - dev->mdev.board_id); 967 + dev->mdev->board_id); 889 968 } 890 969 891 970 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); ··· 904 983 &dev_attr_reg_pages, 905 984 }; 906 985 907 - static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, 908 - void *data) 986 + static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, 987 + enum mlx5_dev_event event, unsigned long param) 909 988 { 910 - struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev); 989 + struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; 911 990 struct ib_event ibev; 991 + 912 992 u8 port = 0; 913 993 914 994 switch (event) { ··· 920 998 921 999 case MLX5_DEV_EVENT_PORT_UP: 922 1000 ibev.event = IB_EVENT_PORT_ACTIVE; 923 - port = *(u8 *)data; 1001 + port = (u8)param; 924 1002 break; 925 1003 926 1004 case MLX5_DEV_EVENT_PORT_DOWN: 927 1005 ibev.event = IB_EVENT_PORT_ERR; 928 - port = *(u8 *)data; 1006 + port = (u8)param; 929 1007 break; 930 1008 931 1009 case MLX5_DEV_EVENT_PORT_INITIALIZED: ··· 934 1012 935 1013 case MLX5_DEV_EVENT_LID_CHANGE: 936 1014 ibev.event = IB_EVENT_LID_CHANGE; 937 - port = *(u8 *)data; 1015 + port = (u8)param; 938 1016 break; 939 1017 940 1018 case MLX5_DEV_EVENT_PKEY_CHANGE: 941 1019 ibev.event = IB_EVENT_PKEY_CHANGE; 942 - port = *(u8 
*)data; 1020 + port = (u8)param; 943 1021 break; 944 1022 945 1023 case MLX5_DEV_EVENT_GUID_CHANGE: 946 1024 ibev.event = IB_EVENT_GID_CHANGE; 947 - port = *(u8 *)data; 1025 + port = (u8)param; 948 1026 break; 949 1027 950 1028 case MLX5_DEV_EVENT_CLIENT_REREG: 951 1029 ibev.event = IB_EVENT_CLIENT_REREGISTER; 952 - port = *(u8 *)data; 1030 + port = (u8)param; 953 1031 break; 954 1032 } 955 1033 ··· 969 1047 { 970 1048 int port; 971 1049 972 - for (port = 1; port <= dev->mdev.caps.num_ports; port++) 1050 + for (port = 1; port <= dev->mdev->caps.num_ports; port++) 973 1051 mlx5_query_ext_port_caps(dev, port); 974 1052 } 975 1053 ··· 994 1072 goto out; 995 1073 } 996 1074 997 - for (port = 1; port <= dev->mdev.caps.num_ports; port++) { 1075 + for (port = 1; port <= dev->mdev->caps.num_ports; port++) { 998 1076 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 999 1077 if (err) { 1000 1078 mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); 1001 1079 break; 1002 1080 } 1003 - dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys; 1004 - dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; 1081 + dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys; 1082 + dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; 1005 1083 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", 1006 1084 dprops->max_pkeys, pprops->gid_tbl_len); 1007 1085 } ··· 1250 1328 mlx5_ib_dealloc_pd(devr->p0); 1251 1329 } 1252 1330 1253 - static int init_one(struct pci_dev *pdev, 1254 - const struct pci_device_id *id) 1331 + static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 1255 1332 { 1256 - struct mlx5_core_dev *mdev; 1257 1333 struct mlx5_ib_dev *dev; 1258 1334 int err; 1259 1335 int i; ··· 1260 1340 1261 1341 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); 1262 1342 if (!dev) 1263 - return -ENOMEM; 1343 + return NULL; 1264 1344 1265 - mdev = &dev->mdev; 1266 - mdev->event = mlx5_ib_event; 1267 - if (prof_sel >= ARRAY_SIZE(profile)) { 1268 - pr_warn("selected pofile out of range, selceting default\n"); 1269 - prof_sel = 0; 1270 - } 1271 - mdev->profile = &profile[prof_sel]; 1272 - err = mlx5_dev_init(mdev, pdev); 1273 - if (err) 1274 - goto err_free; 1345 + dev->mdev = mdev; 1275 1346 1276 1347 err = get_port_caps(dev); 1277 1348 if (err) 1278 - goto err_cleanup; 1349 + goto err_dealloc; 1279 1350 1280 1351 get_ext_port_caps(dev); 1281 1352 1282 1353 err = alloc_comp_eqs(dev); 1283 1354 if (err) 1284 - goto err_cleanup; 1355 + goto err_dealloc; 1285 1356 1286 1357 MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); 1287 1358 ··· 1391 1480 1392 1481 dev->ib_active = true; 1393 1482 1394 - return 0; 1483 + return dev; 1395 1484 1396 1485 err_umrc: 1397 1486 destroy_umrc_res(dev); ··· 1405 1494 err_eqs: 1406 1495 free_comp_eqs(dev); 1407 1496 1408 - err_cleanup: 1409 - mlx5_dev_cleanup(mdev); 1410 - 1411 - err_free: 1497 + err_dealloc: 1412 1498 ib_dealloc_device((struct ib_device *)dev); 1413 1499 1414 - return err; 1500 + return NULL; 1415 1501 } 1416 1502 1417 - static void remove_one(struct pci_dev *pdev) 1503 + static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) 1418 1504 { 1419 - struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev); 1420 - 1505 + struct mlx5_ib_dev *dev = context; 1421 1506 destroy_umrc_res(dev); 1422 1507 ib_unregister_device(&dev->ib_dev); 1423 1508 destroy_dev_resources(&dev->devr); 1424 1509 free_comp_eqs(dev); 1425 - mlx5_dev_cleanup(&dev->mdev); 1426 1510 ib_dealloc_device(&dev->ib_dev); 1427 1511 } 1428 1512 1429 - 
static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = { 1430 - { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ 1431 - { 0, } 1432 - }; 1433 - 1434 - MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table); 1435 - 1436 - static struct pci_driver mlx5_ib_driver = { 1437 - .name = DRIVER_NAME, 1438 - .id_table = mlx5_ib_pci_table, 1439 - .probe = init_one, 1440 - .remove = remove_one 1513 + static struct mlx5_interface mlx5_ib_interface = { 1514 + .add = mlx5_ib_add, 1515 + .remove = mlx5_ib_remove, 1516 + .event = mlx5_ib_event, 1441 1517 }; 1442 1518 1443 1519 static int __init mlx5_ib_init(void) 1444 1520 { 1445 - return pci_register_driver(&mlx5_ib_driver); 1521 + if (deprecated_prof_sel != 2) 1522 + pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n"); 1523 + 1524 + return mlx5_register_interface(&mlx5_ib_interface); 1446 1525 } 1447 1526 1448 1527 static void __exit mlx5_ib_cleanup(void) 1449 1528 { 1450 - pci_unregister_driver(&mlx5_ib_driver); 1529 + mlx5_unregister_interface(&mlx5_ib_interface); 1451 1530 } 1452 1531 1453 1532 module_init(mlx5_ib_init);
+1 -1
drivers/infiniband/hw/mlx5/mem.c
··· 148 148 u64 off_mask; 149 149 u64 buf_off; 150 150 151 - page_size = 1 << page_shift; 151 + page_size = (u64)1 << page_shift; 152 152 page_mask = page_size - 1; 153 153 buf_off = addr & page_mask; 154 154 off_size = page_size >> 6;
+2 -12
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 360 360 361 361 struct mlx5_ib_dev { 362 362 struct ib_device ib_dev; 363 - struct mlx5_core_dev mdev; 363 + struct mlx5_core_dev *mdev; 364 364 MLX5_DECLARE_DOORBELL_LOCK(uar_lock); 365 365 struct list_head eqs_list; 366 366 int num_ports; ··· 454 454 return container_of(ibah, struct mlx5_ib_ah, ibah); 455 455 } 456 456 457 - static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev) 458 - { 459 - return container_of(dev, struct mlx5_ib_dev, mdev); 460 - } 461 - 462 - static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev) 463 - { 464 - return mlx5_core2ibdev(pci2mlx5_core_dev(pdev)); 465 - } 466 - 467 457 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt, 468 458 struct mlx5_db *db); 469 459 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db); ··· 461 471 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq); 462 472 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index); 463 473 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, 464 - int port, struct ib_wc *in_wc, struct ib_grh *in_grh, 474 + u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh, 465 475 void *in_mad, void *response_mad); 466 476 struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr, 467 477 struct mlx5_ib_ah *ah);
+24 -24
drivers/infiniband/hw/mlx5/mr.c
··· 73 73 struct mlx5_cache_ent *ent = &cache->ent[c]; 74 74 u8 key; 75 75 unsigned long flags; 76 - struct mlx5_mr_table *table = &dev->mdev.priv.mr_table; 76 + struct mlx5_mr_table *table = &dev->mdev->priv.mr_table; 77 77 int err; 78 78 79 79 spin_lock_irqsave(&ent->lock, flags); ··· 97 97 return; 98 98 } 99 99 100 - spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags); 101 - key = dev->mdev.priv.mkey_key++; 102 - spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags); 100 + spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags); 101 + key = dev->mdev->priv.mkey_key++; 102 + spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags); 103 103 mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key; 104 104 105 105 cache->last_add = jiffies; ··· 155 155 spin_lock_irq(&ent->lock); 156 156 ent->pending++; 157 157 spin_unlock_irq(&ent->lock); 158 - err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, 158 + err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, 159 159 sizeof(*in), reg_mr_callback, 160 160 mr, &mr->out); 161 161 if (err) { ··· 188 188 ent->cur--; 189 189 ent->size--; 190 190 spin_unlock_irq(&ent->lock); 191 - err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 191 + err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr); 192 192 if (err) 193 193 mlx5_ib_warn(dev, "failed destroy mkey\n"); 194 194 else ··· 479 479 ent->cur--; 480 480 ent->size--; 481 481 spin_unlock_irq(&ent->lock); 482 - err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 482 + err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr); 483 483 if (err) 484 484 mlx5_ib_warn(dev, "failed destroy mkey\n"); 485 485 else ··· 496 496 if (!mlx5_debugfs_root) 497 497 return 0; 498 498 499 - cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root); 499 + cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); 500 500 if (!cache->root) 501 501 return -ENOMEM; 502 502 ··· 571 571 ent->order = i + 2; 572 572 ent->dev = dev; 573 573 574 - if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) 575 - limit = dev->mdev.profile->mr_cache[i].limit; 574 + if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) 575 + limit = dev->mdev->profile->mr_cache[i].limit; 576 576 else 577 577 limit = 0; 578 578 ··· 610 610 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) 611 611 { 612 612 struct mlx5_ib_dev *dev = to_mdev(pd->device); 613 - struct mlx5_core_dev *mdev = &dev->mdev; 613 + struct mlx5_core_dev *mdev = dev->mdev; 614 614 struct mlx5_create_mkey_mbox_in *in; 615 615 struct mlx5_mkey_seg *seg; 616 616 struct mlx5_ib_mr *mr; ··· 846 846 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); 847 847 in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 848 848 1 << page_shift)); 849 - err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL, 849 + err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL, 850 850 NULL, NULL); 851 851 if (err) { 852 852 mlx5_ib_warn(dev, "create mkey failed\n"); ··· 923 923 mr->umem = umem; 924 924 mr->npages = npages; 925 925 spin_lock(&dev->mr_lock); 926 - dev->mdev.priv.reg_pages += npages; 926 + dev->mdev->priv.reg_pages += npages; 927 927 spin_unlock(&dev->mr_lock); 928 928 mr->ibmr.lkey = mr->mmr.key; 929 929 mr->ibmr.rkey = mr->mmr.key; ··· 978 978 int err; 979 979 980 980 if (!umred) { 981 - err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 981 + err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr); 982 982 if (err) { 983 983 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", 984 984 mr->mmr.key, err); ··· 996 996 if 
(umem) { 997 997 ib_umem_release(umem); 998 998 spin_lock(&dev->mr_lock); 999 - dev->mdev.priv.reg_pages -= npages; 999 + dev->mdev->priv.reg_pages -= npages; 1000 1000 spin_unlock(&dev->mr_lock); 1001 1001 } 1002 1002 ··· 1044 1044 } 1045 1045 1046 1046 /* create mem & wire PSVs */ 1047 - err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn, 1047 + err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 1048 1048 2, psv_index); 1049 1049 if (err) 1050 1050 goto err_free_sig; ··· 1060 1060 } 1061 1061 1062 1062 in->seg.flags = MLX5_PERM_UMR_EN | access_mode; 1063 - err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), 1063 + err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), 1064 1064 NULL, NULL, NULL); 1065 1065 if (err) 1066 1066 goto err_destroy_psv; ··· 1074 1074 1075 1075 err_destroy_psv: 1076 1076 if (mr->sig) { 1077 - if (mlx5_core_destroy_psv(&dev->mdev, 1077 + if (mlx5_core_destroy_psv(dev->mdev, 1078 1078 mr->sig->psv_memory.psv_idx)) 1079 1079 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 1080 1080 mr->sig->psv_memory.psv_idx); 1081 - if (mlx5_core_destroy_psv(&dev->mdev, 1081 + if (mlx5_core_destroy_psv(dev->mdev, 1082 1082 mr->sig->psv_wire.psv_idx)) 1083 1083 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 1084 1084 mr->sig->psv_wire.psv_idx); ··· 1099 1099 int err; 1100 1100 1101 1101 if (mr->sig) { 1102 - if (mlx5_core_destroy_psv(&dev->mdev, 1102 + if (mlx5_core_destroy_psv(dev->mdev, 1103 1103 mr->sig->psv_memory.psv_idx)) 1104 1104 mlx5_ib_warn(dev, "failed to destroy mem psv %d\n", 1105 1105 mr->sig->psv_memory.psv_idx); 1106 - if (mlx5_core_destroy_psv(&dev->mdev, 1106 + if (mlx5_core_destroy_psv(dev->mdev, 1107 1107 mr->sig->psv_wire.psv_idx)) 1108 1108 mlx5_ib_warn(dev, "failed to destroy wire psv %d\n", 1109 1109 mr->sig->psv_wire.psv_idx); 1110 1110 kfree(mr->sig); 1111 1111 } 1112 1112 1113 - err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr); 1113 + err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr); 1114 1114 if (err) { 1115 1115 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", 1116 1116 mr->mmr.key, err); ··· 1149 1149 * TBD not needed - issue 197292 */ 1150 1150 in->seg.log2_page_size = PAGE_SHIFT; 1151 1151 1152 - err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL, 1152 + err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL, 1153 1153 NULL, NULL); 1154 1154 kfree(in); 1155 1155 if (err) ··· 1202 1202 struct mlx5_ib_dev *dev = to_mdev(page_list->device); 1203 1203 int size = page_list->max_page_list_len * sizeof(u64); 1204 1204 1205 - dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list, 1205 + dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list, 1206 1206 mfrpl->map); 1207 1207 kfree(mfrpl->ibfrpl.page_list); 1208 1208 kfree(mfrpl);
+44 -44
drivers/infiniband/hw/mlx5/qp.c
··· 162 162 int wq_size; 163 163 164 164 /* Sanity check RQ size before proceeding */ 165 - if (cap->max_recv_wr > dev->mdev.caps.max_wqes) 165 + if (cap->max_recv_wr > dev->mdev->caps.max_wqes) 166 166 return -EINVAL; 167 167 168 168 if (!has_rq) { ··· 182 182 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; 183 183 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); 184 184 qp->rq.wqe_cnt = wq_size / wqe_size; 185 - if (wqe_size > dev->mdev.caps.max_rq_desc_sz) { 185 + if (wqe_size > dev->mdev->caps.max_rq_desc_sz) { 186 186 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", 187 187 wqe_size, 188 - dev->mdev.caps.max_rq_desc_sz); 188 + dev->mdev->caps.max_rq_desc_sz); 189 189 return -EINVAL; 190 190 } 191 191 qp->rq.wqe_shift = ilog2(wqe_size); ··· 277 277 if (wqe_size < 0) 278 278 return wqe_size; 279 279 280 - if (wqe_size > dev->mdev.caps.max_sq_desc_sz) { 280 + if (wqe_size > dev->mdev->caps.max_sq_desc_sz) { 281 281 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", 282 - wqe_size, dev->mdev.caps.max_sq_desc_sz); 282 + wqe_size, dev->mdev->caps.max_sq_desc_sz); 283 283 return -EINVAL; 284 284 } 285 285 ··· 292 292 293 293 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 294 294 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 295 - if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { 295 + if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) { 296 296 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", 297 - qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); 297 + qp->sq.wqe_cnt, dev->mdev->caps.max_wqes); 298 298 return -ENOMEM; 299 299 } 300 300 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); ··· 311 311 { 312 312 int desc_sz = 1 << qp->sq.wqe_shift; 313 313 314 - if (desc_sz > dev->mdev.caps.max_sq_desc_sz) { 314 + if (desc_sz > dev->mdev->caps.max_sq_desc_sz) { 315 315 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", 316 - desc_sz, dev->mdev.caps.max_sq_desc_sz); 316 + desc_sz, dev->mdev->caps.max_sq_desc_sz); 317 317 return -EINVAL; 318 318 } 319 319 ··· 325 325 326 326 qp->sq.wqe_cnt = ucmd->sq_wqe_count; 327 327 328 - if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) { 328 + if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) { 329 329 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", 330 - qp->sq.wqe_cnt, dev->mdev.caps.max_wqes); 330 + qp->sq.wqe_cnt, dev->mdev->caps.max_wqes); 331 331 return -EINVAL; 332 332 } 333 333 ··· 674 674 int uuarn; 675 675 int err; 676 676 677 - uuari = &dev->mdev.priv.uuari; 677 + uuari = &dev->mdev->priv.uuari; 678 678 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)) 679 679 return -EINVAL; 680 680 ··· 700 700 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 701 701 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); 702 702 703 - err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); 703 + err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); 704 704 if (err) { 705 705 mlx5_ib_dbg(dev, "err %d\n", err); 706 706 goto err_uuar; ··· 722 722 723 723 mlx5_fill_page_array(&qp->buf, (*in)->pas); 724 724 725 - err = mlx5_db_alloc(&dev->mdev, &qp->db); 725 + err = mlx5_db_alloc(dev->mdev, &qp->db); 726 726 if (err) { 727 727 mlx5_ib_dbg(dev, "err %d\n", err); 728 728 goto err_free; ··· 747 747 return 0; 748 748 749 749 err_wrid: 750 - mlx5_db_free(&dev->mdev, &qp->db); 750 + mlx5_db_free(dev->mdev, &qp->db); 751 751 kfree(qp->sq.wqe_head); 752 752 kfree(qp->sq.w_list); 753 753 kfree(qp->sq.wrid); ··· 758 758 mlx5_vfree(*in); 759 759 760 760 err_buf: 761 - 
mlx5_buf_free(&dev->mdev, &qp->buf); 761 + mlx5_buf_free(dev->mdev, &qp->buf); 762 762 763 763 err_uuar: 764 - free_uuar(&dev->mdev.priv.uuari, uuarn); 764 + free_uuar(&dev->mdev->priv.uuari, uuarn); 765 765 return err; 766 766 } 767 767 768 768 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp) 769 769 { 770 - mlx5_db_free(&dev->mdev, &qp->db); 770 + mlx5_db_free(dev->mdev, &qp->db); 771 771 kfree(qp->sq.wqe_head); 772 772 kfree(qp->sq.w_list); 773 773 kfree(qp->sq.wrid); 774 774 kfree(qp->sq.wr_data); 775 775 kfree(qp->rq.wrid); 776 - mlx5_buf_free(&dev->mdev, &qp->buf); 777 - free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn); 776 + mlx5_buf_free(dev->mdev, &qp->buf); 777 + free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn); 778 778 } 779 779 780 780 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) ··· 812 812 spin_lock_init(&qp->rq.lock); 813 813 814 814 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { 815 - if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { 815 + if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { 816 816 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); 817 817 return -EINVAL; 818 818 } else { ··· 851 851 mlx5_ib_dbg(dev, "invalid rq params\n"); 852 852 return -EINVAL; 853 853 } 854 - if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) { 854 + if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) { 855 855 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", 856 - ucmd.sq_wqe_count, dev->mdev.caps.max_wqes); 856 + ucmd.sq_wqe_count, dev->mdev->caps.max_wqes); 857 857 return -EINVAL; 858 858 } 859 859 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); ··· 957 957 958 958 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma); 959 959 960 - err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen); 960 + err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen); 961 961 if (err) { 962 962 mlx5_ib_dbg(dev, "create qp failed\n"); 963 963 goto err_create; ··· 1081 1081 if (!in) 1082 1082 return; 1083 1083 if (qp->state != IB_QPS_RESET) 1084 - if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state), 1084 + if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state), 1085 1085 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp)) 1086 1086 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n", 1087 1087 qp->mqp.qpn); ··· 1097 1097 mlx5_ib_unlock_cqs(send_cq, recv_cq); 1098 1098 } 1099 1099 1100 - err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp); 1100 + err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp); 1101 1101 if (err) 1102 1102 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); 1103 1103 kfree(in); ··· 1165 1165 switch (init_attr->qp_type) { 1166 1166 case IB_QPT_XRC_TGT: 1167 1167 case IB_QPT_XRC_INI: 1168 - if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) { 1168 + if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) { 1169 1169 mlx5_ib_dbg(dev, "XRC not supported\n"); 1170 1170 return ERR_PTR(-ENOSYS); 1171 1171 } ··· 1279 1279 } else { 1280 1280 while (rate != IB_RATE_2_5_GBPS && 1281 1281 !(1 << (rate + MLX5_STAT_RATE_OFFSET) & 1282 - dev->mdev.caps.stat_rate_support)) 1282 + dev->mdev->caps.stat_rate_support)) 1283 1283 --rate; 1284 1284 } 1285 1285 ··· 1318 1318 path->port = port; 1319 1319 1320 1320 if (ah->ah_flags & IB_AH_GRH) { 1321 - if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) { 1321 + if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) { 1322 1322 pr_err(KERN_ERR 
"sgid_index (%u) too large. max is %d\n", 1323 - ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len); 1323 + ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len); 1324 1324 return -EINVAL; 1325 1325 } 1326 1326 ··· 1539 1539 err = -EINVAL; 1540 1540 goto out; 1541 1541 } 1542 - context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg; 1542 + context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg; 1543 1543 } 1544 1544 1545 1545 if (attr_mask & IB_QP_DEST_QPN) ··· 1637 1637 optpar = ib_mask_to_mlx5_opt(attr_mask); 1638 1638 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st]; 1639 1639 in->optparam = cpu_to_be32(optpar); 1640 - err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state), 1640 + err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state), 1641 1641 to_mlx5_state(new_state), in, sqd_event, 1642 1642 &qp->mqp); 1643 1643 if (err) ··· 1699 1699 goto out; 1700 1700 1701 1701 if ((attr_mask & IB_QP_PORT) && 1702 - (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports)) 1702 + (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports)) 1703 1703 goto out; 1704 1704 1705 1705 if (attr_mask & IB_QP_PKEY_INDEX) { 1706 1706 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 1707 - if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len) 1707 + if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len) 1708 1708 goto out; 1709 1709 } 1710 1710 1711 1711 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 1712 - attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp) 1712 + attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp) 1713 1713 goto out; 1714 1714 1715 1715 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 1716 - attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp) 1716 + attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp) 1717 1717 goto out; 1718 1718 1719 1719 if (cur_state == new_state && cur_state == IB_QPS_RESET) { ··· 2479 2479 { 2480 2480 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ 2481 2481 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2482 - struct mlx5_core_dev *mdev = &dev->mdev; 2482 + struct mlx5_core_dev *mdev = dev->mdev; 2483 2483 struct mlx5_ib_qp *qp = to_mqp(ibqp); 2484 2484 struct mlx5_ib_mr *mr; 2485 2485 struct mlx5_wqe_data_seg *dpseg; ··· 2539 2539 case IB_WR_RDMA_WRITE_WITH_IMM: 2540 2540 set_raddr_seg(seg, wr->wr.rdma.remote_addr, 2541 2541 wr->wr.rdma.rkey); 2542 - seg += sizeof(struct mlx5_wqe_raddr_seg); 2542 + seg += sizeof(struct mlx5_wqe_raddr_seg); 2543 2543 size += sizeof(struct mlx5_wqe_raddr_seg) / 16; 2544 2544 break; 2545 2545 ··· 2668 2668 case IB_QPT_SMI: 2669 2669 case IB_QPT_GSI: 2670 2670 set_datagram_seg(seg, wr); 2671 - seg += sizeof(struct mlx5_wqe_datagram_seg); 2671 + seg += sizeof(struct mlx5_wqe_datagram_seg); 2672 2672 size += sizeof(struct mlx5_wqe_datagram_seg) / 16; 2673 2673 if (unlikely((seg == qend))) 2674 2674 seg = mlx5_get_send_wqe(qp, 0); ··· 2888 2888 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr, 2889 2889 struct mlx5_qp_path *path) 2890 2890 { 2891 - struct mlx5_core_dev *dev = &ibdev->mdev; 2891 + struct mlx5_core_dev *dev = ibdev->mdev; 2892 2892 2893 2893 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr)); 2894 2894 ib_ah_attr->port_num = path->port; ··· 2931 2931 goto out; 2932 2932 } 2933 2933 context = &outb->ctx; 2934 - err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb)); 2934 + err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, 
sizeof(*outb)); 2935 2935 if (err) 2936 2936 goto out_free; 2937 2937 ··· 3014 3014 struct mlx5_ib_xrcd *xrcd; 3015 3015 int err; 3016 3016 3017 - if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) 3017 + if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) 3018 3018 return ERR_PTR(-ENOSYS); 3019 3019 3020 3020 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); 3021 3021 if (!xrcd) 3022 3022 return ERR_PTR(-ENOMEM); 3023 3023 3024 - err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn); 3024 + err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn); 3025 3025 if (err) { 3026 3026 kfree(xrcd); 3027 3027 return ERR_PTR(-ENOMEM); ··· 3036 3036 u32 xrcdn = to_mxrcd(xrcd)->xrcdn; 3037 3037 int err; 3038 3038 3039 - err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn); 3039 + err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn); 3040 3040 if (err) { 3041 3041 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn); 3042 3042 return err;
+13 -13
drivers/infiniband/hw/mlx5/srq.c
··· 159 159 int page_shift; 160 160 int npages; 161 161 162 - err = mlx5_db_alloc(&dev->mdev, &srq->db); 162 + err = mlx5_db_alloc(dev->mdev, &srq->db); 163 163 if (err) { 164 164 mlx5_ib_warn(dev, "alloc dbell rec failed\n"); 165 165 return err; ··· 167 167 168 168 *srq->db.db = 0; 169 169 170 - if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) { 170 + if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) { 171 171 mlx5_ib_dbg(dev, "buf alloc failed\n"); 172 172 err = -ENOMEM; 173 173 goto err_db; ··· 212 212 mlx5_vfree(*in); 213 213 214 214 err_buf: 215 - mlx5_buf_free(&dev->mdev, &srq->buf); 215 + mlx5_buf_free(dev->mdev, &srq->buf); 216 216 217 217 err_db: 218 - mlx5_db_free(&dev->mdev, &srq->db); 218 + mlx5_db_free(dev->mdev, &srq->db); 219 219 return err; 220 220 } 221 221 ··· 229 229 static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq) 230 230 { 231 231 kfree(srq->wrid); 232 - mlx5_buf_free(&dev->mdev, &srq->buf); 233 - mlx5_db_free(&dev->mdev, &srq->db); 232 + mlx5_buf_free(dev->mdev, &srq->buf); 233 + mlx5_db_free(dev->mdev, &srq->db); 234 234 } 235 235 236 236 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, ··· 248 248 u32 flgs, xrcdn; 249 249 250 250 /* Sanity check SRQ size before proceeding */ 251 - if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) { 251 + if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) { 252 252 mlx5_ib_dbg(dev, "max_wr %d, cap %d\n", 253 253 init_attr->attr.max_wr, 254 - dev->mdev.caps.max_srq_wqes); 254 + dev->mdev->caps.max_srq_wqes); 255 255 return ERR_PTR(-EINVAL); 256 256 } 257 257 ··· 303 303 304 304 in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn); 305 305 in->ctx.db_record = cpu_to_be64(srq->db.dma); 306 - err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen); 306 + err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen); 307 307 mlx5_vfree(in); 308 308 if (err) { 309 309 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err); ··· 327 327 return &srq->ibsrq; 328 328 329 329 err_core: 330 - mlx5_core_destroy_srq(&dev->mdev, &srq->msrq); 330 + mlx5_core_destroy_srq(dev->mdev, &srq->msrq); 331 331 332 332 err_usr_kern_srq: 333 333 if (pd->uobject) ··· 357 357 return -EINVAL; 358 358 359 359 mutex_lock(&srq->mutex); 360 - ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1); 360 + ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1); 361 361 mutex_unlock(&srq->mutex); 362 362 363 363 if (ret) ··· 378 378 if (!out) 379 379 return -ENOMEM; 380 380 381 - ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out); 381 + ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out); 382 382 if (ret) 383 383 goto out_box; 384 384 ··· 396 396 struct mlx5_ib_dev *dev = to_mdev(srq->device); 397 397 struct mlx5_ib_srq *msrq = to_msrq(srq); 398 398 399 - mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq); 399 + mlx5_core_destroy_srq(dev->mdev, &msrq->msrq); 400 400 401 401 if (srq->uobject) { 402 402 mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
··· 56 56 if (size <= max_direct) { 57 57 buf->nbufs = 1; 58 58 buf->npages = 1; 59 - buf->page_shift = get_order(size) + PAGE_SHIFT; 59 + buf->page_shift = (u8)get_order(size) + PAGE_SHIFT; 60 60 buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev, 61 61 size, &t, GFP_KERNEL); 62 62 if (!buf->direct.buf)
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 464 464 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 465 465 struct mlx5_cmd_mailbox *next = msg->next; 466 466 int data_only; 467 - int offset = 0; 467 + u32 offset = 0; 468 468 int dump_len; 469 469 470 470 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 252 252 case MLX5_PORT_CHANGE_SUBTYPE_GUID: 253 253 case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: 254 254 case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: 255 - dev->event(dev, port_subtype_event(eqe->sub_type), &port); 255 + if (dev->event) 256 + dev->event(dev, port_subtype_event(eqe->sub_type), 257 + (unsigned long)port); 256 258 break; 257 259 default: 258 260 mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/mad.c
··· 37 37 #include "mlx5_core.h" 38 38 39 39 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, 40 - u16 opmod, int port) 40 + u16 opmod, u8 port) 41 41 { 42 42 struct mlx5_mad_ifc_mbox_in *in = NULL; 43 43 struct mlx5_mad_ifc_mbox_out *out = NULL;
+288 -4
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 58 58 module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644); 59 59 MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0"); 60 60 61 + #define MLX5_DEFAULT_PROF 2 62 + static int prof_sel = MLX5_DEFAULT_PROF; 63 + module_param_named(prof_sel, prof_sel, int, 0444); 64 + MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2"); 65 + 61 66 struct workqueue_struct *mlx5_core_wq; 67 + static LIST_HEAD(intf_list); 68 + static LIST_HEAD(dev_list); 69 + static DEFINE_MUTEX(intf_mutex); 70 + 71 + struct mlx5_device_context { 72 + struct list_head list; 73 + struct mlx5_interface *intf; 74 + void *context; 75 + }; 76 + 77 + static struct mlx5_profile profile[] = { 78 + [0] = { 79 + .mask = 0, 80 + }, 81 + [1] = { 82 + .mask = MLX5_PROF_MASK_QP_SIZE, 83 + .log_max_qp = 12, 84 + }, 85 + [2] = { 86 + .mask = MLX5_PROF_MASK_QP_SIZE | 87 + MLX5_PROF_MASK_MR_CACHE, 88 + .log_max_qp = 17, 89 + .mr_cache[0] = { 90 + .size = 500, 91 + .limit = 250 92 + }, 93 + .mr_cache[1] = { 94 + .size = 500, 95 + .limit = 250 96 + }, 97 + .mr_cache[2] = { 98 + .size = 500, 99 + .limit = 250 100 + }, 101 + .mr_cache[3] = { 102 + .size = 500, 103 + .limit = 250 104 + }, 105 + .mr_cache[4] = { 106 + .size = 500, 107 + .limit = 250 108 + }, 109 + .mr_cache[5] = { 110 + .size = 500, 111 + .limit = 250 112 + }, 113 + .mr_cache[6] = { 114 + .size = 500, 115 + .limit = 250 116 + }, 117 + .mr_cache[7] = { 118 + .size = 500, 119 + .limit = 250 120 + }, 121 + .mr_cache[8] = { 122 + .size = 500, 123 + .limit = 250 124 + }, 125 + .mr_cache[9] = { 126 + .size = 500, 127 + .limit = 250 128 + }, 129 + .mr_cache[10] = { 130 + .size = 500, 131 + .limit = 250 132 + }, 133 + .mr_cache[11] = { 134 + .size = 500, 135 + .limit = 250 136 + }, 137 + .mr_cache[12] = { 138 + .size = 64, 139 + .limit = 32 140 + }, 141 + .mr_cache[13] = { 142 + .size = 32, 143 + .limit = 16 144 + }, 145 + .mr_cache[14] = { 146 + .size = 16, 147 + .limit = 8 148 + }, 149 + .mr_cache[15] = { 150 + .size = 8, 151 + .limit = 4 152 + }, 153 + }, 154 + }; 62 155 63 156 static int set_dma_caps(struct pci_dev *pdev) 64 157 { ··· 311 218 312 219 copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); 313 220 314 - if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) 221 + if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) 315 222 set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; 316 223 317 224 flags = be64_to_cpu(query_out->hca_cap.flags); ··· 392 299 return 0; 393 300 } 394 301 395 - int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) 302 + static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev) 396 303 { 397 304 struct mlx5_priv *priv = &dev->priv; 398 305 int err; ··· 582 489 } 583 490 EXPORT_SYMBOL(mlx5_dev_init); 584 491 585 - void mlx5_dev_cleanup(struct mlx5_core_dev *dev) 492 + static void mlx5_dev_cleanup(struct mlx5_core_dev *dev) 586 493 { 587 494 struct mlx5_priv *priv = &dev->priv; 588 495 ··· 609 516 pci_disable_device(dev->pdev); 610 517 debugfs_remove(priv->dbg_root); 611 518 } 612 - EXPORT_SYMBOL(mlx5_dev_cleanup); 519 + 520 + static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) 521 + { 522 + struct mlx5_device_context *dev_ctx; 523 + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); 524 + 525 + dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL); 526 + if (!dev_ctx) { 527 + pr_warn("mlx5_add_device: alloc context failed\n"); 528 + return; 529 + } 530 + 531 + dev_ctx->intf = intf; 532 + 
532 + dev_ctx->context = intf->add(dev);
533 +
534 + if (dev_ctx->context) {
535 + spin_lock_irq(&priv->ctx_lock);
536 + list_add_tail(&dev_ctx->list, &priv->ctx_list);
537 + spin_unlock_irq(&priv->ctx_lock);
538 + } else {
539 + kfree(dev_ctx);
540 + }
541 + }
542 +
543 + static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
544 + {
545 + struct mlx5_device_context *dev_ctx;
546 + struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
547 +
548 + list_for_each_entry(dev_ctx, &priv->ctx_list, list)
549 + if (dev_ctx->intf == intf) {
550 + spin_lock_irq(&priv->ctx_lock);
551 + list_del(&dev_ctx->list);
552 + spin_unlock_irq(&priv->ctx_lock);
553 +
554 + intf->remove(dev, dev_ctx->context);
555 + kfree(dev_ctx);
556 + return;
557 + }
558 + }
559 + static int mlx5_register_device(struct mlx5_core_dev *dev)
560 + {
561 + struct mlx5_priv *priv = &dev->priv;
562 + struct mlx5_interface *intf;
563 +
564 + mutex_lock(&intf_mutex);
565 + list_add_tail(&priv->dev_list, &dev_list);
566 + list_for_each_entry(intf, &intf_list, list)
567 + mlx5_add_device(intf, priv);
568 + mutex_unlock(&intf_mutex);
569 +
570 + return 0;
571 + }
572 + static void mlx5_unregister_device(struct mlx5_core_dev *dev)
573 + {
574 + struct mlx5_priv *priv = &dev->priv;
575 + struct mlx5_interface *intf;
576 +
577 + mutex_lock(&intf_mutex);
578 + list_for_each_entry(intf, &intf_list, list)
579 + mlx5_remove_device(intf, priv);
580 + list_del(&priv->dev_list);
581 + mutex_unlock(&intf_mutex);
582 + }
583 +
584 + int mlx5_register_interface(struct mlx5_interface *intf)
585 + {
586 + struct mlx5_priv *priv;
587 +
588 + if (!intf->add || !intf->remove)
589 + return -EINVAL;
590 +
591 + mutex_lock(&intf_mutex);
592 + list_add_tail(&intf->list, &intf_list);
593 + list_for_each_entry(priv, &dev_list, dev_list)
594 + mlx5_add_device(intf, priv);
595 + mutex_unlock(&intf_mutex);
596 +
597 + return 0;
598 + }
599 + EXPORT_SYMBOL(mlx5_register_interface);
600 +
601 + void mlx5_unregister_interface(struct mlx5_interface *intf)
602 + {
603 + struct mlx5_priv *priv;
604 +
605 + mutex_lock(&intf_mutex);
606 + list_for_each_entry(priv, &dev_list, dev_list)
607 + mlx5_remove_device(intf, priv);
608 + list_del(&intf->list);
609 + mutex_unlock(&intf_mutex);
610 + }
611 + EXPORT_SYMBOL(mlx5_unregister_interface);
612 +
613 + static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
614 + unsigned long param)
615 + {
616 + struct mlx5_priv *priv = &dev->priv;
617 + struct mlx5_device_context *dev_ctx;
618 + unsigned long flags;
619 +
620 + spin_lock_irqsave(&priv->ctx_lock, flags);
621 +
622 + list_for_each_entry(dev_ctx, &priv->ctx_list, list)
623 + if (dev_ctx->intf->event)
624 + dev_ctx->intf->event(dev, dev_ctx->context, event, param);
625 +
626 + spin_unlock_irqrestore(&priv->ctx_lock, flags);
627 + }
628 +
629 + struct mlx5_core_event_handler {
630 + void (*event)(struct mlx5_core_dev *dev,
631 + enum mlx5_dev_event event,
632 + void *data);
633 + };
634 +
635 + static int init_one(struct pci_dev *pdev,
636 + const struct pci_device_id *id)
637 + {
638 + struct mlx5_core_dev *dev;
639 + struct mlx5_priv *priv;
640 + int err;
641 +
642 + dev = kzalloc(sizeof(*dev), GFP_KERNEL);
643 + if (!dev) {
644 + dev_err(&pdev->dev, "kzalloc failed\n");
645 + return -ENOMEM;
646 + }
647 + priv = &dev->priv;
648 +
649 + pci_set_drvdata(pdev, dev);
650 +
651 + if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
652 + pr_warn("selected profile out of range, selecting default (%d)\n",
653 + MLX5_DEFAULT_PROF); 654 + prof_sel = MLX5_DEFAULT_PROF; 655 + } 656 + dev->profile = &profile[prof_sel]; 657 + dev->event = mlx5_core_event; 658 + 659 + err = mlx5_dev_init(dev, pdev); 660 + if (err) { 661 + dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); 662 + goto out; 663 + } 664 + 665 + INIT_LIST_HEAD(&priv->ctx_list); 666 + spin_lock_init(&priv->ctx_lock); 667 + err = mlx5_register_device(dev); 668 + if (err) { 669 + dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); 670 + goto out_init; 671 + } 672 + 673 + return 0; 674 + 675 + out_init: 676 + mlx5_dev_cleanup(dev); 677 + out: 678 + kfree(dev); 679 + return err; 680 + } 681 + static void remove_one(struct pci_dev *pdev) 682 + { 683 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 684 + 685 + mlx5_unregister_device(dev); 686 + mlx5_dev_cleanup(dev); 687 + kfree(dev); 688 + } 689 + 690 + static const struct pci_device_id mlx5_core_pci_table[] = { 691 + { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ 692 + { 0, } 693 + }; 694 + 695 + MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table); 696 + 697 + static struct pci_driver mlx5_core_driver = { 698 + .name = DRIVER_NAME, 699 + .id_table = mlx5_core_pci_table, 700 + .probe = init_one, 701 + .remove = remove_one 702 + }; 613 703 614 704 static int __init init(void) 615 705 { ··· 806 530 } 807 531 mlx5_health_init(); 808 532 533 + err = pci_register_driver(&mlx5_core_driver); 534 + if (err) 535 + goto err_health; 536 + 809 537 return 0; 810 538 539 + err_health: 540 + mlx5_health_cleanup(); 541 + destroy_workqueue(mlx5_core_wq); 811 542 err_debug: 812 543 mlx5_unregister_debugfs(); 813 544 return err; ··· 822 539 823 540 static void __exit cleanup(void) 824 541 { 542 + pci_unregister_driver(&mlx5_core_driver); 825 543 mlx5_health_cleanup(); 826 544 destroy_workqueue(mlx5_core_wq); 827 545 mlx5_unregister_debugfs();
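
The interface list above is the attachment point this series prepares for the coming Ethernet driver: mlx5_register_interface() replays ->add() for every core device already probed, and init_one()/remove_one() replay the callbacks for devices that appear or disappear later. A minimal sketch of a client of this API follows; it is not part of the patch, and the mlx5_dummy_* names are hypothetical placeholders:

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/mlx5/driver.h>

	/* Hypothetical per-device state for a sub-driver built on mlx5_core. */
	struct mlx5_dummy_priv {
		struct mlx5_core_dev *mdev;
	};

	static void *mlx5_dummy_add(struct mlx5_core_dev *mdev)
	{
		struct mlx5_dummy_priv *priv;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return NULL;	/* core frees its dev_ctx and skips the ctx list */

		priv->mdev = mdev;
		return priv;		/* stored by the core as dev_ctx->context */
	}

	static void mlx5_dummy_remove(struct mlx5_core_dev *mdev, void *context)
	{
		kfree(context);
	}

	static void mlx5_dummy_event(struct mlx5_core_dev *mdev, void *context,
				     enum mlx5_dev_event event, unsigned long param)
	{
		/* atomic context: called under ctx_lock with IRQs off, must not sleep */
	}

	static struct mlx5_interface mlx5_dummy_interface = {
		.add	= mlx5_dummy_add,
		.remove	= mlx5_dummy_remove,
		.event	= mlx5_dummy_event,
	};

	static int __init mlx5_dummy_init(void)
	{
		/* ->add() is called back for each already-registered core device */
		return mlx5_register_interface(&mlx5_dummy_interface);
	}

	static void __exit mlx5_dummy_exit(void)
	{
		mlx5_unregister_interface(&mlx5_dummy_interface);
	}

	module_init(mlx5_dummy_init);
	module_exit(mlx5_dummy_exit);
	MODULE_LICENSE("GPL");

Note that ->event() runs under priv->ctx_lock with interrupts disabled (see mlx5_core_event() above), so clients must defer sleeping work. Profile selection happens once per probe: loading with, e.g., modprobe mlx5_core prof_sel=1 picks the mid-size profile (log_max_qp = 12), and out-of-range values fall back to MLX5_DEFAULT_PROF.
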
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
··· 51 51 52 52 struct mlx5_pages_req { 53 53 struct mlx5_core_dev *dev; 54 - u32 func_id; 54 + u16 func_id; 55 55 s32 npages; 56 56 struct work_struct work; 57 57 };
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 86 86 __be32 caps_31_0; 87 87 }; 88 88 89 - int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps) 89 + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps) 90 90 { 91 91 struct mlx5_reg_pcap in; 92 92 struct mlx5_reg_pcap out;
-4
include/linux/mlx5/device.h
··· 456 456 u8 syndrome; 457 457 }; 458 458 459 - struct mlx5_eqe_dropped_packet { 460 - }; 461 - 462 459 struct mlx5_eqe_port_state { 463 460 u8 reserved0[8]; 464 461 u8 port; ··· 495 498 struct mlx5_eqe_comp comp; 496 499 struct mlx5_eqe_qp_srq qp_srq; 497 500 struct mlx5_eqe_cq_err cq_err; 498 - struct mlx5_eqe_dropped_packet dp; 499 501 struct mlx5_eqe_port_state port; 500 502 struct mlx5_eqe_gpio gpio; 501 503 struct mlx5_eqe_congestion cong;
+20 -7
include/linux/mlx5/driver.h
··· 381 381 struct mlx5_buf_list *page_list; 382 382 int nbufs; 383 383 int npages; 384 - int page_shift; 385 384 int size; 385 + u8 page_shift; 386 386 }; 387 387 388 388 struct mlx5_eq { ··· 543 543 /* protect mkey key part */ 544 544 spinlock_t mkey_lock; 545 545 u8 mkey_key; 546 + 547 + struct list_head dev_list; 548 + struct list_head ctx_list; 549 + spinlock_t ctx_lock; 546 550 }; 547 551 548 552 struct mlx5_core_dev { ··· 559 555 struct mlx5_init_seg __iomem *iseg; 560 556 void (*event) (struct mlx5_core_dev *dev, 561 557 enum mlx5_dev_event event, 562 - void *data); 558 + unsigned long param); 563 559 struct mlx5_priv priv; 564 560 struct mlx5_profile *profile; 565 561 atomic_t num_qps; ··· 690 686 return key & 0xffffff00u; 691 687 } 692 688 693 - int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev); 694 - void mlx5_dev_cleanup(struct mlx5_core_dev *dev); 695 689 int mlx5_cmd_init(struct mlx5_core_dev *dev); 696 690 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 697 691 void mlx5_cmd_use_events(struct mlx5_core_dev *dev); ··· 736 734 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); 737 735 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); 738 736 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, 739 - u16 opmod, int port); 737 + u16 opmod, u8 port); 740 738 void mlx5_pagealloc_init(struct mlx5_core_dev *dev); 741 739 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); 742 740 int mlx5_pagealloc_start(struct mlx5_core_dev *dev); ··· 769 767 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, 770 768 int size_in, void *data_out, int size_out, 771 769 u16 reg_num, int arg, int write); 772 - int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps); 770 + int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); 773 771 774 772 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); 775 773 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); ··· 813 811 MAX_MR_CACHE_ENTRIES = 16, 814 812 }; 815 813 814 + struct mlx5_interface { 815 + void * (*add)(struct mlx5_core_dev *dev); 816 + void (*remove)(struct mlx5_core_dev *dev, void *context); 817 + void (*event)(struct mlx5_core_dev *dev, void *context, 818 + enum mlx5_dev_event event, unsigned long param); 819 + struct list_head list; 820 + }; 821 + 822 + int mlx5_register_interface(struct mlx5_interface *intf); 823 + void mlx5_unregister_interface(struct mlx5_interface *intf); 824 + 816 825 struct mlx5_profile { 817 826 u64 mask; 818 - u32 log_max_qp; 827 + u8 log_max_qp; 819 828 struct { 820 829 int size; 821 830 int limit;
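
The profile plumbing reads naturally alongside these declarations: mask says which fields of struct mlx5_profile are valid, and log_max_qp (now a u8, reasonable since it is a log2 exponent copied into a one-byte HCA capability field by handle_hca_cap()) caps the QP count. A hedged illustration of the mask semantics — dummy_max_qps() is a hypothetical helper, not part of this patch:

	/* Hypothetical helper: how a profile bounds the QP count. */
	static int dummy_max_qps(const struct mlx5_profile *prof, int fw_max_qp)
	{
		if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
			return 1 << prof->log_max_qp;	/* profile 2: 1 << 17 = 131072 QPs */
		return fw_max_qp;			/* profile 0: keep the firmware default */
	}

Profile 1 similarly requests 1 << 12 = 4096 QPs, and the MR cache sizes in profile 2 take effect only because MLX5_PROF_MASK_MR_CACHE is also set in its mask.
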