Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB: simplify static rate encoding

Push the translation of static rate into HCA format down into the
low-level drivers, where it belongs. For static rate encoding, use the
encoding of the rate field from the IB standard PathRecord, with the
addition of the value 0 for backwards compatibility with current usage. The changes are:

- Add enum ib_rate to midlayer includes.
- Get rid of static rate translation in IPoIB; just use the static rate
directly from the Path and MulticastGroup records.
- Update the mthca driver to translate the absolute static rate into the
format used by the hardware. This also fixes mthca's static rate
handling for HCAs that are capable of 4X DDR.

Signed-off-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

authored by

Jack Morgenstein and committed by
Roland Dreier
bf6a9e31 d2e0655e

+261 -64
+34
drivers/infiniband/core/verbs.c
··· 45 45 #include <rdma/ib_verbs.h> 46 46 #include <rdma/ib_cache.h> 47 47 48 + int ib_rate_to_mult(enum ib_rate rate) 49 + { 50 + switch (rate) { 51 + case IB_RATE_2_5_GBPS: return 1; 52 + case IB_RATE_5_GBPS: return 2; 53 + case IB_RATE_10_GBPS: return 4; 54 + case IB_RATE_20_GBPS: return 8; 55 + case IB_RATE_30_GBPS: return 12; 56 + case IB_RATE_40_GBPS: return 16; 57 + case IB_RATE_60_GBPS: return 24; 58 + case IB_RATE_80_GBPS: return 32; 59 + case IB_RATE_120_GBPS: return 48; 60 + default: return -1; 61 + } 62 + } 63 + EXPORT_SYMBOL(ib_rate_to_mult); 64 + 65 + enum ib_rate mult_to_ib_rate(int mult) 66 + { 67 + switch (mult) { 68 + case 1: return IB_RATE_2_5_GBPS; 69 + case 2: return IB_RATE_5_GBPS; 70 + case 4: return IB_RATE_10_GBPS; 71 + case 8: return IB_RATE_20_GBPS; 72 + case 12: return IB_RATE_30_GBPS; 73 + case 16: return IB_RATE_40_GBPS; 74 + case 24: return IB_RATE_60_GBPS; 75 + case 32: return IB_RATE_80_GBPS; 76 + case 48: return IB_RATE_120_GBPS; 77 + default: return IB_RATE_PORT_CURRENT; 78 + } 79 + } 80 + EXPORT_SYMBOL(mult_to_ib_rate); 81 + 48 82 /* Protection domains */ 49 83 50 84 struct ib_pd *ib_alloc_pd(struct ib_device *device)
+99 -1
drivers/infiniband/hw/mthca/mthca_av.c
··· 42 42 43 43 #include "mthca_dev.h" 44 44 45 + enum { 46 + MTHCA_RATE_TAVOR_FULL = 0, 47 + MTHCA_RATE_TAVOR_1X = 1, 48 + MTHCA_RATE_TAVOR_4X = 2, 49 + MTHCA_RATE_TAVOR_1X_DDR = 3 50 + }; 51 + 52 + enum { 53 + MTHCA_RATE_MEMFREE_FULL = 0, 54 + MTHCA_RATE_MEMFREE_QUARTER = 1, 55 + MTHCA_RATE_MEMFREE_EIGHTH = 2, 56 + MTHCA_RATE_MEMFREE_HALF = 3 57 + }; 58 + 45 59 struct mthca_av { 46 60 __be32 port_pd; 47 61 u8 reserved1; ··· 68 54 __be32 sl_tclass_flowlabel; 69 55 __be32 dgid[4]; 70 56 }; 57 + 58 + static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate) 59 + { 60 + switch (mthca_rate) { 61 + case MTHCA_RATE_MEMFREE_EIGHTH: 62 + return mult_to_ib_rate(port_rate >> 3); 63 + case MTHCA_RATE_MEMFREE_QUARTER: 64 + return mult_to_ib_rate(port_rate >> 2); 65 + case MTHCA_RATE_MEMFREE_HALF: 66 + return mult_to_ib_rate(port_rate >> 1); 67 + case MTHCA_RATE_MEMFREE_FULL: 68 + default: 69 + return mult_to_ib_rate(port_rate); 70 + } 71 + } 72 + 73 + static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate) 74 + { 75 + switch (mthca_rate) { 76 + case MTHCA_RATE_TAVOR_1X: return IB_RATE_2_5_GBPS; 77 + case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS; 78 + case MTHCA_RATE_TAVOR_4X: return IB_RATE_10_GBPS; 79 + default: return port_rate; 80 + } 81 + } 82 + 83 + enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port) 84 + { 85 + if (mthca_is_memfree(dev)) { 86 + /* Handle old Arbel FW */ 87 + if (dev->limits.stat_rate_support == 0x3 && mthca_rate) 88 + return IB_RATE_2_5_GBPS; 89 + 90 + return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]); 91 + } else 92 + return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]); 93 + } 94 + 95 + static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate) 96 + { 97 + if (cur_rate <= req_rate) 98 + return 0; 99 + 100 + /* 101 + * Inter-packet delay (IPD) to get from rate X down to a rate 102 + * no more than Y is (X - 1) / Y. 
103 + */ 104 + switch ((cur_rate - 1) / req_rate) { 105 + case 0: return MTHCA_RATE_MEMFREE_FULL; 106 + case 1: return MTHCA_RATE_MEMFREE_HALF; 107 + case 2: /* fall through */ 108 + case 3: return MTHCA_RATE_MEMFREE_QUARTER; 109 + default: return MTHCA_RATE_MEMFREE_EIGHTH; 110 + } 111 + } 112 + 113 + static u8 ib_rate_to_tavor(u8 static_rate) 114 + { 115 + switch (static_rate) { 116 + case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X; 117 + case IB_RATE_5_GBPS: return MTHCA_RATE_TAVOR_1X_DDR; 118 + case IB_RATE_10_GBPS: return MTHCA_RATE_TAVOR_4X; 119 + default: return MTHCA_RATE_TAVOR_FULL; 120 + } 121 + } 122 + 123 + u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port) 124 + { 125 + u8 rate; 126 + 127 + if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1]) 128 + return 0; 129 + 130 + if (mthca_is_memfree(dev)) 131 + rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate), 132 + dev->rate[port - 1]); 133 + else 134 + rate = ib_rate_to_tavor(static_rate); 135 + 136 + if (!(dev->limits.stat_rate_support & (1 << rate))) 137 + rate = 1; 138 + 139 + return rate; 140 + } 71 141 72 142 int mthca_create_ah(struct mthca_dev *dev, 73 143 struct mthca_pd *pd, ··· 205 107 av->g_slid = ah_attr->src_path_bits; 206 108 av->dlid = cpu_to_be16(ah_attr->dlid); 207 109 av->msg_sr = (3 << 4) | /* 2K message */ 208 - ah_attr->static_rate; 110 + mthca_get_rate(dev, ah_attr->static_rate, ah_attr->port_num); 209 111 av->sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); 210 112 if (ah_attr->ah_flags & IB_AH_GRH) { 211 113 av->g_slid |= 0x80;
+4
drivers/infiniband/hw/mthca/mthca_cmd.c
··· 965 965 u32 *outbox; 966 966 u8 field; 967 967 u16 size; 968 + u16 stat_rate; 968 969 int err; 969 970 970 971 #define QUERY_DEV_LIM_OUT_SIZE 0x100 ··· 996 995 #define QUERY_DEV_LIM_MTU_WIDTH_OFFSET 0x36 997 996 #define QUERY_DEV_LIM_VL_PORT_OFFSET 0x37 998 997 #define QUERY_DEV_LIM_MAX_GID_OFFSET 0x3b 998 + #define QUERY_DEV_LIM_RATE_SUPPORT_OFFSET 0x3c 999 999 #define QUERY_DEV_LIM_MAX_PKEY_OFFSET 0x3f 1000 1000 #define QUERY_DEV_LIM_FLAGS_OFFSET 0x44 1001 1001 #define QUERY_DEV_LIM_RSVD_UAR_OFFSET 0x48 ··· 1088 1086 dev_lim->num_ports = field & 0xf; 1089 1087 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET); 1090 1088 dev_lim->max_gids = 1 << (field & 0xf); 1089 + MTHCA_GET(stat_rate, outbox, QUERY_DEV_LIM_RATE_SUPPORT_OFFSET); 1090 + dev_lim->stat_rate_support = stat_rate; 1091 1091 MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET); 1092 1092 dev_lim->max_pkeys = 1 << (field & 0xf); 1093 1093 MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET);
+1
drivers/infiniband/hw/mthca/mthca_cmd.h
··· 146 146 int max_vl; 147 147 int num_ports; 148 148 int max_gids; 149 + u16 stat_rate_support; 149 150 int max_pkeys; 150 151 u32 flags; 151 152 int reserved_uars;
+4
drivers/infiniband/hw/mthca/mthca_dev.h
··· 172 172 int reserved_pds; 173 173 u32 page_size_cap; 174 174 u32 flags; 175 + u16 stat_rate_support; 175 176 u8 port_width_cap; 176 177 }; 177 178 ··· 354 353 struct ib_mad_agent *send_agent[MTHCA_MAX_PORTS][2]; 355 354 struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; 356 355 spinlock_t sm_lock; 356 + u8 rate[MTHCA_MAX_PORTS]; 357 357 }; 358 358 359 359 #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG ··· 557 555 struct ib_ud_header *header); 558 556 int mthca_ah_query(struct ib_ah *ibah, struct ib_ah_attr *attr); 559 557 int mthca_ah_grh_present(struct mthca_ah *ah); 558 + u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u8 port); 559 + enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u8 port); 560 560 561 561 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); 562 562 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
+40 -2
drivers/infiniband/hw/mthca/mthca_mad.c
··· 49 49 MTHCA_VENDOR_CLASS2 = 0xa 50 50 }; 51 51 52 + int mthca_update_rate(struct mthca_dev *dev, u8 port_num) 53 + { 54 + struct ib_port_attr *tprops = NULL; 55 + int ret; 56 + 57 + tprops = kmalloc(sizeof *tprops, GFP_KERNEL); 58 + if (!tprops) 59 + return -ENOMEM; 60 + 61 + ret = ib_query_port(&dev->ib_dev, port_num, tprops); 62 + if (ret) { 63 + printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n", 64 + ret, dev->ib_dev.name, port_num); 65 + goto out; 66 + } 67 + 68 + dev->rate[port_num - 1] = tprops->active_speed * 69 + ib_width_enum_to_int(tprops->active_width); 70 + 71 + out: 72 + kfree(tprops); 73 + return ret; 74 + } 75 + 52 76 static void update_sm_ah(struct mthca_dev *dev, 53 77 u8 port_num, u16 lid, u8 sl) 54 78 { ··· 114 90 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && 115 91 mad->mad_hdr.method == IB_MGMT_METHOD_SET) { 116 92 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { 93 + mthca_update_rate(to_mdev(ibdev), port_num); 117 94 update_sm_ah(to_mdev(ibdev), port_num, 118 95 be16_to_cpup((__be16 *) (mad->data + 58)), 119 96 (*(u8 *) (mad->data + 76)) & 0xf); ··· 271 246 { 272 247 struct ib_mad_agent *agent; 273 248 int p, q; 249 + int ret; 274 250 275 251 spin_lock_init(&dev->sm_lock); 276 252 ··· 281 255 q ? IB_QPT_GSI : IB_QPT_SMI, 282 256 NULL, 0, send_handler, 283 257 NULL, NULL); 284 - if (IS_ERR(agent)) 258 + if (IS_ERR(agent)) { 259 + ret = PTR_ERR(agent); 285 260 goto err; 261 + } 286 262 dev->send_agent[p][q] = agent; 287 263 } 264 + 265 + 266 + for (p = 1; p <= dev->limits.num_ports; ++p) { 267 + ret = mthca_update_rate(dev, p); 268 + if (ret) { 269 + mthca_err(dev, "Failed to obtain port %d rate." 
270 + " aborting.\n", p); 271 + goto err; 272 + } 273 + } 288 274 289 275 return 0; 290 276 ··· 306 268 if (dev->send_agent[p][q]) 307 269 ib_unregister_mad_agent(dev->send_agent[p][q]); 308 270 309 - return PTR_ERR(agent); 271 + return ret; 310 272 } 311 273 312 274 void __devexit mthca_free_agents(struct mthca_dev *dev)
+12
drivers/infiniband/hw/mthca/mthca_main.c
··· 199 199 mdev->limits.port_width_cap = dev_lim->max_port_width; 200 200 mdev->limits.page_size_cap = ~(u32) (dev_lim->min_page_sz - 1); 201 201 mdev->limits.flags = dev_lim->flags; 202 + /* 203 + * For old FW that doesn't return static rate support, use a 204 + * value of 0x3 (only static rate values of 0 or 1 are handled), 205 + * except on Sinai, where even old FW can handle static rate 206 + * values of 2 and 3. 207 + */ 208 + if (dev_lim->stat_rate_support) 209 + mdev->limits.stat_rate_support = dev_lim->stat_rate_support; 210 + else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT) 211 + mdev->limits.stat_rate_support = 0xf; 212 + else 213 + mdev->limits.stat_rate_support = 0x3; 202 214 203 215 /* IB_DEVICE_RESIZE_MAX_WR not supported by driver. 204 216 May be doable since hardware supports it for SRQ.
+2 -1
drivers/infiniband/hw/mthca/mthca_provider.h
··· 257 257 atomic_t refcount; 258 258 u32 qpn; 259 259 int is_direct; 260 + u8 port; /* for SQP and memfree use only */ 261 + u8 alt_port; /* for memfree use only */ 260 262 u8 transport; 261 263 u8 state; 262 264 u8 atomic_rd_en; ··· 280 278 281 279 struct mthca_sqp { 282 280 struct mthca_qp qp; 283 - int port; 284 281 int pkey_index; 285 282 u32 qkey; 286 283 u32 send_psn;
+33 -13
drivers/infiniband/hw/mthca/mthca_qp.c
··· 248 248 return; 249 249 } 250 250 251 + if (event_type == IB_EVENT_PATH_MIG) 252 + qp->port = qp->alt_port; 253 + 251 254 event.device = &dev->ib_dev; 252 255 event.event = event_type; 253 256 event.element.qp = &qp->ibqp; ··· 395 392 { 396 393 memset(ib_ah_attr, 0, sizeof *path); 397 394 ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3; 395 + 396 + if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports) 397 + return; 398 + 398 399 ib_ah_attr->dlid = be16_to_cpu(path->rlid); 399 400 ib_ah_attr->sl = be32_to_cpu(path->sl_tclass_flowlabel) >> 28; 400 401 ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f; 401 - ib_ah_attr->static_rate = path->static_rate & 0x7; 402 + ib_ah_attr->static_rate = mthca_rate_to_ib(dev, 403 + path->static_rate & 0x7, 404 + ib_ah_attr->port_num); 402 405 ib_ah_attr->ah_flags = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0; 403 406 if (ib_ah_attr->ah_flags) { 404 407 ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1); ··· 464 455 qp_attr->cap.max_recv_sge = qp->rq.max_gs; 465 456 qp_attr->cap.max_inline_data = qp->max_inline_data; 466 457 467 - to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 468 - to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 458 + if (qp->transport == RC || qp->transport == UC) { 459 + to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); 460 + to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); 461 + } 469 462 470 463 qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f; 471 464 qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f; ··· 495 484 } 496 485 497 486 static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah, 498 - struct mthca_qp_path *path) 487 + struct mthca_qp_path *path, u8 port) 499 488 { 500 489 path->g_mylmc = ah->src_path_bits & 0x7f; 501 490 path->rlid = cpu_to_be16(ah->dlid); 502 - path->static_rate = !!ah->static_rate; 491 + 
path->static_rate = mthca_get_rate(dev, ah->static_rate, port); 503 492 504 493 if (ah->ah_flags & IB_AH_GRH) { 505 494 if (ah->grh.sgid_index >= dev->limits.gid_table_len) { ··· 645 634 646 635 if (qp->transport == MLX) 647 636 qp_context->pri_path.port_pkey |= 648 - cpu_to_be32(to_msqp(qp)->port << 24); 637 + cpu_to_be32(qp->port << 24); 649 638 else { 650 639 if (attr_mask & IB_QP_PORT) { 651 640 qp_context->pri_path.port_pkey |= ··· 668 657 } 669 658 670 659 if (attr_mask & IB_QP_AV) { 671 - if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path)) 660 + if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path, 661 + attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) 672 662 return -EINVAL; 673 663 674 664 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH); ··· 693 681 return -EINVAL; 694 682 } 695 683 696 - if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path)) 684 + if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path, 685 + attr->alt_ah_attr.port_num)) 697 686 return -EINVAL; 698 687 699 688 qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index | ··· 804 791 qp->atomic_rd_en = attr->qp_access_flags; 805 792 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) 806 793 qp->resp_depth = attr->max_dest_rd_atomic; 794 + if (attr_mask & IB_QP_PORT) 795 + qp->port = attr->port_num; 796 + if (attr_mask & IB_QP_ALT_PATH) 797 + qp->alt_port = attr->alt_port_num; 807 798 808 799 if (is_sqp(dev, qp)) 809 800 store_attrs(to_msqp(qp), attr, attr_mask); ··· 819 802 if (is_qp0(dev, qp)) { 820 803 if (cur_state != IB_QPS_RTR && 821 804 new_state == IB_QPS_RTR) 822 - init_port(dev, to_msqp(qp)->port); 805 + init_port(dev, qp->port); 823 806 824 807 if (cur_state != IB_QPS_RESET && 825 808 cur_state != IB_QPS_ERR && 826 809 (new_state == IB_QPS_RESET || 827 810 new_state == IB_QPS_ERR)) 828 - mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status); 811 + mthca_CLOSE_IB(dev, qp->port, &status); 829 812 } 830 813 831 814 
/* ··· 1229 1212 if (qp->qpn == -1) 1230 1213 return -ENOMEM; 1231 1214 1215 + /* initialize port to zero for error-catching. */ 1216 + qp->port = 0; 1217 + 1232 1218 err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, 1233 1219 send_policy, qp); 1234 1220 if (err) { ··· 1281 1261 if (err) 1282 1262 goto err_out; 1283 1263 1284 - sqp->port = port; 1264 + sqp->qp.port = port; 1285 1265 sqp->qp.qpn = mqpn; 1286 1266 sqp->qp.transport = MLX; 1287 1267 ··· 1424 1404 sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; 1425 1405 sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED); 1426 1406 if (!sqp->qp.ibqp.qp_num) 1427 - ib_get_cached_pkey(&dev->ib_dev, sqp->port, 1407 + ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, 1428 1408 sqp->pkey_index, &pkey); 1429 1409 else 1430 - ib_get_cached_pkey(&dev->ib_dev, sqp->port, 1410 + ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port, 1431 1411 wr->wr.ud.pkey_index, &pkey); 1432 1412 sqp->ud_header.bth.pkey = cpu_to_be16(pkey); 1433 1413 sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_fs.c
··· 213 213 gid_buf, path.pathrec.dlid ? "yes" : "no"); 214 214 215 215 if (path.pathrec.dlid) { 216 - rate = ib_sa_rate_enum_to_int(path.pathrec.rate) * 25; 216 + rate = ib_rate_to_mult(path.pathrec.rate) * 25; 217 217 218 218 seq_printf(file, 219 219 " DLID: 0x%04x\n"
+2 -9
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 373 373 struct ib_ah_attr av = { 374 374 .dlid = be16_to_cpu(pathrec->dlid), 375 375 .sl = pathrec->sl, 376 - .port_num = priv->port 376 + .port_num = priv->port, 377 + .static_rate = pathrec->rate 377 378 }; 378 - int path_rate = ib_sa_rate_enum_to_int(pathrec->rate); 379 - 380 - if (path_rate > 0 && priv->local_rate > path_rate) 381 - av.static_rate = (priv->local_rate - 1) / path_rate; 382 - 383 - ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n", 384 - av.static_rate, priv->local_rate, 385 - ib_sa_rate_enum_to_int(pathrec->rate)); 386 379 387 380 ah = ipoib_create_ah(dev, priv->pd, &av); 388 381 }
+1 -9
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 250 250 .port_num = priv->port, 251 251 .sl = mcast->mcmember.sl, 252 252 .ah_flags = IB_AH_GRH, 253 + .static_rate = mcast->mcmember.rate, 253 254 .grh = { 254 255 .flow_label = be32_to_cpu(mcast->mcmember.flow_label), 255 256 .hop_limit = mcast->mcmember.hop_limit, ··· 258 257 .traffic_class = mcast->mcmember.traffic_class 259 258 } 260 259 }; 261 - int path_rate = ib_sa_rate_enum_to_int(mcast->mcmember.rate); 262 - 263 260 av.grh.dgid = mcast->mcmember.mgid; 264 - 265 - if (path_rate > 0 && priv->local_rate > path_rate) 266 - av.static_rate = (priv->local_rate - 1) / path_rate; 267 - 268 - ipoib_dbg_mcast(priv, "static_rate %d for local port %dX, mcmember %dX\n", 269 - av.static_rate, priv->local_rate, 270 - ib_sa_rate_enum_to_int(mcast->mcmember.rate)); 271 261 272 262 ah = ipoib_create_ah(dev, priv->pd, &av); 273 263 if (!ah) {
-28
include/rdma/ib_sa.h
··· 91 91 IB_SA_BEST = 3 92 92 }; 93 93 94 - enum ib_sa_rate { 95 - IB_SA_RATE_2_5_GBPS = 2, 96 - IB_SA_RATE_5_GBPS = 5, 97 - IB_SA_RATE_10_GBPS = 3, 98 - IB_SA_RATE_20_GBPS = 6, 99 - IB_SA_RATE_30_GBPS = 4, 100 - IB_SA_RATE_40_GBPS = 7, 101 - IB_SA_RATE_60_GBPS = 8, 102 - IB_SA_RATE_80_GBPS = 9, 103 - IB_SA_RATE_120_GBPS = 10 104 - }; 105 - 106 - static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate) 107 - { 108 - switch (rate) { 109 - case IB_SA_RATE_2_5_GBPS: return 1; 110 - case IB_SA_RATE_5_GBPS: return 2; 111 - case IB_SA_RATE_10_GBPS: return 4; 112 - case IB_SA_RATE_20_GBPS: return 8; 113 - case IB_SA_RATE_30_GBPS: return 12; 114 - case IB_SA_RATE_40_GBPS: return 16; 115 - case IB_SA_RATE_60_GBPS: return 24; 116 - case IB_SA_RATE_80_GBPS: return 32; 117 - case IB_SA_RATE_120_GBPS: return 48; 118 - default: return -1; 119 - } 120 - } 121 - 122 94 /* 123 95 * Structures for SA records are named "struct ib_sa_xxx_rec." No 124 96 * attempt is made to pack structures to match the physical layout of
+28
include/rdma/ib_verbs.h
··· 314 314 IB_AH_GRH = 1 315 315 }; 316 316 317 + enum ib_rate { 318 + IB_RATE_PORT_CURRENT = 0, 319 + IB_RATE_2_5_GBPS = 2, 320 + IB_RATE_5_GBPS = 5, 321 + IB_RATE_10_GBPS = 3, 322 + IB_RATE_20_GBPS = 6, 323 + IB_RATE_30_GBPS = 4, 324 + IB_RATE_40_GBPS = 7, 325 + IB_RATE_60_GBPS = 8, 326 + IB_RATE_80_GBPS = 9, 327 + IB_RATE_120_GBPS = 10 328 + }; 329 + 330 + /** 331 + * ib_rate_to_mult - Convert the IB rate enum to a multiple of the 332 + * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be 333 + * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec. 334 + * @rate: rate to convert. 335 + */ 336 + int ib_rate_to_mult(enum ib_rate rate) __attribute_const__; 337 + 338 + /** 339 + * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate 340 + * enum. 341 + * @mult: multiple to convert. 342 + */ 343 + enum ib_rate mult_to_ib_rate(int mult) __attribute_const__; 344 + 317 345 struct ib_ah_attr { 318 346 struct ib_global_route grh; 319 347 u16 dlid;