IB/mad: RMPP support for additional classes

Add RMPP support for additional management classes that support it.
Also, validate that RMPP usage is consistent with the specified management class.

Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

authored by

Hal Rosenstock and committed by
Roland Dreier
618a3c03 fa9656bb

+86 -46
+49 -5
drivers/infiniband/core/mad.c
··· 227 227 if (!is_vendor_oui(mad_reg_req->oui)) 228 228 goto error1; 229 229 } 230 + /* Make sure class supplied is consistent with RMPP */ 231 + if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { 232 + if (!rmpp_version) 233 + goto error1; 234 + } else { 235 + if (rmpp_version) 236 + goto error1; 237 + } 230 238 /* Make sure class supplied is consistent with QP type */ 231 239 if (qp_type == IB_QPT_SMI) { 232 240 if ((mad_reg_req->mgmt_class != ··· 898 890 } 899 891 EXPORT_SYMBOL(ib_create_send_mad); 900 892 893 + int ib_get_mad_data_offset(u8 mgmt_class) 894 + { 895 + if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 896 + return IB_MGMT_SA_HDR; 897 + else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 898 + (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 899 + (mgmt_class == IB_MGMT_CLASS_BIS)) 900 + return IB_MGMT_DEVICE_HDR; 901 + else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 902 + (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 903 + return IB_MGMT_VENDOR_HDR; 904 + else 905 + return IB_MGMT_MAD_HDR; 906 + } 907 + EXPORT_SYMBOL(ib_get_mad_data_offset); 908 + 909 + int ib_is_mad_class_rmpp(u8 mgmt_class) 910 + { 911 + if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || 912 + (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 913 + (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 914 + (mgmt_class == IB_MGMT_CLASS_BIS) || 915 + ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 916 + (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) 917 + return 1; 918 + return 0; 919 + } 920 + EXPORT_SYMBOL(ib_is_mad_class_rmpp); 921 + 901 922 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) 902 923 { 903 924 struct ib_mad_send_wr_private *mad_send_wr; ··· 1057 1020 !send_buf->mad_agent->recv_handler)) { 1058 1021 ret = -EINVAL; 1059 1022 goto error; 1023 + } 1024 + 1025 + if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { 1026 + if (mad_agent_priv->agent.rmpp_version) { 1027 + ret = -EINVAL; 1028 + goto error; 1029 + } 1060 
1030 } 1061 1031 1062 1032 /* ··· 2498 2454 } 2499 2455 } 2500 2456 sg_list.addr = dma_map_single(qp_info->port_priv-> 2501 - device->dma_device, 2502 - &mad_priv->grh, 2503 - sizeof *mad_priv - 2504 - sizeof mad_priv->header, 2505 - DMA_FROM_DEVICE); 2457 + device->dma_device, 2458 + &mad_priv->grh, 2459 + sizeof *mad_priv - 2460 + sizeof mad_priv->header, 2461 + DMA_FROM_DEVICE); 2506 2462 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); 2507 2463 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; 2508 2464 mad_priv->header.mad_list.mad_queue = recv_queue;
+4 -15
drivers/infiniband/core/mad_rmpp.c
··· 1 1 /* 2 2 * Copyright (c) 2005 Intel Inc. All rights reserved. 3 - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 3 + * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved. 4 4 * 5 5 * This software is available to you under a choice of one of two 6 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 100 100 } 101 101 } 102 102 103 - static int data_offset(u8 mgmt_class) 104 - { 105 - if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 106 - return IB_MGMT_SA_HDR; 107 - else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 108 - (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 109 - return IB_MGMT_VENDOR_HDR; 110 - else 111 - return IB_MGMT_RMPP_HDR; 112 - } 113 - 114 103 static void format_ack(struct ib_mad_send_buf *msg, 115 104 struct ib_rmpp_mad *data, 116 105 struct mad_rmpp_recv *rmpp_recv) ··· 126 137 struct ib_mad_send_buf *msg; 127 138 int ret, hdr_len; 128 139 129 - hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); 140 + hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); 130 141 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, 131 142 recv_wc->wc->pkey_index, 1, hdr_len, 132 143 0, GFP_KERNEL); ··· 152 163 if (IS_ERR(ah)) 153 164 return (void *) ah; 154 165 155 - hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); 166 + hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); 156 167 msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, 157 168 recv_wc->wc->pkey_index, 1, 158 169 hdr_len, 0, GFP_KERNEL); ··· 397 408 398 409 rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; 399 410 400 - hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class); 411 + hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); 401 412 data_size = sizeof(struct ib_rmpp_mad) - hdr_size; 402 413 pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 403 414 if (pad > IB_MGMT_RMPP_DATA || pad < 0)
+7 -25
drivers/infiniband/core/user_mad.c
··· 177 177 return ret; 178 178 } 179 179 180 - static int data_offset(u8 mgmt_class) 181 - { 182 - if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 183 - return IB_MGMT_SA_HDR; 184 - else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 185 - (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 186 - return IB_MGMT_VENDOR_HDR; 187 - else 188 - return IB_MGMT_RMPP_HDR; 189 - } 190 - 191 180 static void send_handler(struct ib_mad_agent *agent, 192 181 struct ib_mad_send_wc *send_wc) 193 182 { ··· 272 283 */ 273 284 return -ENOSPC; 274 285 } 275 - offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class); 286 + offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class); 276 287 max_seg_payload = sizeof (struct ib_mad) - offset; 277 288 278 289 for (left = packet->length - seg_payload, buf += seg_payload; ··· 430 441 } 431 442 432 443 rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; 433 - if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { 434 - hdr_len = IB_MGMT_SA_HDR; 435 - copy_offset = IB_MGMT_RMPP_HDR; 436 - rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 437 - IB_MGMT_RMPP_FLAG_ACTIVE; 438 - } else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START && 439 - rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) { 440 - hdr_len = IB_MGMT_VENDOR_HDR; 441 - copy_offset = IB_MGMT_RMPP_HDR; 442 - rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 443 - IB_MGMT_RMPP_FLAG_ACTIVE; 444 - } else { 445 - hdr_len = IB_MGMT_MAD_HDR; 444 + hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); 445 + if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) { 446 446 copy_offset = IB_MGMT_MAD_HDR; 447 447 rmpp_active = 0; 448 + } else { 449 + copy_offset = IB_MGMT_RMPP_HDR; 450 + rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 451 + IB_MGMT_RMPP_FLAG_ACTIVE; 448 452 } 449 453 450 454 data_len = count - sizeof (struct ib_user_mad) - hdr_len;
+26 -1
include/rdma/ib_mad.h
··· 3 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 6 + * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved. 7 7 * 8 8 * This software is available to you under a choice of one of two 9 9 * licenses. You may choose to be licensed under the terms of the GNU ··· 55 55 #define IB_MGMT_CLASS_DEVICE_MGMT 0x06 56 56 #define IB_MGMT_CLASS_CM 0x07 57 57 #define IB_MGMT_CLASS_SNMP 0x08 58 + #define IB_MGMT_CLASS_DEVICE_ADM 0x10 59 + #define IB_MGMT_CLASS_BOOT_MGMT 0x11 60 + #define IB_MGMT_CLASS_BIS 0x12 61 + #define IB_MGMT_CLASS_CONG_MGMT 0x21 58 62 #define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30 59 63 #define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F 60 64 ··· 121 117 IB_MGMT_VENDOR_DATA = 216, 122 118 IB_MGMT_SA_HDR = 56, 123 119 IB_MGMT_SA_DATA = 200, 120 + IB_MGMT_DEVICE_HDR = 64, 121 + IB_MGMT_DEVICE_DATA = 192, 124 122 }; 125 123 126 124 struct ib_mad_hdr { ··· 607 601 int rmpp_active, 608 602 int hdr_len, int data_len, 609 603 gfp_t gfp_mask); 604 + 605 + /** 606 + * ib_is_mad_class_rmpp - returns whether given management class 607 + * supports RMPP. 608 + * @mgmt_class: management class 609 + * 610 + * This routine returns whether the management class supports RMPP. 611 + */ 612 + int ib_is_mad_class_rmpp(u8 mgmt_class); 613 + 614 + /** 615 + * ib_get_mad_data_offset - returns the data offset for a given 616 + * management class. 617 + * @mgmt_class: management class 618 + * 619 + * This routine returns the data offset in the MAD for the management 620 + * class requested. 621 + */ 622 + int ib_get_mad_data_offset(u8 mgmt_class); 610 623 611 624 /** 612 625 * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.