Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IB/mad: RMPP support for additional classes
IB/mad: include GID/class when matching receives
IB/mthca: Fix section mismatch problems
IPoIB: Fix oops with raw sockets
IB/mthca: Fix check of size in SRQ creation
IB/srp: Fix unmapping of fake scatterlist

+169 -91
+101 -11
drivers/infiniband/core/mad.c
··· 227 if (!is_vendor_oui(mad_reg_req->oui)) 228 goto error1; 229 } 230 /* Make sure class supplied is consistent with QP type */ 231 if (qp_type == IB_QPT_SMI) { 232 if ((mad_reg_req->mgmt_class != ··· 898 } 899 EXPORT_SYMBOL(ib_create_send_mad); 900 901 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) 902 { 903 struct ib_mad_send_wr_private *mad_send_wr; ··· 1057 !send_buf->mad_agent->recv_handler)) { 1058 ret = -EINVAL; 1059 goto error; 1060 } 1061 1062 /* ··· 1662 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); 1663 } 1664 1665 struct ib_mad_send_wr_private* 1666 - ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid) 1667 { 1668 struct ib_mad_send_wr_private *mad_send_wr; 1669 1670 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 1671 agent_list) { 1672 - if (mad_send_wr->tid == tid) 1673 return mad_send_wr; 1674 } 1675 ··· 1725 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 1726 agent_list) { 1727 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && 1728 - mad_send_wr->tid == tid && mad_send_wr->timeout) { 1729 /* Verify request has not been canceled */ 1730 return (mad_send_wr->status == IB_WC_SUCCESS) ? 1731 mad_send_wr : NULL; ··· 1753 struct ib_mad_send_wr_private *mad_send_wr; 1754 struct ib_mad_send_wc mad_send_wc; 1755 unsigned long flags; 1756 - __be64 tid; 1757 1758 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1759 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); ··· 1768 1769 /* Complete corresponding request */ 1770 if (response_mad(mad_recv_wc->recv_buf.mad)) { 1771 - tid = mad_recv_wc->recv_buf.mad->mad_hdr.tid; 1772 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1773 - mad_send_wr = ib_find_send_mad(mad_agent_priv, tid); 1774 if (!mad_send_wr) { 1775 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1776 ib_free_recv_mad(mad_recv_wc); ··· 2498 } 2499 } 2500 sg_list.addr = dma_map_single(qp_info->port_priv-> 2501 - device->dma_device, 2502 - &mad_priv->grh, 2503 - sizeof *mad_priv - 2504 - sizeof mad_priv->header, 2505 - DMA_FROM_DEVICE); 2506 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); 2507 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; 2508 mad_priv->header.mad_list.mad_queue = recv_queue;
··· 227 if (!is_vendor_oui(mad_reg_req->oui)) 228 goto error1; 229 } 230 + /* Make sure class supplied is consistent with RMPP */ 231 + if (ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) { 232 + if (!rmpp_version) 233 + goto error1; 234 + } else { 235 + if (rmpp_version) 236 + goto error1; 237 + } 238 /* Make sure class supplied is consistent with QP type */ 239 if (qp_type == IB_QPT_SMI) { 240 if ((mad_reg_req->mgmt_class != ··· 890 } 891 EXPORT_SYMBOL(ib_create_send_mad); 892 893 + int ib_get_mad_data_offset(u8 mgmt_class) 894 + { 895 + if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 896 + return IB_MGMT_SA_HDR; 897 + else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 898 + (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 899 + (mgmt_class == IB_MGMT_CLASS_BIS)) 900 + return IB_MGMT_DEVICE_HDR; 901 + else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 902 + (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 903 + return IB_MGMT_VENDOR_HDR; 904 + else 905 + return IB_MGMT_MAD_HDR; 906 + } 907 + EXPORT_SYMBOL(ib_get_mad_data_offset); 908 + 909 + int ib_is_mad_class_rmpp(u8 mgmt_class) 910 + { 911 + if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) || 912 + (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) || 913 + (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) || 914 + (mgmt_class == IB_MGMT_CLASS_BIS) || 915 + ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 916 + (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))) 917 + return 1; 918 + return 0; 919 + } 920 + EXPORT_SYMBOL(ib_is_mad_class_rmpp); 921 + 922 void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num) 923 { 924 struct ib_mad_send_wr_private *mad_send_wr; ··· 1020 !send_buf->mad_agent->recv_handler)) { 1021 ret = -EINVAL; 1022 goto error; 1023 + } 1024 + 1025 + if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) { 1026 + if (mad_agent_priv->agent.rmpp_version) { 1027 + ret = -EINVAL; 1028 + goto error; 1029 + } 1030 } 1031 1032 /* ··· 1618 (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA); 1619 } 1620 1621 + static inline int rcv_has_same_class(struct ib_mad_send_wr_private *wr, 1622 + struct ib_mad_recv_wc *rwc) 1623 + { 1624 + return ((struct ib_mad *)(wr->send_buf.mad))->mad_hdr.mgmt_class == 1625 + rwc->recv_buf.mad->mad_hdr.mgmt_class; 1626 + } 1627 + 1628 + static inline int rcv_has_same_gid(struct ib_mad_send_wr_private *wr, 1629 + struct ib_mad_recv_wc *rwc ) 1630 + { 1631 + struct ib_ah_attr attr; 1632 + u8 send_resp, rcv_resp; 1633 + 1634 + send_resp = ((struct ib_mad *)(wr->send_buf.mad))-> 1635 + mad_hdr.method & IB_MGMT_METHOD_RESP; 1636 + rcv_resp = rwc->recv_buf.mad->mad_hdr.method & IB_MGMT_METHOD_RESP; 1637 + 1638 + if (!send_resp && rcv_resp) 1639 + /* is request/response. GID/LIDs are both local (same). */ 1640 + return 1; 1641 + 1642 + if (send_resp == rcv_resp) 1643 + /* both requests, or both responses. GIDs different */ 1644 + return 0; 1645 + 1646 + if (ib_query_ah(wr->send_buf.ah, &attr)) 1647 + /* Assume not equal, to avoid false positives. */ 1648 + return 0; 1649 + 1650 + if (!(attr.ah_flags & IB_AH_GRH) && !(rwc->wc->wc_flags & IB_WC_GRH)) 1651 + return attr.dlid == rwc->wc->slid; 1652 + else if ((attr.ah_flags & IB_AH_GRH) && 1653 + (rwc->wc->wc_flags & IB_WC_GRH)) 1654 + return memcmp(attr.grh.dgid.raw, 1655 + rwc->recv_buf.grh->sgid.raw, 16) == 0; 1656 + else 1657 + /* one has GID, other does not. Assume different */ 1658 + return 0; 1659 + } 1660 struct ib_mad_send_wr_private* 1661 + ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, 1662 + struct ib_mad_recv_wc *mad_recv_wc) 1663 { 1664 struct ib_mad_send_wr_private *mad_send_wr; 1665 + struct ib_mad *mad; 1666 + 1667 + mad = (struct ib_mad *)mad_recv_wc->recv_buf.mad; 1668 1669 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 1670 agent_list) { 1671 + if ((mad_send_wr->tid == mad->mad_hdr.tid) && 1672 + rcv_has_same_class(mad_send_wr, mad_recv_wc) && 1673 + rcv_has_same_gid(mad_send_wr, mad_recv_wc)) 1674 return mad_send_wr; 1675 } 1676 ··· 1636 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 1637 agent_list) { 1638 if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && 1639 + mad_send_wr->tid == mad->mad_hdr.tid && 1640 + mad_send_wr->timeout && 1641 + rcv_has_same_class(mad_send_wr, mad_recv_wc) && 1642 + rcv_has_same_gid(mad_send_wr, mad_recv_wc)) { 1643 /* Verify request has not been canceled */ 1644 return (mad_send_wr->status == IB_WC_SUCCESS) ? 1645 mad_send_wr : NULL; ··· 1661 struct ib_mad_send_wr_private *mad_send_wr; 1662 struct ib_mad_send_wc mad_send_wc; 1663 unsigned long flags; 1664 1665 INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); 1666 list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); ··· 1677 1678 /* Complete corresponding request */ 1679 if (response_mad(mad_recv_wc->recv_buf.mad)) { 1680 spin_lock_irqsave(&mad_agent_priv->lock, flags); 1681 + mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); 1682 if (!mad_send_wr) { 1683 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1684 ib_free_recv_mad(mad_recv_wc); ··· 2408 } 2409 } 2410 sg_list.addr = dma_map_single(qp_info->port_priv-> 2411 + device->dma_device, 2412 + &mad_priv->grh, 2413 + sizeof *mad_priv - 2414 + sizeof mad_priv->header, 2415 + DMA_FROM_DEVICE); 2416 pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); 2417 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; 2418 mad_priv->header.mad_list.mad_queue = recv_queue;
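
The new consistency check in ib_register_mad_agent() means callers must pass an rmpp_version that matches the class they register for. A minimal sketch of a conforming registration for one of the newly RMPP-covered classes (the handler names, port number and context are placeholders, not part of this merge):

	#include <linux/string.h>
	#include <linux/bitops.h>
	#include <rdma/ib_mad.h>

	static struct ib_mad_agent *register_dm_agent(struct ib_device *device,
						      u8 port_num,
						      ib_mad_send_handler send_handler,
						      ib_mad_recv_handler recv_handler,
						      void *context)
	{
		struct ib_mad_reg_req req;

		memset(&req, 0, sizeof req);
		req.mgmt_class         = IB_MGMT_CLASS_DEVICE_MGMT;
		req.mgmt_class_version = 1;
		set_bit(IB_MGMT_METHOD_GET, req.method_mask);

		/*
		 * Device Management is RMPP-capable, so rmpp_version must be
		 * IB_MGMT_RMPP_VERSION here; passing 0 (or a non-zero
		 * rmpp_version with a non-RMPP class) now makes
		 * ib_register_mad_agent() fail.
		 */
		return ib_register_mad_agent(device, port_num, IB_QPT_GSI,
					     &req, IB_MGMT_RMPP_VERSION,
					     send_handler, recv_handler,
					     context);
	}
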
+2 -1
drivers/infiniband/core/mad_priv.h
··· 216 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); 217 218 struct ib_mad_send_wr_private * 219 - ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, __be64 tid); 220 221 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 222 struct ib_mad_send_wc *mad_send_wc);
··· 216 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); 217 218 struct ib_mad_send_wr_private * 219 + ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, 220 + struct ib_mad_recv_wc *mad_recv_wc); 221 222 void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, 223 struct ib_mad_send_wc *mad_send_wc);
+17 -37
drivers/infiniband/core/mad_rmpp.c
··· 1 /* 2 * Copyright (c) 2005 Intel Inc. All rights reserved. 3 - * Copyright (c) 2005 Voltaire, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 100 } 101 } 102 103 - static int data_offset(u8 mgmt_class) 104 - { 105 - if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 106 - return IB_MGMT_SA_HDR; 107 - else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 108 - (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 109 - return IB_MGMT_VENDOR_HDR; 110 - else 111 - return IB_MGMT_RMPP_HDR; 112 - } 113 - 114 static void format_ack(struct ib_mad_send_buf *msg, 115 struct ib_rmpp_mad *data, 116 struct mad_rmpp_recv *rmpp_recv) ··· 126 struct ib_mad_send_buf *msg; 127 int ret, hdr_len; 128 129 - hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); 130 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, 131 recv_wc->wc->pkey_index, 1, hdr_len, 132 0, GFP_KERNEL); ··· 152 if (IS_ERR(ah)) 153 return (void *) ah; 154 155 - hdr_len = data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); 156 msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, 157 recv_wc->wc->pkey_index, 1, 158 hdr_len, 0, GFP_KERNEL); ··· 397 398 rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; 399 400 - hdr_size = data_offset(rmpp_mad->mad_hdr.mgmt_class); 401 data_size = sizeof(struct ib_rmpp_mad) - hdr_size; 402 pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 403 if (pad > IB_MGMT_RMPP_DATA || pad < 0) ··· 551 return ib_send_mad(mad_send_wr); 552 } 553 554 - static void abort_send(struct ib_mad_agent_private *agent, __be64 tid, 555 - u8 rmpp_status) 556 { 557 struct ib_mad_send_wr_private *mad_send_wr; 558 struct ib_mad_send_wc wc; 559 unsigned long flags; 560 561 spin_lock_irqsave(&agent->lock, flags); 562 - mad_send_wr = ib_find_send_mad(agent, tid); 563 if (!mad_send_wr) 564 goto out; /* Unmatched send */ 565 ··· 601 602 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 603 if (rmpp_mad->rmpp_hdr.rmpp_status) { 604 - abort_send(agent, rmpp_mad->mad_hdr.tid, 605 - IB_MGMT_RMPP_STATUS_BAD_STATUS); 606 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 607 return; 608 } ··· 609 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); 610 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 611 if (newwin < seg_num) { 612 - abort_send(agent, rmpp_mad->mad_hdr.tid, 613 - IB_MGMT_RMPP_STATUS_W2S); 614 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); 615 return; 616 } 617 618 spin_lock_irqsave(&agent->lock, flags); 619 - mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid); 620 if (!mad_send_wr) 621 goto out; /* Unmatched ACK */ 622 ··· 626 if (seg_num > mad_send_wr->send_buf.seg_count || 627 seg_num > mad_send_wr->newwin) { 628 spin_unlock_irqrestore(&agent->lock, flags); 629 - abort_send(agent, rmpp_mad->mad_hdr.tid, 630 - IB_MGMT_RMPP_STATUS_S2B); 631 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); 632 return; 633 } ··· 714 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 715 716 if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { 717 - abort_send(agent, rmpp_mad->mad_hdr.tid, 718 - IB_MGMT_RMPP_STATUS_BAD_STATUS); 719 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 720 } else 721 - abort_send(agent, rmpp_mad->mad_hdr.tid, 722 - rmpp_mad->rmpp_hdr.rmpp_status); 723 } 724 725 static void process_rmpp_abort(struct ib_mad_agent_private *agent, ··· 729 730 if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || 731 rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { 732 - abort_send(agent, rmpp_mad->mad_hdr.tid, 733 - IB_MGMT_RMPP_STATUS_BAD_STATUS); 734 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 735 } else 736 - abort_send(agent, rmpp_mad->mad_hdr.tid, 737 - rmpp_mad->rmpp_hdr.rmpp_status); 738 } 739 740 struct ib_mad_recv_wc * ··· 746 return mad_recv_wc; 747 748 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { 749 - abort_send(agent, rmpp_mad->mad_hdr.tid, 750 - IB_MGMT_RMPP_STATUS_UNV); 751 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); 752 goto out; 753 } ··· 764 process_rmpp_abort(agent, mad_recv_wc); 765 break; 766 default: 767 - abort_send(agent, rmpp_mad->mad_hdr.tid, 768 - IB_MGMT_RMPP_STATUS_BADT); 769 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); 770 break; 771 }
··· 1 /* 2 * Copyright (c) 2005 Intel Inc. All rights reserved. 3 + * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 100 } 101 } 102 103 static void format_ack(struct ib_mad_send_buf *msg, 104 struct ib_rmpp_mad *data, 105 struct mad_rmpp_recv *rmpp_recv) ··· 137 struct ib_mad_send_buf *msg; 138 int ret, hdr_len; 139 140 + hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); 141 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, 142 recv_wc->wc->pkey_index, 1, hdr_len, 143 0, GFP_KERNEL); ··· 163 if (IS_ERR(ah)) 164 return (void *) ah; 165 166 + hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class); 167 msg = ib_create_send_mad(agent, recv_wc->wc->src_qp, 168 recv_wc->wc->pkey_index, 1, 169 hdr_len, 0, GFP_KERNEL); ··· 408 409 rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad; 410 411 + hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); 412 data_size = sizeof(struct ib_rmpp_mad) - hdr_size; 413 pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 414 if (pad > IB_MGMT_RMPP_DATA || pad < 0) ··· 562 return ib_send_mad(mad_send_wr); 563 } 564 565 + static void abort_send(struct ib_mad_agent_private *agent, 566 + struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status) 567 { 568 struct ib_mad_send_wr_private *mad_send_wr; 569 struct ib_mad_send_wc wc; 570 unsigned long flags; 571 572 spin_lock_irqsave(&agent->lock, flags); 573 + mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); 574 if (!mad_send_wr) 575 goto out; /* Unmatched send */ 576 ··· 612 613 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 614 if (rmpp_mad->rmpp_hdr.rmpp_status) { 615 + abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 616 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 617 return; 618 } ··· 621 seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num); 622 newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 623 if (newwin < seg_num) { 624 + abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); 625 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S); 626 return; 627 } 628 629 spin_lock_irqsave(&agent->lock, flags); 630 + mad_send_wr = ib_find_send_mad(agent, mad_recv_wc); 631 if (!mad_send_wr) 632 goto out; /* Unmatched ACK */ 633 ··· 639 if (seg_num > mad_send_wr->send_buf.seg_count || 640 seg_num > mad_send_wr->newwin) { 641 spin_unlock_irqrestore(&agent->lock, flags); 642 + abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); 643 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B); 644 return; 645 } ··· 728 rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad; 729 730 if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) { 731 + abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 732 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 733 } else 734 + abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); 735 } 736 737 static void process_rmpp_abort(struct ib_mad_agent_private *agent, ··· 745 746 if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN || 747 rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) { 748 + abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 749 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS); 750 } else 751 + abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status); 752 } 753 
754 struct ib_mad_recv_wc * ··· 764 return mad_recv_wc; 765 766 if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) { 767 + abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); 768 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV); 769 goto out; 770 } ··· 783 process_rmpp_abort(agent, mad_recv_wc); 784 break; 785 default: 786 + abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); 787 nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT); 788 break; 789 }
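
With the local data_offset() helper gone, the per-class header size comes from ib_get_mad_data_offset() everywhere; it also determines how much payload each RMPP segment carries. A small illustration (the helper name below is made up; the constants are the ones visible in the ib_mad.h hunk of this merge):

	#include <rdma/ib_mad.h>

	/* Payload bytes carried by one RMPP segment of a given class:
	 * SA              -> 256 - IB_MGMT_SA_HDR (56)     = 200 (IB_MGMT_SA_DATA)
	 * DM, DevAdm, BIS -> 256 - IB_MGMT_DEVICE_HDR (64) = 192 (IB_MGMT_DEVICE_DATA)
	 * vendor range 2  -> 256 - IB_MGMT_VENDOR_HDR      = 216 (IB_MGMT_VENDOR_DATA) */
	static int rmpp_seg_payload(u8 mgmt_class)
	{
		return sizeof (struct ib_mad) - ib_get_mad_data_offset(mgmt_class);
	}
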
+7 -25
drivers/infiniband/core/user_mad.c
··· 177 return ret; 178 } 179 180 - static int data_offset(u8 mgmt_class) 181 - { 182 - if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 183 - return IB_MGMT_SA_HDR; 184 - else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 185 - (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 186 - return IB_MGMT_VENDOR_HDR; 187 - else 188 - return IB_MGMT_RMPP_HDR; 189 - } 190 - 191 static void send_handler(struct ib_mad_agent *agent, 192 struct ib_mad_send_wc *send_wc) 193 { ··· 272 */ 273 return -ENOSPC; 274 } 275 - offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class); 276 max_seg_payload = sizeof (struct ib_mad) - offset; 277 278 for (left = packet->length - seg_payload, buf += seg_payload; ··· 430 } 431 432 rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; 433 - if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { 434 - hdr_len = IB_MGMT_SA_HDR; 435 - copy_offset = IB_MGMT_RMPP_HDR; 436 - rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 437 - IB_MGMT_RMPP_FLAG_ACTIVE; 438 - } else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START && 439 - rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) { 440 - hdr_len = IB_MGMT_VENDOR_HDR; 441 - copy_offset = IB_MGMT_RMPP_HDR; 442 - rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 443 - IB_MGMT_RMPP_FLAG_ACTIVE; 444 - } else { 445 - hdr_len = IB_MGMT_MAD_HDR; 446 copy_offset = IB_MGMT_MAD_HDR; 447 rmpp_active = 0; 448 } 449 450 data_len = count - sizeof (struct ib_user_mad) - hdr_len;
··· 177 return ret; 178 } 179 180 static void send_handler(struct ib_mad_agent *agent, 181 struct ib_mad_send_wc *send_wc) 182 { ··· 283 */ 284 return -ENOSPC; 285 } 286 + offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class); 287 max_seg_payload = sizeof (struct ib_mad) - offset; 288 289 for (left = packet->length - seg_payload, buf += seg_payload; ··· 441 } 442 443 rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data; 444 + hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class); 445 + if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) { 446 copy_offset = IB_MGMT_MAD_HDR; 447 rmpp_active = 0; 448 + } else { 449 + copy_offset = IB_MGMT_RMPP_HDR; 450 + rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 451 + IB_MGMT_RMPP_FLAG_ACTIVE; 452 } 453 454 data_len = count - sizeof (struct ib_user_mad) - hdr_len;
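
In the rewritten ib_umad_write() path above, both hdr_len and copy_offset now come from the shared helpers instead of per-class special cases. For a Device Management class send, for example, the sizing works out as sketched below (illustrative only; "count" is the byte count passed to write()):

	#include <rdma/ib_mad.h>
	#include <rdma/ib_user_mad.h>

	/* hdr_len     = ib_get_mad_data_offset(IB_MGMT_CLASS_DEVICE_MGMT) = IB_MGMT_DEVICE_HDR (64)
	 * copy_offset = IB_MGMT_RMPP_HDR, since ib_is_mad_class_rmpp() is true for this class */
	static int umad_dm_data_len(size_t count)
	{
		return count - sizeof (struct ib_user_mad) - IB_MGMT_DEVICE_HDR;
	}
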
+1 -1
drivers/infiniband/hw/mthca/mthca_av.c
··· 265 return -ENOMEM; 266 } 267 268 - void __devexit mthca_cleanup_av_table(struct mthca_dev *dev) 269 { 270 if (mthca_is_memfree(dev)) 271 return;
··· 265 return -ENOMEM; 266 } 267 268 + void mthca_cleanup_av_table(struct mthca_dev *dev) 269 { 270 if (mthca_is_memfree(dev)) 271 return;
+1 -1
drivers/infiniband/hw/mthca/mthca_cq.c
··· 973 return err; 974 } 975 976 - void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev) 977 { 978 mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs); 979 mthca_alloc_cleanup(&dev->cq_table.alloc);
··· 973 return err; 974 } 975 976 + void mthca_cleanup_cq_table(struct mthca_dev *dev) 977 { 978 mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs); 979 mthca_alloc_cleanup(&dev->cq_table.alloc);
+3 -3
drivers/infiniband/hw/mthca/mthca_eq.c
··· 765 766 } 767 768 - static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev) 769 { 770 if (mthca_is_memfree(dev)) { 771 mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & ··· 821 return ret; 822 } 823 824 - void __devexit mthca_unmap_eq_icm(struct mthca_dev *dev) 825 { 826 u8 status; 827 ··· 954 return err; 955 } 956 957 - void __devexit mthca_cleanup_eq_table(struct mthca_dev *dev) 958 { 959 u8 status; 960 int i;
··· 765 766 } 767 768 + static void mthca_unmap_eq_regs(struct mthca_dev *dev) 769 { 770 if (mthca_is_memfree(dev)) { 771 mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) & ··· 821 return ret; 822 } 823 824 + void mthca_unmap_eq_icm(struct mthca_dev *dev) 825 { 826 u8 status; 827 ··· 954 return err; 955 } 956 957 + void mthca_cleanup_eq_table(struct mthca_dev *dev) 958 { 959 u8 status; 960 int i;
+1 -1
drivers/infiniband/hw/mthca/mthca_mad.c
··· 271 return PTR_ERR(agent); 272 } 273 274 - void mthca_free_agents(struct mthca_dev *dev) 275 { 276 struct ib_mad_agent *agent; 277 int p, q;
··· 271 return PTR_ERR(agent); 272 } 273 274 + void __devexit mthca_free_agents(struct mthca_dev *dev) 275 { 276 struct ib_mad_agent *agent; 277 int p, q;
+1 -1
drivers/infiniband/hw/mthca/mthca_mcg.c
··· 388 return 0; 389 } 390 391 - void __devexit mthca_cleanup_mcg_table(struct mthca_dev *dev) 392 { 393 mthca_alloc_cleanup(&dev->mcg_table.alloc); 394 }
··· 388 return 0; 389 } 390 391 + void mthca_cleanup_mcg_table(struct mthca_dev *dev) 392 { 393 mthca_alloc_cleanup(&dev->mcg_table.alloc); 394 }
+2 -2
drivers/infiniband/hw/mthca/mthca_mr.c
··· 170 return -ENOMEM; 171 } 172 173 - static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy) 174 { 175 int i; 176 ··· 866 return err; 867 } 868 869 - void __devexit mthca_cleanup_mr_table(struct mthca_dev *dev) 870 { 871 /* XXX check if any MRs are still allocated? */ 872 if (dev->limits.fmr_reserved_mtts)
··· 170 return -ENOMEM; 171 } 172 173 + static void mthca_buddy_cleanup(struct mthca_buddy *buddy) 174 { 175 int i; 176 ··· 866 return err; 867 } 868 869 + void mthca_cleanup_mr_table(struct mthca_dev *dev) 870 { 871 /* XXX check if any MRs are still allocated? */ 872 if (dev->limits.fmr_reserved_mtts)
+1 -1
drivers/infiniband/hw/mthca/mthca_pd.c
··· 77 dev->limits.reserved_pds); 78 } 79 80 - void __devexit mthca_cleanup_pd_table(struct mthca_dev *dev) 81 { 82 /* XXX check if any PDs are still allocated? */ 83 mthca_alloc_cleanup(&dev->pd_table.alloc);
··· 77 dev->limits.reserved_pds); 78 } 79 80 + void mthca_cleanup_pd_table(struct mthca_dev *dev) 81 { 82 /* XXX check if any PDs are still allocated? */ 83 mthca_alloc_cleanup(&dev->pd_table.alloc);
+1 -1
drivers/infiniband/hw/mthca/mthca_qp.c
··· 2204 return err; 2205 } 2206 2207 - void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev) 2208 { 2209 int i; 2210 u8 status;
··· 2204 return err; 2205 } 2206 2207 + void mthca_cleanup_qp_table(struct mthca_dev *dev) 2208 { 2209 int i; 2210 u8 status;
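
The __devexit removals in the mthca hunks above (and one more in the SRQ hunk below) fix section mismatch warnings: a __devexit function is placed in .devexit.text, which may be discarded, so it must not be reachable from __devinit probe or error-path code. Conversely, the mthca_mad.c hunk marks mthca_free_agents() __devexit, presumably because it is only needed on the removal path. A minimal sketch of the offending pattern, with made-up function names wrapped around real mthca helpers:

	#include <linux/init.h>
	#include "mthca_dev.h"

	/* A cleanup helper that is also needed on the probe error path must
	 * not live in .devexit.text -- this is what the hunks above change. */
	static void example_cleanup_table(struct mthca_dev *dev)
	{
		mthca_alloc_cleanup(&dev->pd_table.alloc);
	}

	static int __devinit example_init_one(struct mthca_dev *dev)
	{
		int err;

		err = mthca_init_pd_table(dev);
		if (err)
			return err;

		err = mthca_init_eq_table(dev);
		if (err)
			/* If example_cleanup_table() were __devexit, this call
			 * from __devinit code would be the cross-section
			 * reference that modpost warns about. */
			example_cleanup_table(dev);

		return err;
	}
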
+2 -2
drivers/infiniband/hw/mthca/mthca_srq.c
··· 206 roundup_pow_of_two(sizeof (struct mthca_next_seg) + 207 srq->max_gs * sizeof (struct mthca_data_seg))); 208 209 - if (ds > dev->limits.max_desc_sz) 210 return -EINVAL; 211 212 srq->wqe_shift = long_log2(ds); ··· 684 return err; 685 } 686 687 - void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev) 688 { 689 if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) 690 return;
··· 206 roundup_pow_of_two(sizeof (struct mthca_next_seg) + 207 srq->max_gs * sizeof (struct mthca_data_seg))); 208 209 + if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz)) 210 return -EINVAL; 211 212 srq->wqe_shift = long_log2(ds); ··· 684 return err; 685 } 686 687 + void mthca_cleanup_srq_table(struct mthca_dev *dev) 688 { 689 if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) 690 return;
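
For reference, the ds value being checked here is computed just above as (roughly) ds = max(64, roundup_pow_of_two(sizeof(struct mthca_next_seg) + srq->max_gs * sizeof(struct mthca_data_seg))), with srq->wqe_shift = log2(ds). Assuming the usual 16-byte next and data segments, max_gs = 16 gives ds = roundup_pow_of_two(16 + 16 * 16) = 512 and wqe_shift = 9; after this change that size is only checked against dev->limits.max_desc_sz when the HCA is not mem-free.
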
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 723 * destination address onto the front of the skb so we can 724 * figure out where to send the packet later. 725 */ 726 - if (!skb->dst || !skb->dst->neighbour) { 727 struct ipoib_pseudoheader *phdr = 728 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); 729 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
··· 723 * destination address onto the front of the skb so we can 724 * figure out where to send the packet later. 725 */ 726 + if ((!skb->dst || !skb->dst->neighbour) && daddr) { 727 struct ipoib_pseudoheader *phdr = 728 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); 729 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+2 -2
drivers/infiniband/ulp/srp/ib_srp.c
··· 607 */ 608 if (likely(scmnd->use_sg)) { 609 nents = scmnd->use_sg; 610 - scat = (struct scatterlist *) scmnd->request_buffer; 611 } else { 612 nents = 1; 613 - scat = (struct scatterlist *) scmnd->request_buffer; 614 } 615 616 dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
··· 607 */ 608 if (likely(scmnd->use_sg)) { 609 nents = scmnd->use_sg; 610 + scat = scmnd->request_buffer; 611 } else { 612 nents = 1; 613 + scat = &req->fake_sg; 614 } 615 616 dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents,
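
For context, the map side (srp_map_data()) builds a one-entry scatterlist in req->fake_sg for commands that do not use scatter-gather; the bug was that the unmap path cast request_buffer back to a scatterlist instead of reusing that entry. A rough sketch of the mapping side, assuming the 2.6.16-era SCSI command fields and the srp_request/srp_target_port definitions from ib_srp.h (not the exact driver code):

	#include <linux/scatterlist.h>
	#include <scsi/scsi_cmnd.h>
	#include "ib_srp.h"

	static int example_map_data(struct scsi_cmnd *scmnd,
				    struct srp_target_port *target,
				    struct srp_request *req)
	{
		struct scatterlist *scat;
		int nents;

		if (scmnd->use_sg) {
			scat  = scmnd->request_buffer;
			nents = scmnd->use_sg;
		} else {
			/* Build the one-entry "fake" scatterlist that the
			 * fixed unmap path above now reuses. */
			scat  = &req->fake_sg;
			sg_init_one(scat, scmnd->request_buffer,
				    scmnd->request_bufflen);
			nents = 1;
		}

		return dma_map_sg(target->srp_host->dev->dma_device, scat,
				  nents, scmnd->sc_data_direction);
	}
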
+26 -1
include/rdma/ib_mad.h
··· 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 - * Copyright (c) 2004 Voltaire Corporation. All rights reserved. 7 * 8 * This software is available to you under a choice of one of two 9 * licenses. You may choose to be licensed under the terms of the GNU ··· 55 #define IB_MGMT_CLASS_DEVICE_MGMT 0x06 56 #define IB_MGMT_CLASS_CM 0x07 57 #define IB_MGMT_CLASS_SNMP 0x08 58 #define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30 59 #define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F 60 ··· 121 IB_MGMT_VENDOR_DATA = 216, 122 IB_MGMT_SA_HDR = 56, 123 IB_MGMT_SA_DATA = 200, 124 }; 125 126 struct ib_mad_hdr { ··· 607 int rmpp_active, 608 int hdr_len, int data_len, 609 gfp_t gfp_mask); 610 611 /** 612 * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
··· 3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved. 4 * Copyright (c) 2004 Intel Corporation. All rights reserved. 5 * Copyright (c) 2004 Topspin Corporation. All rights reserved. 6 + * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved. 7 * 8 * This software is available to you under a choice of one of two 9 * licenses. You may choose to be licensed under the terms of the GNU ··· 55 #define IB_MGMT_CLASS_DEVICE_MGMT 0x06 56 #define IB_MGMT_CLASS_CM 0x07 57 #define IB_MGMT_CLASS_SNMP 0x08 58 + #define IB_MGMT_CLASS_DEVICE_ADM 0x10 59 + #define IB_MGMT_CLASS_BOOT_MGMT 0x11 60 + #define IB_MGMT_CLASS_BIS 0x12 61 + #define IB_MGMT_CLASS_CONG_MGMT 0x21 62 #define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30 63 #define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F 64 ··· 117 IB_MGMT_VENDOR_DATA = 216, 118 IB_MGMT_SA_HDR = 56, 119 IB_MGMT_SA_DATA = 200, 120 + IB_MGMT_DEVICE_HDR = 64, 121 + IB_MGMT_DEVICE_DATA = 192, 122 }; 123 124 struct ib_mad_hdr { ··· 601 int rmpp_active, 602 int hdr_len, int data_len, 603 gfp_t gfp_mask); 604 + 605 + /** 606 + * ib_is_mad_class_rmpp - returns whether given management class 607 + * supports RMPP. 608 + * @mgmt_class: management class 609 + * 610 + * This routine returns whether the management class supports RMPP. 611 + */ 612 + int ib_is_mad_class_rmpp(u8 mgmt_class); 613 + 614 + /** 615 + * ib_get_mad_data_offset - returns the data offset for a given 616 + * management class. 617 + * @mgmt_class: management class 618 + * 619 + * This routine returns the data offset in the MAD for the management 620 + * class requested. 621 + */ 622 + int ib_get_mad_data_offset(u8 mgmt_class); 623 624 /** 625 * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
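
Together, the two new helpers let a MAD client size a send buffer for any management class without open-coding class checks. A minimal, hypothetical sketch (the agent, addressing and payload handling are assumed to be set up by the caller; the agent must have been registered with a matching rmpp_version):

	#include <rdma/ib_mad.h>

	static struct ib_mad_send_buf *alloc_mad_for_class(struct ib_mad_agent *agent,
							   u32 remote_qpn,
							   u16 pkey_index,
							   u8 mgmt_class,
							   int data_len)
	{
		int hdr_len     = ib_get_mad_data_offset(mgmt_class);
		/* Only RMPP-capable classes may use RMPP; enable it here when
		 * the payload does not fit in a single MAD. */
		int rmpp_active = ib_is_mad_class_rmpp(mgmt_class) &&
				  hdr_len + data_len > sizeof (struct ib_mad);

		return ib_create_send_mad(agent, remote_qpn, pkey_index,
					  rmpp_active, hdr_len, data_len,
					  GFP_KERNEL);
	}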