[PATCH] IB: Add handling for ABORT and STOP RMPP MADs.

Add handling for ABORT / STOP RMPP MADs: validate the RMPP header of
incoming MADs (version, type, status, segment number, and window), answer
protocol errors with an ABORT MAD via nack_recv(), and terminate the
matching local send with IB_WC_REM_ABORT_ERR via abort_send() when the
peer ABORTs or STOPs a transfer. The receive-timeout path now sends a
T2L abort in place of the old TODO.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Sean Hefty, committed by Roland Dreier · fe9e08e1 b9ef520f
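
[Note] With this change, the sending side of an RMPP transfer learns about a
remote ABORT or STOP through its normal send-completion callback: abort_send()
completes the work request with IB_WC_REM_ABORT_ERR and passes the peer's RMPP
status code through vendor_err. A minimal consumer-side sketch; my_send_handler
is a hypothetical callback registered with ib_register_mad_agent(), not part of
this patch:

/*
 * Hypothetical send handler: after this patch, a send that the remote
 * peer ABORTs or STOPs (or that fails an RMPP sanity check) completes
 * with IB_WC_REM_ABORT_ERR; vendor_err carries the RMPP status code.
 */
static void my_send_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_send_wc *mad_send_wc)
{
        if (mad_send_wc->status == IB_WC_REM_ABORT_ERR)
                printk(KERN_WARNING "RMPP send aborted, RMPP status %u\n",
                       mad_send_wc->vendor_err);
}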

2 files changed: +246 -65

drivers/infiniband/core/mad_rmpp.c (+244 -65)
···
 }
 }
 
+static int data_offset(u8 mgmt_class)
+{
+        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+                return offsetof(struct ib_sa_mad, data);
+        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+                return offsetof(struct ib_vendor_mad, data);
+        else
+                return offsetof(struct ib_rmpp_mad, data);
+}
+
+static void format_ack(struct ib_rmpp_mad *ack,
+                       struct ib_rmpp_mad *data,
+                       struct mad_rmpp_recv *rmpp_recv)
+{
+        unsigned long flags;
+
+        memcpy(&ack->mad_hdr, &data->mad_hdr,
+               data_offset(data->mad_hdr.mgmt_class));
+
+        ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+        ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
+        ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+
+        spin_lock_irqsave(&rmpp_recv->lock, flags);
+        rmpp_recv->last_ack = rmpp_recv->seg_num;
+        ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
+        ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
+        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
+}
+
+static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
+                     struct ib_mad_recv_wc *recv_wc)
+{
+        struct ib_mad_send_buf *msg;
+        struct ib_send_wr *bad_send_wr;
+        int hdr_len, ret;
+
+        hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+        msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
+                                 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
+                                 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
+                                 GFP_KERNEL);
+        if (!msg)
+                return;
+
+        format_ack((struct ib_rmpp_mad *) msg->mad,
+                   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
+        ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
+                               &bad_send_wr);
+        if (ret)
+                ib_free_send_mad(msg);
+}
+
+static int alloc_response_msg(struct ib_mad_agent *agent,
+                              struct ib_mad_recv_wc *recv_wc,
+                              struct ib_mad_send_buf **msg)
+{
+        struct ib_mad_send_buf *m;
+        struct ib_ah *ah;
+        int hdr_len;
+
+        ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
+                                  recv_wc->recv_buf.grh, agent->port_num);
+        if (IS_ERR(ah))
+                return PTR_ERR(ah);
+
+        hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
+        m = ib_create_send_mad(agent, recv_wc->wc->src_qp,
+                               recv_wc->wc->pkey_index, ah, 1, hdr_len,
+                               sizeof(struct ib_rmpp_mad) - hdr_len,
+                               GFP_KERNEL);
+        if (IS_ERR(m)) {
+                ib_destroy_ah(ah);
+                return PTR_ERR(m);
+        }
+        *msg = m;
+        return 0;
+}
+
+static void free_msg(struct ib_mad_send_buf *msg)
+{
+        ib_destroy_ah(msg->send_wr.wr.ud.ah);
+        ib_free_send_mad(msg);
+}
+
+static void nack_recv(struct ib_mad_agent_private *agent,
+                      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
+{
+        struct ib_mad_send_buf *msg;
+        struct ib_rmpp_mad *rmpp_mad;
+        struct ib_send_wr *bad_send_wr;
+        int ret;
+
+        ret = alloc_response_msg(&agent->agent, recv_wc, &msg);
+        if (ret)
+                return;
+
+        rmpp_mad = (struct ib_rmpp_mad *) msg->mad;
+        memcpy(rmpp_mad, recv_wc->recv_buf.mad,
+               data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class));
+
+        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
+        rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
+        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
+        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
+        rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
+        rmpp_mad->rmpp_hdr.seg_num = 0;
+        rmpp_mad->rmpp_hdr.paylen_newwin = 0;
+
+        ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr);
+        if (ret)
+                free_msg(msg);
+}
+
 static void recv_timeout_handler(void *data)
 {
         struct mad_rmpp_recv *rmpp_recv = data;
···
         list_del(&rmpp_recv->list);
         spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
 
-        /* TODO: send abort. */
         rmpp_wc = rmpp_recv->rmpp_wc;
+        nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
         destroy_rmpp_recv(rmpp_recv);
         ib_free_recv_mad(rmpp_wc);
 }
···
         list_add_tail(&rmpp_recv->list, &agent->rmpp_list);
 
         return cur_rmpp_recv;
-}
-
-static int data_offset(u8 mgmt_class)
-{
-        if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
-                return offsetof(struct ib_sa_mad, data);
-        else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-                 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
-                return offsetof(struct ib_vendor_mad, data);
-        else
-                return offsetof(struct ib_rmpp_mad, data);
-}
-
-static void format_ack(struct ib_rmpp_mad *ack,
-                       struct ib_rmpp_mad *data,
-                       struct mad_rmpp_recv *rmpp_recv)
-{
-        unsigned long flags;
-
-        memcpy(&ack->mad_hdr, &data->mad_hdr,
-               data_offset(data->mad_hdr.mgmt_class));
-
-        ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
-        ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
-        ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
-
-        spin_lock_irqsave(&rmpp_recv->lock, flags);
-        rmpp_recv->last_ack = rmpp_recv->seg_num;
-        ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
-        ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
-        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
-}
-
-static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
-                     struct ib_mad_recv_wc *recv_wc)
-{
-        struct ib_mad_send_buf *msg;
-        struct ib_send_wr *bad_send_wr;
-        int hdr_len, ret;
-
-        hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr);
-        msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
-                                 recv_wc->wc->pkey_index, rmpp_recv->ah, 1,
-                                 hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len,
-                                 GFP_KERNEL);
-        if (!msg)
-                return;
-
-        format_ack((struct ib_rmpp_mad *) msg->mad,
-                   (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
-        ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr,
-                               &bad_send_wr);
-        if (ret)
-                ib_free_send_mad(msg);
 }
 
 static inline int get_last_flag(struct ib_mad_recv_buf *seg)
···
         return ib_send_mad(mad_send_wr);
 }
 
+static void abort_send(struct ib_mad_agent_private *agent, __be64 tid,
+                       u8 rmpp_status)
+{
+        struct ib_mad_send_wr_private *mad_send_wr;
+        struct ib_mad_send_wc wc;
+        unsigned long flags;
+
+        spin_lock_irqsave(&agent->lock, flags);
+        mad_send_wr = ib_find_send_mad(agent, tid);
+        if (!mad_send_wr)
+                goto out;       /* Unmatched send */
+
+        if ((mad_send_wr->last_ack == mad_send_wr->total_seg) ||
+            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
+                goto out;       /* Send is already done */
+
+        ib_mark_mad_done(mad_send_wr);
+        spin_unlock_irqrestore(&agent->lock, flags);
+
+        wc.status = IB_WC_REM_ABORT_ERR;
+        wc.vendor_err = rmpp_status;
+        wc.wr_id = mad_send_wr->wr_id;
+        ib_mad_complete_send_wr(mad_send_wr, &wc);
+        return;
+out:
+        spin_unlock_irqrestore(&agent->lock, flags);
+}
+
 static void process_rmpp_ack(struct ib_mad_agent_private *agent,
                              struct ib_mad_recv_wc *mad_recv_wc)
 {
···
         int seg_num, newwin, ret;
 
         rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
-        if (rmpp_mad->rmpp_hdr.rmpp_status)
+        if (rmpp_mad->rmpp_hdr.rmpp_status) {
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           IB_MGMT_RMPP_STATUS_BAD_STATUS);
+                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                 return;
+        }
 
         seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
         newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
+        if (newwin < seg_num) {
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           IB_MGMT_RMPP_STATUS_W2S);
+                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
+                return;
+        }
 
         spin_lock_irqsave(&agent->lock, flags);
         mad_send_wr = ib_find_send_mad(agent, rmpp_mad->mad_hdr.tid);
···
             (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                 goto out;       /* Send is already done */
 
-        if (seg_num > mad_send_wr->total_seg)
-                goto out;       /* Bad ACK */
+        if (seg_num > mad_send_wr->total_seg || seg_num > mad_send_wr->newwin) {
+                spin_unlock_irqrestore(&agent->lock, flags);
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           IB_MGMT_RMPP_STATUS_S2B);
+                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
+                return;
+        }
 
         if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
                 goto out;       /* Old ACK */
···
         spin_unlock_irqrestore(&agent->lock, flags);
 }
 
+static struct ib_mad_recv_wc *
+process_rmpp_data(struct ib_mad_agent_private *agent,
+                  struct ib_mad_recv_wc *mad_recv_wc)
+{
+        struct ib_rmpp_hdr *rmpp_hdr;
+        u8 rmpp_status;
+
+        rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;
+
+        if (rmpp_hdr->rmpp_status) {
+                rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
+                goto bad;
+        }
+
+        if (rmpp_hdr->seg_num == __constant_htonl(1)) {
+                if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
+                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
+                        goto bad;
+                }
+                return start_rmpp(agent, mad_recv_wc);
+        } else {
+                if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
+                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
+                        goto bad;
+                }
+                return continue_rmpp(agent, mad_recv_wc);
+        }
+bad:
+        nack_recv(agent, mad_recv_wc, rmpp_status);
+        ib_free_recv_mad(mad_recv_wc);
+        return NULL;
+}
+
+static void process_rmpp_stop(struct ib_mad_agent_private *agent,
+                              struct ib_mad_recv_wc *mad_recv_wc)
+{
+        struct ib_rmpp_mad *rmpp_mad;
+
+        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+
+        if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           IB_MGMT_RMPP_STATUS_BAD_STATUS);
+                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
+        } else
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           rmpp_mad->rmpp_hdr.rmpp_status);
+}
+
+static void process_rmpp_abort(struct ib_mad_agent_private *agent,
+                               struct ib_mad_recv_wc *mad_recv_wc)
+{
+        struct ib_rmpp_mad *rmpp_mad;
+
+        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
+
+        if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
+            rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           IB_MGMT_RMPP_STATUS_BAD_STATUS);
+                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
+        } else
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           rmpp_mad->rmpp_hdr.rmpp_status);
+}
+
 struct ib_mad_recv_wc *
 ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
                         struct ib_mad_recv_wc *mad_recv_wc)
···
         if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
                 return mad_recv_wc;
 
-        if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION)
+        if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           IB_MGMT_RMPP_STATUS_UNV);
+                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
                 goto out;
+        }
 
         switch (rmpp_mad->rmpp_hdr.rmpp_type) {
         case IB_MGMT_RMPP_TYPE_DATA:
-                if (rmpp_mad->rmpp_hdr.seg_num == __constant_htonl(1))
-                        return start_rmpp(agent, mad_recv_wc);
-                else
-                        return continue_rmpp(agent, mad_recv_wc);
+                return process_rmpp_data(agent, mad_recv_wc);
         case IB_MGMT_RMPP_TYPE_ACK:
                 process_rmpp_ack(agent, mad_recv_wc);
                 break;
         case IB_MGMT_RMPP_TYPE_STOP:
+                process_rmpp_stop(agent, mad_recv_wc);
+                break;
         case IB_MGMT_RMPP_TYPE_ABORT:
-                /* TODO: process_rmpp_nack(agent, mad_recv_wc); */
+                process_rmpp_abort(agent, mad_recv_wc);
                 break;
         default:
+                abort_send(agent, rmpp_mad->mad_hdr.tid,
+                           IB_MGMT_RMPP_STATUS_BADT);
+                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
                 break;
         }
 out:
···
         if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
                 msg = (struct ib_mad_send_buf *) (unsigned long)
                       mad_send_wc->wr_id;
-                ib_free_send_mad(msg);
+                if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK)
+                        ib_free_send_mad(msg);
+                else
+                        free_msg(msg);
                 return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */
         }
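
[Note] The validation added to process_rmpp_ack() above can be read as a single
predicate over the incoming ACK. A sketch for reference only; this helper does
not appear in the patch, and total_seg and send_win stand in for
mad_send_wr->total_seg and mad_send_wr->newwin:

/* Returns the RMPP status an invalid ACK is aborted with, or 0 if acceptable. */
static u8 ack_abort_status(struct ib_rmpp_mad *rmpp_mad,
                           int total_seg, int send_win)
{
        int seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
        int newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);

        if (rmpp_mad->rmpp_hdr.rmpp_status)     /* ACKs must carry status 0 */
                return IB_MGMT_RMPP_STATUS_BAD_STATUS;
        if (newwin < seg_num)                   /* window smaller than ACKed segment */
                return IB_MGMT_RMPP_STATUS_W2S;
        if (seg_num > total_seg || seg_num > send_win)
                return IB_MGMT_RMPP_STATUS_S2B; /* segment number too big */
        return 0;
}

Old or duplicate ACKs (newwin < mad_send_wr->newwin or seg_num < last_ack) are
still simply ignored rather than aborted, since they are harmless.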
drivers/infiniband/include/ib_mad.h (+2 -0)
···
 
 #define IB_MGMT_RMPP_STATUS_SUCCESS     0
 #define IB_MGMT_RMPP_STATUS_RESX        1
+#define IB_MGMT_RMPP_STATUS_ABORT_MIN   118
 #define IB_MGMT_RMPP_STATUS_T2L         118
 #define IB_MGMT_RMPP_STATUS_BAD_LEN     119
 #define IB_MGMT_RMPP_STATUS_BAD_SEG     120
···
 #define IB_MGMT_RMPP_STATUS_UNV         125
 #define IB_MGMT_RMPP_STATUS_TMR         126
 #define IB_MGMT_RMPP_STATUS_UNSPEC      127
+#define IB_MGMT_RMPP_STATUS_ABORT_MAX   127
 
 #define IB_QP0          0
 #define IB_QP1          __constant_htonl(1)
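
[Note] ABORT_MIN and ABORT_MAX bracket the abort status codes (T2L through
Unspec, 118-127), letting process_rmpp_abort() range-check the status field of
an incoming ABORT with two comparisons; anything outside the range is itself
treated as a protocol error. The equivalent check, as a sketch (this helper is
not in the patch):

/* An ABORT status outside [ABORT_MIN, ABORT_MAX] is invalid and is
 * answered with IB_MGMT_RMPP_STATUS_BAD_STATUS. */
static inline int rmpp_abort_status_valid(u8 rmpp_status)
{
        return rmpp_status >= IB_MGMT_RMPP_STATUS_ABORT_MIN &&
               rmpp_status <= IB_MGMT_RMPP_STATUS_ABORT_MAX;
}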