Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/{hfi1, qib, rdmavt}: Move send completion logic to rdmavt

Move the send completion code into rdmavt so that the qib and hfi1
drivers can share the same logic.

Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Brian Welty <brian.welty@intel.com>
Signed-off-by: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Signed-off-by: Harish Chegondi <harish.chegondi@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

Authored by Venkata Sandeep Dhanalakota and committed by Jason Gunthorpe.
116aa033 019f118b

+124 -114
+7 -7
drivers/infiniband/hw/hfi1/rc.c
··· 309 309 } 310 310 clear_ahg(qp); 311 311 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 312 - hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ? 312 + rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ? 313 313 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); 314 314 /* will get called again */ 315 315 goto done_free_tx; ··· 378 378 wqe->wr.ex.invalidate_rkey); 379 379 local_ops = 1; 380 380 } 381 - hfi1_send_complete(qp, wqe, 382 - err ? IB_WC_LOC_PROT_ERR 383 - : IB_WC_SUCCESS); 381 + rvt_send_complete(qp, wqe, 382 + err ? IB_WC_LOC_PROT_ERR 383 + : IB_WC_SUCCESS); 384 384 if (local_ops) 385 385 atomic_dec(&qp->local_ops_pending); 386 386 goto done_free_tx; ··· 1043 1043 hfi1_migrate_qp(qp); 1044 1044 qp->s_retry = qp->s_retry_cnt; 1045 1045 } else if (qp->s_last == qp->s_acked) { 1046 - hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); 1046 + rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); 1047 1047 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1048 1048 return; 1049 1049 } else { /* need to handle delayed completion */ ··· 1468 1468 ibp->rvp.n_other_naks++; 1469 1469 class_b: 1470 1470 if (qp->s_last == qp->s_acked) { 1471 - hfi1_send_complete(qp, wqe, status); 1471 + rvt_send_complete(qp, wqe, status); 1472 1472 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1473 1473 } 1474 1474 break; ··· 1706 1706 status = IB_WC_LOC_LEN_ERR; 1707 1707 ack_err: 1708 1708 if (qp->s_last == qp->s_acked) { 1709 - hfi1_send_complete(qp, wqe, status); 1709 + rvt_send_complete(qp, wqe, status); 1710 1710 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1711 1711 } 1712 1712 ack_done:
+2 -43
drivers/infiniband/hw/hfi1/ruc.c
··· 411 411 ibp->rvp.n_loop_pkts++; 412 412 flush_send: 413 413 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; 414 - hfi1_send_complete(sqp, wqe, send_status); 414 + rvt_send_complete(sqp, wqe, send_status); 415 415 if (local_ops) { 416 416 atomic_dec(&sqp->local_ops_pending); 417 417 local_ops = 0; ··· 459 459 460 460 serr: 461 461 spin_lock_irqsave(&sqp->s_lock, flags); 462 - hfi1_send_complete(sqp, wqe, send_status); 462 + rvt_send_complete(sqp, wqe, send_status); 463 463 if (sqp->ibqp.qp_type == IB_QPT_RC) { 464 464 int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); 465 465 ··· 921 921 } while (make_req(qp, &ps)); 922 922 iowait_starve_clear(ps.pkts_sent, &priv->s_iowait); 923 923 spin_unlock_irqrestore(&qp->s_lock, ps.flags); 924 - } 925 - 926 - /* 927 - * This should be called with s_lock held. 928 - */ 929 - void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, 930 - enum ib_wc_status status) 931 - { 932 - u32 old_last, last; 933 - 934 - if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) 935 - return; 936 - 937 - last = qp->s_last; 938 - old_last = last; 939 - trace_hfi1_qp_send_completion(qp, wqe, last); 940 - if (++last >= qp->s_size) 941 - last = 0; 942 - trace_hfi1_qp_send_completion(qp, wqe, last); 943 - qp->s_last = last; 944 - /* See post_send() */ 945 - barrier(); 946 - rvt_put_swqe(wqe); 947 - if (qp->ibqp.qp_type == IB_QPT_UD || 948 - qp->ibqp.qp_type == IB_QPT_SMI || 949 - qp->ibqp.qp_type == IB_QPT_GSI) 950 - atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); 951 - 952 - rvt_qp_swqe_complete(qp, 953 - wqe, 954 - ib_hfi1_wc_opcode[wqe->wr.opcode], 955 - status); 956 - 957 - if (qp->s_acked == old_last) 958 - qp->s_acked = last; 959 - if (qp->s_cur == old_last) 960 - qp->s_cur = last; 961 - if (qp->s_tail == old_last) 962 - qp->s_tail = last; 963 - if (qp->state == IB_QPS_SQD && last == qp->s_cur) 964 - qp->s_draining = 0; 965 924 }
+2 -2
drivers/infiniband/hw/hfi1/uc.c
··· 88 88 } 89 89 clear_ahg(qp); 90 90 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 91 - hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 91 + rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 92 92 goto done_free_tx; 93 93 } 94 94 ··· 140 140 qp, wqe->wr.ex.invalidate_rkey); 141 141 local_ops = 1; 142 142 } 143 - hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR 143 + rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR 144 144 : IB_WC_SUCCESS); 145 145 if (local_ops) 146 146 atomic_dec(&qp->local_ops_pending);
+2 -2
drivers/infiniband/hw/hfi1/ud.c
··· 518 518 goto bail; 519 519 } 520 520 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 521 - hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 521 + rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 522 522 goto done_free_tx; 523 523 } 524 524 ··· 560 560 ud_loopback(qp, wqe); 561 561 spin_lock_irqsave(&qp->s_lock, tflags); 562 562 ps->flags = tflags; 563 - hfi1_send_complete(qp, wqe, IB_WC_SUCCESS); 563 + rvt_send_complete(qp, wqe, IB_WC_SUCCESS); 564 564 goto done_free_tx; 565 565 } 566 566 }
+6 -3
drivers/infiniband/hw/hfi1/verbs.c
··· 492 492 493 493 spin_lock(&qp->s_lock); 494 494 if (tx->wqe) { 495 - hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS); 495 + rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS); 496 496 } else if (qp->ibqp.qp_type == IB_QPT_RC) { 497 497 struct hfi1_opa_header *hdr; 498 498 ··· 938 938 pio_bail: 939 939 if (qp->s_wqe) { 940 940 spin_lock_irqsave(&qp->s_lock, flags); 941 - hfi1_send_complete(qp, qp->s_wqe, wc_status); 941 + rvt_send_complete(qp, qp->s_wqe, wc_status); 942 942 spin_unlock_irqrestore(&qp->s_lock, flags); 943 943 } else if (qp->ibqp.qp_type == IB_QPT_RC) { 944 944 spin_lock_irqsave(&qp->s_lock, flags); ··· 1145 1145 hfi1_cdbg(PIO, "%s() Failed. Completing with err", 1146 1146 __func__); 1147 1147 spin_lock_irqsave(&qp->s_lock, flags); 1148 - hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); 1148 + rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); 1149 1149 spin_unlock_irqrestore(&qp->s_lock, flags); 1150 1150 } 1151 1151 return -EINVAL; ··· 1734 1734 1735 1735 /* post send table */ 1736 1736 dd->verbs_dev.rdi.post_parms = hfi1_post_parms; 1737 + 1738 + /* opcode translation table */ 1739 + dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode; 1737 1740 1738 1741 ppd = dd->pport; 1739 1742 for (i = 0; i < dd->num_pports; i++, ppd++)
-3
drivers/infiniband/hw/hfi1/verbs.h
··· 363 363 364 364 void hfi1_do_send(struct rvt_qp *qp, bool in_thread); 365 365 366 - void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, 367 - enum ib_wc_status status); 368 - 369 366 void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn); 370 367 371 368 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
+4 -4
drivers/infiniband/hw/qib/qib_rc.c
··· 254 254 goto bail; 255 255 } 256 256 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 257 - qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ? 257 + rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ? 258 258 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); 259 259 /* will get called again */ 260 260 goto done; ··· 838 838 qib_migrate_qp(qp); 839 839 qp->s_retry = qp->s_retry_cnt; 840 840 } else if (qp->s_last == qp->s_acked) { 841 - qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); 841 + rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); 842 842 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 843 843 return; 844 844 } else /* XXX need to handle delayed completion */ ··· 1221 1221 ibp->rvp.n_other_naks++; 1222 1222 class_b: 1223 1223 if (qp->s_last == qp->s_acked) { 1224 - qib_send_complete(qp, wqe, status); 1224 + rvt_send_complete(qp, wqe, status); 1225 1225 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1226 1226 } 1227 1227 break; ··· 1492 1492 status = IB_WC_LOC_LEN_ERR; 1493 1493 ack_err: 1494 1494 if (qp->s_last == qp->s_acked) { 1495 - qib_send_complete(qp, wqe, status); 1495 + rvt_send_complete(qp, wqe, status); 1496 1496 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1497 1497 } 1498 1498 ack_done:
+2 -41
drivers/infiniband/hw/qib/qib_ruc.c
··· 403 403 ibp->rvp.n_loop_pkts++; 404 404 flush_send: 405 405 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; 406 - qib_send_complete(sqp, wqe, send_status); 406 + rvt_send_complete(sqp, wqe, send_status); 407 407 goto again; 408 408 409 409 rnr_nak: ··· 447 447 448 448 serr: 449 449 spin_lock_irqsave(&sqp->s_lock, flags); 450 - qib_send_complete(sqp, wqe, send_status); 450 + rvt_send_complete(sqp, wqe, send_status); 451 451 if (sqp->ibqp.qp_type == IB_QPT_RC) { 452 452 int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); 453 453 ··· 612 612 } while (make_req(qp, &flags)); 613 613 614 614 spin_unlock_irqrestore(&qp->s_lock, flags); 615 - } 616 - 617 - /* 618 - * This should be called with s_lock held. 619 - */ 620 - void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, 621 - enum ib_wc_status status) 622 - { 623 - u32 old_last, last; 624 - 625 - if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) 626 - return; 627 - 628 - last = qp->s_last; 629 - old_last = last; 630 - if (++last >= qp->s_size) 631 - last = 0; 632 - qp->s_last = last; 633 - /* See post_send() */ 634 - barrier(); 635 - rvt_put_swqe(wqe); 636 - if (qp->ibqp.qp_type == IB_QPT_UD || 637 - qp->ibqp.qp_type == IB_QPT_SMI || 638 - qp->ibqp.qp_type == IB_QPT_GSI) 639 - atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); 640 - 641 - rvt_qp_swqe_complete(qp, 642 - wqe, 643 - ib_qib_wc_opcode[wqe->wr.opcode], 644 - status); 645 - 646 - if (qp->s_acked == old_last) 647 - qp->s_acked = last; 648 - if (qp->s_cur == old_last) 649 - qp->s_cur = last; 650 - if (qp->s_tail == old_last) 651 - qp->s_tail = last; 652 - if (qp->state == IB_QPS_SQD && last == qp->s_cur) 653 - qp->s_draining = 0; 654 615 }
+1 -1
drivers/infiniband/hw/qib/qib_sdma.c
··· 651 651 if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) 652 652 rvt_error_qp(qp, IB_WC_GENERAL_ERR); 653 653 } else if (qp->s_wqe) 654 - qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); 654 + rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); 655 655 spin_unlock(&qp->s_lock); 656 656 spin_unlock(&qp->r_lock); 657 657 /* return zero to process the next send work request */
+1 -1
drivers/infiniband/hw/qib/qib_uc.c
··· 68 68 goto bail; 69 69 } 70 70 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 71 - qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 71 + rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 72 72 goto done; 73 73 } 74 74
+2 -2
drivers/infiniband/hw/qib/qib_ud.c
··· 260 260 goto bail; 261 261 } 262 262 wqe = rvt_get_swqe_ptr(qp, qp->s_last); 263 - qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 263 + rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 264 264 goto done; 265 265 } 266 266 ··· 304 304 qib_ud_loopback(qp, wqe); 305 305 spin_lock_irqsave(&qp->s_lock, tflags); 306 306 *flags = tflags; 307 - qib_send_complete(qp, wqe, IB_WC_SUCCESS); 307 + rvt_send_complete(qp, wqe, IB_WC_SUCCESS); 308 308 goto done; 309 309 } 310 310 }
+5 -2
drivers/infiniband/hw/qib/qib_verbs.c
··· 731 731 732 732 spin_lock(&qp->s_lock); 733 733 if (tx->wqe) 734 - qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); 734 + rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS); 735 735 else if (qp->ibqp.qp_type == IB_QPT_RC) { 736 736 struct ib_header *hdr; 737 737 ··· 1004 1004 } 1005 1005 if (qp->s_wqe) { 1006 1006 spin_lock_irqsave(&qp->s_lock, flags); 1007 - qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); 1007 + rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); 1008 1008 spin_unlock_irqrestore(&qp->s_lock, flags); 1009 1009 } else if (qp->ibqp.qp_type == IB_QPT_RC) { 1010 1010 spin_lock_irqsave(&qp->s_lock, flags); ··· 1491 1491 rdi->dparms.props.max_mcast_grp; 1492 1492 /* post send table */ 1493 1493 dd->verbs_dev.rdi.post_parms = qib_post_parms; 1494 + 1495 + /* opcode translation table */ 1496 + dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode; 1494 1497 } 1495 1498 1496 1499 /**
-3
drivers/infiniband/hw/qib/qib_verbs.h
··· 331 331 332 332 void qib_do_send(struct rvt_qp *qp); 333 333 334 - void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, 335 - enum ib_wc_status status); 336 - 337 334 void qib_send_rc_ack(struct rvt_qp *qp); 338 335 339 336 int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
+43
drivers/infiniband/sw/rdmavt/qp.c
··· 2658 2658 } 2659 2659 EXPORT_SYMBOL(rvt_qp_iter); 2660 2660 2661 + /* 2662 + * This should be called with s_lock held. 2663 + */ 2664 + void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, 2665 + enum ib_wc_status status) 2666 + { 2667 + u32 old_last, last; 2668 + struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); 2669 + 2670 + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) 2671 + return; 2672 + 2673 + last = qp->s_last; 2674 + old_last = last; 2675 + trace_rvt_qp_send_completion(qp, wqe, last); 2676 + if (++last >= qp->s_size) 2677 + last = 0; 2678 + trace_rvt_qp_send_completion(qp, wqe, last); 2679 + qp->s_last = last; 2680 + /* See post_send() */ 2681 + barrier(); 2682 + rvt_put_swqe(wqe); 2683 + if (qp->ibqp.qp_type == IB_QPT_UD || 2684 + qp->ibqp.qp_type == IB_QPT_SMI || 2685 + qp->ibqp.qp_type == IB_QPT_GSI) 2686 + atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); 2687 + 2688 + rvt_qp_swqe_complete(qp, 2689 + wqe, 2690 + rdi->wc_opcode[wqe->wr.opcode], 2691 + status); 2692 + 2693 + if (qp->s_acked == old_last) 2694 + qp->s_acked = last; 2695 + if (qp->s_cur == old_last) 2696 + qp->s_cur = last; 2697 + if (qp->s_tail == old_last) 2698 + qp->s_tail = last; 2699 + if (qp->state == IB_QPS_SQD && last == qp->s_cur) 2700 + qp->s_draining = 0; 2701 + } 2702 + EXPORT_SYMBOL(rvt_send_complete); 2703 + 2661 2704 /** 2662 2705 * rvt_copy_sge - copy data to SGE memory 2663 2706 * @qp: associated QP
+42
drivers/infiniband/sw/rdmavt/trace_tx.h
··· 153 153 ) 154 154 ); 155 155 156 + TRACE_EVENT( 157 + rvt_qp_send_completion, 158 + TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx), 159 + TP_ARGS(qp, wqe, idx), 160 + TP_STRUCT__entry( 161 + RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device)) 162 + __field(struct rvt_swqe *, wqe) 163 + __field(u64, wr_id) 164 + __field(u32, qpn) 165 + __field(u32, qpt) 166 + __field(u32, length) 167 + __field(u32, idx) 168 + __field(u32, ssn) 169 + __field(enum ib_wr_opcode, opcode) 170 + __field(int, send_flags) 171 + ), 172 + TP_fast_assign( 173 + RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device)) 174 + __entry->wqe = wqe; 175 + __entry->wr_id = wqe->wr.wr_id; 176 + __entry->qpn = qp->ibqp.qp_num; 177 + __entry->qpt = qp->ibqp.qp_type; 178 + __entry->length = wqe->length; 179 + __entry->idx = idx; 180 + __entry->ssn = wqe->ssn; 181 + __entry->opcode = wqe->wr.opcode; 182 + __entry->send_flags = wqe->wr.send_flags; 183 + ), 184 + TP_printk( 185 + "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x", 186 + __get_str(dev), 187 + __entry->qpn, 188 + __entry->qpt, 189 + __entry->wqe, 190 + __entry->idx, 191 + __entry->wr_id, 192 + __entry->length, 193 + __entry->ssn, 194 + __entry->opcode, 195 + __entry->send_flags 196 + ) 197 + ); 156 198 #endif /* __RVT_TRACE_TX_H */ 157 199 158 200 #undef TRACE_INCLUDE_PATH
+3
include/rdma/rdma_vt.h
··· 398 398 /* post send table */ 399 399 const struct rvt_operation_params *post_parms; 400 400 401 + /* opcode translation table */ 402 + const enum ib_wc_opcode *wc_opcode; 403 + 401 404 /* Driver specific helper functions */ 402 405 struct rvt_driver_provided driver_f; 403 406
+2
include/rdma/rdmavt_qp.h
··· 681 681 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss, 682 682 void *data, u32 length, 683 683 bool release, bool copy_last); 684 + void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, 685 + enum ib_wc_status status); 684 686 685 687 /** 686 688 * struct rvt_qp_iter - the iterator for QPs