[PATCH] IB/mthca: Add SRQ implementation

Add mthca support for shared receive queues (SRQs),
including userspace SRQs.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
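
For readers unfamiliar with SRQs: one shared receive queue can supply receive WQEs to any number of QPs, so a ULP no longer has to keep every QP's own receive queue stocked. The sketch below is not part of this patch; it only illustrates the intended kernel verbs usage on top of this support (the helper names and sizes are made up, and the header path is the one used by current kernels):

#include <rdma/ib_verbs.h>

/* Hypothetical handler name; a real ULP would watch for IB_EVENT_SRQ_ERR. */
static void my_srq_event(struct ib_event *event, void *context)
{
}

/* One SRQ can feed the receive side of many QPs. */
static struct ib_srq *create_shared_rq(struct ib_pd *pd)
{
	struct ib_srq_init_attr attr = {
		.event_handler = my_srq_event,
		.attr = {
			.max_wr  = 1024,	/* outstanding receive WQEs */
			.max_sge = 1,
		},
	};

	return ib_create_srq(pd, &attr);
}

static struct ib_qp *create_qp_on_srq(struct ib_pd *pd, struct ib_cq *cq,
				      struct ib_srq *srq)
{
	struct ib_qp_init_attr attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.srq         = srq,	/* receive WRs are taken from the SRQ */
		.cap         = { .max_send_wr = 64, .max_send_sge = 1 },
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &attr);
}

Receive completions for QPs created this way carry the wr_id that was posted to the SRQ, which is what the mthca_cq.c changes below handle.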

Authored and committed by Roland Dreier
ec34a922 d20a4019

13 files changed, 857 insertions(+), 25 deletions(-)
+1 -1
drivers/infiniband/hw/mthca/Makefile
@@ -9,4 +9,4 @@
 ib_mthca-y :=	mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
 		mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
 		mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
-		mthca_provider.o mthca_memfree.o mthca_uar.o
+		mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
+24
drivers/infiniband/hw/mthca/mthca_cmd.c
··· 109 109 CMD_SW2HW_SRQ = 0x35, 110 110 CMD_HW2SW_SRQ = 0x36, 111 111 CMD_QUERY_SRQ = 0x37, 112 + CMD_ARM_SRQ = 0x40, 112 113 113 114 /* QP/EE commands */ 114 115 CMD_RST2INIT_QPEE = 0x19, ··· 1033 1032 1034 1033 mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n", 1035 1034 dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz); 1035 + mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", 1036 + dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz); 1036 1037 mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", 1037 1038 dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz); 1038 1039 mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", ··· 1501 1498 return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0, 1502 1499 CMD_HW2SW_CQ, 1503 1500 CMD_TIME_CLASS_A, status); 1501 + } 1502 + 1503 + int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1504 + int srq_num, u8 *status) 1505 + { 1506 + return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, 1507 + CMD_TIME_CLASS_A, status); 1508 + } 1509 + 1510 + int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1511 + int srq_num, u8 *status) 1512 + { 1513 + return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, 1514 + CMD_HW2SW_SRQ, 1515 + CMD_TIME_CLASS_A, status); 1516 + } 1517 + 1518 + int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) 1519 + { 1520 + return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, 1521 + CMD_TIME_CLASS_B, status); 1504 1522 } 1505 1523 1506 1524 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
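
Note that CMD_ARM_SRQ and its mthca_ARM_SRQ() wrapper are added here but not called anywhere else in this patch. A hedged sketch of how a later mthca_modify_srq() might use it to arm the SRQ limit watermark (so the HCA can raise IB_EVENT_SRQ_LIMIT_REACHED); the function below is hypothetical and not part of this patch:

#include "mthca_dev.h"
#include "mthca_cmd.h"

/* Hypothetical helper: arm the SRQ so the HCA signals when fewer than
 * "limit" WQEs remain posted. */
static int mthca_set_srq_limit(struct mthca_dev *dev, struct mthca_srq *srq,
			       int limit)
{
	u8 status;
	int err;

	err = mthca_ARM_SRQ(dev, srq->srqn, limit, &status);
	if (err)
		return err;
	if (status) {
		mthca_warn(dev, "ARM_SRQ returned status 0x%02x\n", status);
		return -EINVAL;
	}

	return 0;
}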
+5
drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -298,6 +298,11 @@
 		   int cq_num, u8 *status);
 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
 		   int cq_num, u8 *status);
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int srq_num, u8 *status);
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+		    int srq_num, u8 *status);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
 		    int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
 		    u8 *status);
+20 -12
drivers/infiniband/hw/mthca/mthca_cq.c
··· 224 224 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); 225 225 } 226 226 227 - void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn) 227 + void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, 228 + struct mthca_srq *srq) 228 229 { 229 230 struct mthca_cq *cq; 230 231 struct mthca_cqe *cqe; ··· 266 265 */ 267 266 while (prod_index > cq->cons_index) { 268 267 cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe); 269 - if (cqe->my_qpn == cpu_to_be32(qpn)) 268 + if (cqe->my_qpn == cpu_to_be32(qpn)) { 269 + if (srq) 270 + mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe)); 270 271 ++nfreed; 272 + } 271 273 else if (nfreed) 272 274 memcpy(get_cqe(cq, (prod_index - 1 + nfreed) & 273 275 cq->ibcq.cqe), ··· 459 455 >> wq->wqe_shift); 460 456 entry->wr_id = (*cur_qp)->wrid[wqe_index + 461 457 (*cur_qp)->rq.max]; 458 + } else if ((*cur_qp)->ibqp.srq) { 459 + struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); 460 + u32 wqe = be32_to_cpu(cqe->wqe); 461 + wq = NULL; 462 + wqe_index = wqe >> srq->wqe_shift; 463 + entry->wr_id = srq->wrid[wqe_index]; 464 + mthca_free_srq_wqe(srq, wqe); 462 465 } else { 463 466 wq = &(*cur_qp)->rq; 464 467 wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift; 465 468 entry->wr_id = (*cur_qp)->wrid[wqe_index]; 466 469 } 467 470 468 - if (wq->last_comp < wqe_index) 469 - wq->tail += wqe_index - wq->last_comp; 470 - else 471 - wq->tail += wqe_index + wq->max - wq->last_comp; 471 + if (wq) { 472 + if (wq->last_comp < wqe_index) 473 + wq->tail += wqe_index - wq->last_comp; 474 + else 475 + wq->tail += wqe_index + wq->max - wq->last_comp; 472 476 473 - wq->last_comp = wqe_index; 474 - 475 - if (0) 476 - mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n", 477 - is_send ? "Send" : "Receive", 478 - (*cur_qp)->qpn, wqe_index, wq->max); 477 + wq->last_comp = wqe_index; 478 + } 479 479 480 480 if (is_error) { 481 481 err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
+23 -1
drivers/infiniband/hw/mthca/mthca_dev.h
··· 218 218 struct mthca_icm_table *table; 219 219 }; 220 220 221 + struct mthca_srq_table { 222 + struct mthca_alloc alloc; 223 + spinlock_t lock; 224 + struct mthca_array srq; 225 + struct mthca_icm_table *table; 226 + }; 227 + 221 228 struct mthca_qp_table { 222 229 struct mthca_alloc alloc; 223 230 u32 rdb_base; ··· 306 299 struct mthca_mr_table mr_table; 307 300 struct mthca_eq_table eq_table; 308 301 struct mthca_cq_table cq_table; 302 + struct mthca_srq_table srq_table; 309 303 struct mthca_qp_table qp_table; 310 304 struct mthca_av_table av_table; 311 305 struct mthca_mcg_table mcg_table; ··· 380 372 int mthca_init_mr_table(struct mthca_dev *dev); 381 373 int mthca_init_eq_table(struct mthca_dev *dev); 382 374 int mthca_init_cq_table(struct mthca_dev *dev); 375 + int mthca_init_srq_table(struct mthca_dev *dev); 383 376 int mthca_init_qp_table(struct mthca_dev *dev); 384 377 int mthca_init_av_table(struct mthca_dev *dev); 385 378 int mthca_init_mcg_table(struct mthca_dev *dev); ··· 390 381 void mthca_cleanup_mr_table(struct mthca_dev *dev); 391 382 void mthca_cleanup_eq_table(struct mthca_dev *dev); 392 383 void mthca_cleanup_cq_table(struct mthca_dev *dev); 384 + void mthca_cleanup_srq_table(struct mthca_dev *dev); 393 385 void mthca_cleanup_qp_table(struct mthca_dev *dev); 394 386 void mthca_cleanup_av_table(struct mthca_dev *dev); 395 387 void mthca_cleanup_mcg_table(struct mthca_dev *dev); ··· 441 431 void mthca_free_cq(struct mthca_dev *dev, 442 432 struct mthca_cq *cq); 443 433 void mthca_cq_event(struct mthca_dev *dev, u32 cqn); 444 - void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn); 434 + void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, 435 + struct mthca_srq *srq); 436 + 437 + int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, 438 + struct ib_srq_attr *attr, struct mthca_srq *srq); 439 + void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); 440 + void mthca_srq_event(struct mthca_dev *dev, u32 srqn, 441 + enum ib_event_type event_type); 442 + void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr); 443 + int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, 444 + struct ib_recv_wr **bad_wr); 445 + int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr, 446 + struct ib_recv_wr **bad_wr); 445 447 446 448 void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 447 449 enum ib_event_type event_type);
+43 -5
drivers/infiniband/hw/mthca/mthca_main.c
··· 253 253 profile = default_profile; 254 254 profile.num_uar = dev_lim.uar_size / PAGE_SIZE; 255 255 profile.uarc_size = 0; 256 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 257 + profile.num_srq = dev_lim.max_srqs; 256 258 257 259 err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 258 260 if (err < 0) ··· 426 424 } 427 425 428 426 mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, 429 - dev_lim->cqc_entry_sz, 430 - mdev->limits.num_cqs, 431 - mdev->limits.reserved_cqs, 0); 427 + dev_lim->cqc_entry_sz, 428 + mdev->limits.num_cqs, 429 + mdev->limits.reserved_cqs, 0); 432 430 if (!mdev->cq_table.table) { 433 431 mthca_err(mdev, "Failed to map CQ context memory, aborting.\n"); 434 432 err = -ENOMEM; 435 433 goto err_unmap_rdb; 434 + } 435 + 436 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { 437 + mdev->srq_table.table = 438 + mthca_alloc_icm_table(mdev, init_hca->srqc_base, 439 + dev_lim->srq_entry_sz, 440 + mdev->limits.num_srqs, 441 + mdev->limits.reserved_srqs, 0); 442 + if (!mdev->srq_table.table) { 443 + mthca_err(mdev, "Failed to map SRQ context memory, " 444 + "aborting.\n"); 445 + err = -ENOMEM; 446 + goto err_unmap_cq; 447 + } 436 448 } 437 449 438 450 /* ··· 464 448 if (!mdev->mcg_table.table) { 465 449 mthca_err(mdev, "Failed to map MCG context memory, aborting.\n"); 466 450 err = -ENOMEM; 467 - goto err_unmap_cq; 451 + goto err_unmap_srq; 468 452 } 469 453 470 454 return 0; 455 + 456 + err_unmap_srq: 457 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 458 + mthca_free_icm_table(mdev, mdev->srq_table.table); 471 459 472 460 err_unmap_cq: 473 461 mthca_free_icm_table(mdev, mdev->cq_table.table); ··· 552 532 profile = default_profile; 553 533 profile.num_uar = dev_lim.uar_size / PAGE_SIZE; 554 534 profile.num_udav = 0; 535 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 536 + profile.num_srq = dev_lim.max_srqs; 555 537 556 538 icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); 557 539 if ((int) icm_size < 0) { ··· 580 558 return 0; 581 559 582 560 err_free_icm: 561 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 562 + mthca_free_icm_table(mdev, mdev->srq_table.table); 583 563 mthca_free_icm_table(mdev, mdev->cq_table.table); 584 564 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); 585 565 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); ··· 611 587 mthca_CLOSE_HCA(mdev, 0, &status); 612 588 613 589 if (mthca_is_memfree(mdev)) { 590 + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) 591 + mthca_free_icm_table(mdev, mdev->srq_table.table); 614 592 mthca_free_icm_table(mdev, mdev->cq_table.table); 615 593 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); 616 594 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); ··· 757 731 goto err_cmd_poll; 758 732 } 759 733 734 + err = mthca_init_srq_table(dev); 735 + if (err) { 736 + mthca_err(dev, "Failed to initialize " 737 + "shared receive queue table, aborting.\n"); 738 + goto err_cq_table_free; 739 + } 740 + 760 741 err = mthca_init_qp_table(dev); 761 742 if (err) { 762 743 mthca_err(dev, "Failed to initialize " 763 744 "queue pair table, aborting.\n"); 764 - goto err_cq_table_free; 745 + goto err_srq_table_free; 765 746 } 766 747 767 748 err = mthca_init_av_table(dev); ··· 792 759 793 760 err_qp_table_free: 794 761 mthca_cleanup_qp_table(dev); 762 + 763 + err_srq_table_free: 764 + mthca_cleanup_srq_table(dev); 795 765 796 766 err_cq_table_free: 797 767 mthca_cleanup_cq_table(dev); ··· 1082 1046 mthca_cleanup_mcg_table(mdev); 1083 1047 mthca_cleanup_av_table(mdev); 1084 1048 mthca_cleanup_qp_table(mdev); 1049 
+ mthca_cleanup_srq_table(mdev); 1085 1050 mthca_cleanup_cq_table(mdev); 1086 1051 mthca_cmd_use_polling(mdev); 1087 1052 mthca_cleanup_eq_table(mdev); ··· 1132 1095 mthca_cleanup_mcg_table(mdev); 1133 1096 mthca_cleanup_av_table(mdev); 1134 1097 mthca_cleanup_qp_table(mdev); 1098 + mthca_cleanup_srq_table(mdev); 1135 1099 mthca_cleanup_cq_table(mdev); 1136 1100 mthca_cmd_use_polling(mdev); 1137 1101 mthca_cleanup_eq_table(mdev);
+1
drivers/infiniband/hw/mthca/mthca_profile.c
@@ -102,6 +102,7 @@
 	profile[MTHCA_RES_UARC].size = request->uarc_size;
 
 	profile[MTHCA_RES_QP].num   = request->num_qp;
+	profile[MTHCA_RES_SRQ].num  = request->num_srq;
 	profile[MTHCA_RES_EQP].num  = request->num_qp;
 	profile[MTHCA_RES_RDB].num  = request->num_qp * request->rdb_per_qp;
 	profile[MTHCA_RES_CQ].num   = request->num_cq;
+1
drivers/infiniband/hw/mthca/mthca_profile.h
@@ -42,6 +42,7 @@
 struct mthca_profile {
 	int num_qp;
 	int rdb_per_qp;
+	int num_srq;
 	int num_cq;
 	int num_mcg;
 	int num_mpt;
+82
drivers/infiniband/hw/mthca/mthca_provider.c
··· 425 425 return 0; 426 426 } 427 427 428 + static struct ib_srq *mthca_create_srq(struct ib_pd *pd, 429 + struct ib_srq_init_attr *init_attr, 430 + struct ib_udata *udata) 431 + { 432 + struct mthca_create_srq ucmd; 433 + struct mthca_ucontext *context = NULL; 434 + struct mthca_srq *srq; 435 + int err; 436 + 437 + srq = kmalloc(sizeof *srq, GFP_KERNEL); 438 + if (!srq) 439 + return ERR_PTR(-ENOMEM); 440 + 441 + if (pd->uobject) { 442 + context = to_mucontext(pd->uobject->context); 443 + 444 + if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) 445 + return ERR_PTR(-EFAULT); 446 + 447 + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, 448 + context->db_tab, ucmd.db_index, 449 + ucmd.db_page); 450 + 451 + if (err) 452 + goto err_free; 453 + 454 + srq->mr.ibmr.lkey = ucmd.lkey; 455 + srq->db_index = ucmd.db_index; 456 + } 457 + 458 + err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), 459 + &init_attr->attr, srq); 460 + 461 + if (err && pd->uobject) 462 + mthca_unmap_user_db(to_mdev(pd->device), &context->uar, 463 + context->db_tab, ucmd.db_index); 464 + 465 + if (err) 466 + goto err_free; 467 + 468 + if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) { 469 + mthca_free_srq(to_mdev(pd->device), srq); 470 + err = -EFAULT; 471 + goto err_free; 472 + } 473 + 474 + return &srq->ibsrq; 475 + 476 + err_free: 477 + kfree(srq); 478 + 479 + return ERR_PTR(err); 480 + } 481 + 482 + static int mthca_destroy_srq(struct ib_srq *srq) 483 + { 484 + struct mthca_ucontext *context; 485 + 486 + if (srq->uobject) { 487 + context = to_mucontext(srq->uobject->context); 488 + 489 + mthca_unmap_user_db(to_mdev(srq->device), &context->uar, 490 + context->db_tab, to_msrq(srq)->db_index); 491 + } 492 + 493 + mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); 494 + kfree(srq); 495 + 496 + return 0; 497 + } 498 + 428 499 static struct ib_qp *mthca_create_qp(struct ib_pd *pd, 429 500 struct ib_qp_init_attr *init_attr, 430 501 struct ib_udata *udata) ··· 1074 1003 dev->ib_dev.dealloc_pd = mthca_dealloc_pd; 1075 1004 dev->ib_dev.create_ah = mthca_ah_create; 1076 1005 dev->ib_dev.destroy_ah = mthca_ah_destroy; 1006 + 1007 + if (dev->mthca_flags & MTHCA_FLAG_SRQ) { 1008 + dev->ib_dev.create_srq = mthca_create_srq; 1009 + dev->ib_dev.destroy_srq = mthca_destroy_srq; 1010 + 1011 + if (mthca_is_memfree(dev)) 1012 + dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; 1013 + else 1014 + dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv; 1015 + } 1016 + 1077 1017 dev->ib_dev.create_qp = mthca_create_qp; 1078 1018 dev->ib_dev.modify_qp = mthca_modify_qp; 1079 1019 dev->ib_dev.destroy_qp = mthca_destroy_qp;
+28
drivers/infiniband/hw/mthca/mthca_provider.h
··· 197 197 wait_queue_head_t wait; 198 198 }; 199 199 200 + struct mthca_srq { 201 + struct ib_srq ibsrq; 202 + spinlock_t lock; 203 + atomic_t refcount; 204 + int srqn; 205 + int max; 206 + int max_gs; 207 + int wqe_shift; 208 + int first_free; 209 + int last_free; 210 + u16 counter; /* Arbel only */ 211 + int db_index; /* Arbel only */ 212 + __be32 *db; /* Arbel only */ 213 + void *last; 214 + 215 + int is_direct; 216 + u64 *wrid; 217 + union mthca_buf queue; 218 + struct mthca_mr mr; 219 + 220 + wait_queue_head_t wait; 221 + }; 222 + 200 223 struct mthca_wq { 201 224 spinlock_t lock; 202 225 int max; ··· 298 275 static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) 299 276 { 300 277 return container_of(ibcq, struct mthca_cq, ibcq); 278 + } 279 + 280 + static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq) 281 + { 282 + return container_of(ibsrq, struct mthca_srq, ibsrq); 301 283 } 302 284 303 285 static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
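
The first_free/last_free fields above implement a free list that is threaded through the receive queue buffer itself: wqe_to_link() in mthca_srq.c stores the index of the next free WQE at byte offset 4 inside each free WQE, posting takes entries from the head, and completions give them back at the tail. A standalone toy model of that bookkeeping (hypothetical toy_* names, an arbitrary 32-byte stride, plain userspace C rather than driver code):

/* Toy model of the SRQ free list; not driver code. */
#include <stdio.h>
#include <string.h>

#define WQE_SHIFT 5			/* 32-byte WQEs */
#define NUM_WQE   8

struct toy_srq {
	unsigned char queue[NUM_WQE << WQE_SHIFT];
	int first_free;
	int last_free;
};

static int *wqe_link(struct toy_srq *srq, int i)
{
	/* link lives at offset 4, mirroring wqe_to_link() */
	return (int *) (srq->queue + (i << WQE_SHIFT) + 4);
}

static void toy_init(struct toy_srq *srq)
{
	int i;

	memset(srq, 0, sizeof *srq);
	for (i = 0; i < NUM_WQE; ++i)
		*wqe_link(srq, i) = i < NUM_WQE - 1 ? i + 1 : -1;
	srq->first_free = 0;
	srq->last_free  = NUM_WQE - 1;
}

/* posting a receive takes a WQE from the head of the free list */
static int toy_get(struct toy_srq *srq)
{
	int ind = srq->first_free;

	if (ind >= 0)
		srq->first_free = *wqe_link(srq, ind);
	return ind;			/* -1 means the SRQ is full */
}

/* completion/cleanup puts the WQE back at the tail */
static void toy_put(struct toy_srq *srq, int ind)
{
	if (srq->first_free >= 0)
		*wqe_link(srq, srq->last_free) = ind;
	else
		srq->first_free = ind;
	*wqe_link(srq, ind) = -1;
	srq->last_free = ind;
}

int main(void)
{
	struct toy_srq srq;
	int a, b;

	toy_init(&srq);
	a = toy_get(&srq);
	b = toy_get(&srq);
	printf("got WQEs %d and %d\n", a, b);
	toy_put(&srq, a);		/* a becomes the new tail of the free list */
	return 0;
}

The offset of 4 matters on Tavor, where posting a WQE may overwrite the first four bytes of the previous WQE; keeping the link out of those bytes preserves the free list, as the comment in mthca_srq.c explains.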
+27 -6
drivers/infiniband/hw/mthca/mthca_qp.c
··· 612 612 qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31; 613 613 614 614 if (mthca_is_memfree(dev)) { 615 - qp_context->rq_size_stride = 616 - ((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4); 617 - qp_context->sq_size_stride = 618 - ((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4); 615 + if (qp->rq.max) 616 + qp_context->rq_size_stride = long_log2(qp->rq.max) << 3; 617 + qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; 618 + 619 + if (qp->sq.max) 620 + qp_context->sq_size_stride = long_log2(qp->sq.max) << 3; 621 + qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; 619 622 } 620 623 621 624 /* leave arbel_sched_queue as 0 */ ··· 787 784 788 785 qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC); 789 786 787 + if (ibqp->srq) 788 + qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC); 789 + 790 790 if (attr_mask & IB_QP_MIN_RNR_TIMER) { 791 791 qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24); 792 792 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT); ··· 811 805 qp_context->qkey = cpu_to_be32(attr->qkey); 812 806 qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY); 813 807 } 808 + 809 + if (ibqp->srq) 810 + qp_context->srqn = cpu_to_be32(1 << 24 | 811 + to_msrq(ibqp->srq)->srqn); 814 812 815 813 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, 816 814 qp->qpn, 0, mailbox, 0, &status); ··· 1270 1260 * unref the mem-free tables and free the QPN in our table. 1271 1261 */ 1272 1262 if (!qp->ibqp.uobject) { 1273 - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn); 1263 + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, 1264 + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1274 1265 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 1275 - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn); 1266 + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, 1267 + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 1276 1268 1277 1269 mthca_free_memfree(dev, qp); 1278 1270 mthca_free_wqe_buf(dev, qp); ··· 2019 2007 int index, int *dbd, __be32 *new_wqe) 2020 2008 { 2021 2009 struct mthca_next_seg *next; 2010 + 2011 + /* 2012 + * For SRQs, all WQEs generate a CQE, so we're always at the 2013 + * end of the doorbell chain. 2014 + */ 2015 + if (qp->ibqp.srq) { 2016 + *new_wqe = 0; 2017 + return 0; 2018 + } 2022 2019 2023 2020 if (is_send) 2024 2021 next = get_send_wqe(qp, index);
+591
drivers/infiniband/hw/mthca/mthca_srq.c
··· 1 + /* 2 + * Copyright (c) 2005 Cisco Systems. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 31 + * 32 + * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $ 33 + */ 34 + 35 + #include "mthca_dev.h" 36 + #include "mthca_cmd.h" 37 + #include "mthca_memfree.h" 38 + #include "mthca_wqe.h" 39 + 40 + enum { 41 + MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE 42 + }; 43 + 44 + struct mthca_tavor_srq_context { 45 + __be64 wqe_base_ds; /* low 6 bits is descriptor size */ 46 + __be32 state_pd; 47 + __be32 lkey; 48 + __be32 uar; 49 + __be32 wqe_cnt; 50 + u32 reserved[2]; 51 + }; 52 + 53 + struct mthca_arbel_srq_context { 54 + __be32 state_logsize_srqn; 55 + __be32 lkey; 56 + __be32 db_index; 57 + __be32 logstride_usrpage; 58 + __be64 wqe_base; 59 + __be32 eq_pd; 60 + __be16 limit_watermark; 61 + __be16 wqe_cnt; 62 + u16 reserved1; 63 + __be16 wqe_counter; 64 + u32 reserved2[3]; 65 + }; 66 + 67 + static void *get_wqe(struct mthca_srq *srq, int n) 68 + { 69 + if (srq->is_direct) 70 + return srq->queue.direct.buf + (n << srq->wqe_shift); 71 + else 72 + return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf + 73 + ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); 74 + } 75 + 76 + /* 77 + * Return a pointer to the location within a WQE that we're using as a 78 + * link when the WQE is in the free list. We use an offset of 4 79 + * because in the Tavor case, posting a WQE may overwrite the first 80 + * four bytes of the previous WQE. The offset avoids corrupting our 81 + * free list if the WQE has already completed and been put on the free 82 + * list when we post the next WQE. 
83 + */ 84 + static inline int *wqe_to_link(void *wqe) 85 + { 86 + return (int *) (wqe + 4); 87 + } 88 + 89 + static void mthca_tavor_init_srq_context(struct mthca_dev *dev, 90 + struct mthca_pd *pd, 91 + struct mthca_srq *srq, 92 + struct mthca_tavor_srq_context *context) 93 + { 94 + memset(context, 0, sizeof *context); 95 + 96 + context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4)); 97 + context->state_pd = cpu_to_be32(pd->pd_num); 98 + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); 99 + 100 + if (pd->ibpd.uobject) 101 + context->uar = 102 + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); 103 + else 104 + context->uar = cpu_to_be32(dev->driver_uar.index); 105 + } 106 + 107 + static void mthca_arbel_init_srq_context(struct mthca_dev *dev, 108 + struct mthca_pd *pd, 109 + struct mthca_srq *srq, 110 + struct mthca_arbel_srq_context *context) 111 + { 112 + int logsize; 113 + 114 + memset(context, 0, sizeof *context); 115 + 116 + logsize = long_log2(srq->max) + srq->wqe_shift; 117 + context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn); 118 + context->lkey = cpu_to_be32(srq->mr.ibmr.lkey); 119 + context->db_index = cpu_to_be32(srq->db_index); 120 + context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29); 121 + if (pd->ibpd.uobject) 122 + context->logstride_usrpage |= 123 + cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index); 124 + else 125 + context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index); 126 + context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num); 127 + } 128 + 129 + static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) 130 + { 131 + mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, 132 + srq->is_direct, &srq->mr); 133 + kfree(srq->wrid); 134 + } 135 + 136 + static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, 137 + struct mthca_srq *srq) 138 + { 139 + struct mthca_data_seg *scatter; 140 + void *wqe; 141 + int err; 142 + int i; 143 + 144 + if (pd->ibpd.uobject) 145 + return 0; 146 + 147 + srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL); 148 + if (!srq->wrid) 149 + return -ENOMEM; 150 + 151 + err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, 152 + MTHCA_MAX_DIRECT_SRQ_SIZE, 153 + &srq->queue, &srq->is_direct, pd, 1, &srq->mr); 154 + if (err) { 155 + kfree(srq->wrid); 156 + return err; 157 + } 158 + 159 + /* 160 + * Now initialize the SRQ buffer so that all of the WQEs are 161 + * linked into the list of free WQEs. In addition, set the 162 + * scatter list L_Keys to the sentry value of 0x100. 163 + */ 164 + for (i = 0; i < srq->max; ++i) { 165 + wqe = get_wqe(srq, i); 166 + 167 + *wqe_to_link(wqe) = i < srq->max - 1 ? 
i + 1 : -1; 168 + 169 + for (scatter = wqe + sizeof (struct mthca_next_seg); 170 + (void *) scatter < wqe + (1 << srq->wqe_shift); 171 + ++scatter) 172 + scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 173 + } 174 + 175 + return 0; 176 + } 177 + 178 + int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, 179 + struct ib_srq_attr *attr, struct mthca_srq *srq) 180 + { 181 + struct mthca_mailbox *mailbox; 182 + u8 status; 183 + int ds; 184 + int err; 185 + 186 + /* Sanity check SRQ size before proceeding */ 187 + if (attr->max_wr > 16 << 20 || attr->max_sge > 64) 188 + return -EINVAL; 189 + 190 + srq->max = attr->max_wr; 191 + srq->max_gs = attr->max_sge; 192 + srq->last = NULL; 193 + srq->counter = 0; 194 + 195 + if (mthca_is_memfree(dev)) 196 + srq->max = roundup_pow_of_two(srq->max + 1); 197 + 198 + ds = min(64UL, 199 + roundup_pow_of_two(sizeof (struct mthca_next_seg) + 200 + srq->max_gs * sizeof (struct mthca_data_seg))); 201 + srq->wqe_shift = long_log2(ds); 202 + 203 + srq->srqn = mthca_alloc(&dev->srq_table.alloc); 204 + if (srq->srqn == -1) 205 + return -ENOMEM; 206 + 207 + if (mthca_is_memfree(dev)) { 208 + err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); 209 + if (err) 210 + goto err_out; 211 + 212 + if (!pd->ibpd.uobject) { 213 + srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, 214 + srq->srqn, &srq->db); 215 + if (srq->db_index < 0) { 216 + err = -ENOMEM; 217 + goto err_out_icm; 218 + } 219 + } 220 + } 221 + 222 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 223 + if (IS_ERR(mailbox)) { 224 + err = PTR_ERR(mailbox); 225 + goto err_out_db; 226 + } 227 + 228 + err = mthca_alloc_srq_buf(dev, pd, srq); 229 + if (err) 230 + goto err_out_mailbox; 231 + 232 + spin_lock_init(&srq->lock); 233 + atomic_set(&srq->refcount, 1); 234 + init_waitqueue_head(&srq->wait); 235 + 236 + if (mthca_is_memfree(dev)) 237 + mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf); 238 + else 239 + mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); 240 + 241 + err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status); 242 + 243 + if (err) { 244 + mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err); 245 + goto err_out_free_buf; 246 + } 247 + if (status) { 248 + mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n", 249 + status); 250 + err = -EINVAL; 251 + goto err_out_free_buf; 252 + } 253 + 254 + spin_lock_irq(&dev->srq_table.lock); 255 + if (mthca_array_set(&dev->srq_table.srq, 256 + srq->srqn & (dev->limits.num_srqs - 1), 257 + srq)) { 258 + spin_unlock_irq(&dev->srq_table.lock); 259 + goto err_out_free_srq; 260 + } 261 + spin_unlock_irq(&dev->srq_table.lock); 262 + 263 + mthca_free_mailbox(dev, mailbox); 264 + 265 + srq->first_free = 0; 266 + srq->last_free = srq->max - 1; 267 + 268 + return 0; 269 + 270 + err_out_free_srq: 271 + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); 272 + if (err) 273 + mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 274 + else if (status) 275 + mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); 276 + 277 + err_out_free_buf: 278 + if (!pd->ibpd.uobject) 279 + mthca_free_srq_buf(dev, srq); 280 + 281 + err_out_mailbox: 282 + mthca_free_mailbox(dev, mailbox); 283 + 284 + err_out_db: 285 + if (!pd->ibpd.uobject && mthca_is_memfree(dev)) 286 + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); 287 + 288 + err_out_icm: 289 + mthca_table_put(dev, dev->srq_table.table, srq->srqn); 290 + 291 + err_out: 292 + mthca_free(&dev->srq_table.alloc, srq->srqn); 293 + 294 + return err; 295 + } 296 + 297 + void mthca_free_srq(struct 
mthca_dev *dev, struct mthca_srq *srq) 298 + { 299 + struct mthca_mailbox *mailbox; 300 + int err; 301 + u8 status; 302 + 303 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 304 + if (IS_ERR(mailbox)) { 305 + mthca_warn(dev, "No memory for mailbox to free SRQ.\n"); 306 + return; 307 + } 308 + 309 + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); 310 + if (err) 311 + mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 312 + else if (status) 313 + mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); 314 + 315 + spin_lock_irq(&dev->srq_table.lock); 316 + mthca_array_clear(&dev->srq_table.srq, 317 + srq->srqn & (dev->limits.num_srqs - 1)); 318 + spin_unlock_irq(&dev->srq_table.lock); 319 + 320 + atomic_dec(&srq->refcount); 321 + wait_event(srq->wait, !atomic_read(&srq->refcount)); 322 + 323 + if (!srq->ibsrq.uobject) { 324 + mthca_free_srq_buf(dev, srq); 325 + if (mthca_is_memfree(dev)) 326 + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); 327 + } 328 + 329 + mthca_table_put(dev, dev->srq_table.table, srq->srqn); 330 + mthca_free(&dev->srq_table.alloc, srq->srqn); 331 + mthca_free_mailbox(dev, mailbox); 332 + } 333 + 334 + void mthca_srq_event(struct mthca_dev *dev, u32 srqn, 335 + enum ib_event_type event_type) 336 + { 337 + struct mthca_srq *srq; 338 + struct ib_event event; 339 + 340 + spin_lock(&dev->srq_table.lock); 341 + srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); 342 + if (srq) 343 + atomic_inc(&srq->refcount); 344 + spin_unlock(&dev->srq_table.lock); 345 + 346 + if (!srq) { 347 + mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn); 348 + return; 349 + } 350 + 351 + if (!srq->ibsrq.event_handler) 352 + goto out; 353 + 354 + event.device = &dev->ib_dev; 355 + event.event = event_type; 356 + event.element.srq = &srq->ibsrq; 357 + srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); 358 + 359 + out: 360 + if (atomic_dec_and_test(&srq->refcount)) 361 + wake_up(&srq->wait); 362 + } 363 + 364 + /* 365 + * This function must be called with IRQs disabled. 
366 + */ 367 + void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) 368 + { 369 + int ind; 370 + 371 + ind = wqe_addr >> srq->wqe_shift; 372 + 373 + spin_lock(&srq->lock); 374 + 375 + if (likely(srq->first_free >= 0)) 376 + *wqe_to_link(get_wqe(srq, srq->last_free)) = ind; 377 + else 378 + srq->first_free = ind; 379 + 380 + *wqe_to_link(get_wqe(srq, ind)) = -1; 381 + srq->last_free = ind; 382 + 383 + spin_unlock(&srq->lock); 384 + } 385 + 386 + int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 387 + struct ib_recv_wr **bad_wr) 388 + { 389 + struct mthca_dev *dev = to_mdev(ibsrq->device); 390 + struct mthca_srq *srq = to_msrq(ibsrq); 391 + unsigned long flags; 392 + int err = 0; 393 + int first_ind; 394 + int ind; 395 + int next_ind; 396 + int nreq; 397 + int i; 398 + void *wqe; 399 + void *prev_wqe; 400 + 401 + spin_lock_irqsave(&srq->lock, flags); 402 + 403 + first_ind = srq->first_free; 404 + 405 + for (nreq = 0; wr; ++nreq, wr = wr->next) { 406 + ind = srq->first_free; 407 + 408 + if (ind < 0) { 409 + mthca_err(dev, "SRQ %06x full\n", srq->srqn); 410 + err = -ENOMEM; 411 + *bad_wr = wr; 412 + break; 413 + } 414 + 415 + wqe = get_wqe(srq, ind); 416 + next_ind = *wqe_to_link(wqe); 417 + prev_wqe = srq->last; 418 + srq->last = wqe; 419 + 420 + ((struct mthca_next_seg *) wqe)->nda_op = 0; 421 + ((struct mthca_next_seg *) wqe)->ee_nds = 0; 422 + /* flags field will always remain 0 */ 423 + 424 + wqe += sizeof (struct mthca_next_seg); 425 + 426 + if (unlikely(wr->num_sge > srq->max_gs)) { 427 + err = -EINVAL; 428 + *bad_wr = wr; 429 + srq->last = prev_wqe; 430 + break; 431 + } 432 + 433 + for (i = 0; i < wr->num_sge; ++i) { 434 + ((struct mthca_data_seg *) wqe)->byte_count = 435 + cpu_to_be32(wr->sg_list[i].length); 436 + ((struct mthca_data_seg *) wqe)->lkey = 437 + cpu_to_be32(wr->sg_list[i].lkey); 438 + ((struct mthca_data_seg *) wqe)->addr = 439 + cpu_to_be64(wr->sg_list[i].addr); 440 + wqe += sizeof (struct mthca_data_seg); 441 + } 442 + 443 + if (i < srq->max_gs) { 444 + ((struct mthca_data_seg *) wqe)->byte_count = 0; 445 + ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 446 + ((struct mthca_data_seg *) wqe)->addr = 0; 447 + } 448 + 449 + if (likely(prev_wqe)) { 450 + ((struct mthca_next_seg *) prev_wqe)->nda_op = 451 + cpu_to_be32((ind << srq->wqe_shift) | 1); 452 + wmb(); 453 + ((struct mthca_next_seg *) prev_wqe)->ee_nds = 454 + cpu_to_be32(MTHCA_NEXT_DBD); 455 + } 456 + 457 + srq->wrid[ind] = wr->wr_id; 458 + srq->first_free = next_ind; 459 + } 460 + 461 + 462 + 463 + if (likely(nreq)) { 464 + __be32 doorbell[2]; 465 + 466 + doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift); 467 + doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq); 468 + 469 + /* 470 + * Make sure that descriptors are written before 471 + * doorbell is rung.
472 + */ 473 + wmb(); 474 + 475 + mthca_write64(doorbell, 476 + dev->kar + MTHCA_RECEIVE_DOORBELL, 477 + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); 478 + } 479 + 480 + spin_unlock_irqrestore(&srq->lock, flags); 481 + return err; 482 + } 483 + 484 + int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr, 485 + struct ib_recv_wr **bad_wr) 486 + { 487 + struct mthca_dev *dev = to_mdev(ibsrq->device); 488 + struct mthca_srq *srq = to_msrq(ibsrq); 489 + unsigned long flags; 490 + int err = 0; 491 + int ind; 492 + int next_ind; 493 + int nreq; 494 + int i; 495 + void *wqe; 496 + 497 + spin_lock_irqsave(&srq->lock, flags); 498 + 499 + for (nreq = 0; wr; ++nreq, wr = wr->next) { 500 + ind = srq->first_free; 501 + 502 + if (ind < 0) { 503 + mthca_err(dev, "SRQ %06x full\n", srq->srqn); 504 + err = -ENOMEM; 505 + *bad_wr = wr; 506 + break; 507 + } 508 + 509 + wqe = get_wqe(srq, ind); 510 + next_ind = *wqe_to_link(wqe); 511 + 512 + ((struct mthca_next_seg *) wqe)->nda_op = 513 + cpu_to_be32((next_ind << srq->wqe_shift) | 1); 514 + ((struct mthca_next_seg *) wqe)->ee_nds = 0; 515 + /* flags field will always remain 0 */ 516 + 517 + wqe += sizeof (struct mthca_next_seg); 518 + 519 + if (unlikely(wr->num_sge > srq->max_gs)) { 520 + err = -EINVAL; 521 + *bad_wr = wr; 522 + break; 523 + } 524 + 525 + for (i = 0; i < wr->num_sge; ++i) { 526 + ((struct mthca_data_seg *) wqe)->byte_count = 527 + cpu_to_be32(wr->sg_list[i].length); 528 + ((struct mthca_data_seg *) wqe)->lkey = 529 + cpu_to_be32(wr->sg_list[i].lkey); 530 + ((struct mthca_data_seg *) wqe)->addr = 531 + cpu_to_be64(wr->sg_list[i].addr); 532 + wqe += sizeof (struct mthca_data_seg); 533 + } 534 + 535 + if (i < srq->max_gs) { 536 + ((struct mthca_data_seg *) wqe)->byte_count = 0; 537 + ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY); 538 + ((struct mthca_data_seg *) wqe)->addr = 0; 539 + } 540 + 541 + srq->wrid[ind] = wr->wr_id; 542 + srq->first_free = next_ind; 543 + } 544 + 545 + if (likely(nreq)) { 546 + srq->counter += nreq; 547 + 548 + /* 549 + * Make sure that descriptors are written before 550 + * we write doorbell record. 551 + */ 552 + wmb(); 553 + *srq->db = cpu_to_be32(srq->counter); 554 + } 555 + 556 + spin_unlock_irqrestore(&srq->lock, flags); 557 + return err; 558 + } 559 + 560 + int __devinit mthca_init_srq_table(struct mthca_dev *dev) 561 + { 562 + int err; 563 + 564 + if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) 565 + return 0; 566 + 567 + spin_lock_init(&dev->srq_table.lock); 568 + 569 + err = mthca_alloc_init(&dev->srq_table.alloc, 570 + dev->limits.num_srqs, 571 + dev->limits.num_srqs - 1, 572 + dev->limits.reserved_srqs); 573 + if (err) 574 + return err; 575 + 576 + err = mthca_array_init(&dev->srq_table.srq, 577 + dev->limits.num_srqs); 578 + if (err) 579 + mthca_alloc_cleanup(&dev->srq_table.alloc); 580 + 581 + return err; 582 + } 583 + 584 + void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev) 585 + { 586 + if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) 587 + return; 588 + 589 + mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); 590 + mthca_alloc_cleanup(&dev->srq_table.alloc); 591 + }
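
From a consumer's point of view the two post functions above sit behind ib_post_srq_recv(). A minimal sketch of a ULP replenishing the shared queue, assuming the caller already owns a registered buffer (srq_repost_one, buf_dma and mr_lkey are placeholders, not code from this patch):

#include <rdma/ib_verbs.h>

/* buf_dma and mr_lkey come from the caller's registered memory. */
static int srq_repost_one(struct ib_srq *srq, u64 wr_id, u64 buf_dma,
			  u32 len, u32 mr_lkey)
{
	struct ib_sge sge = {
		.addr   = buf_dma,
		.length = len,
		.lkey   = mr_lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id,	/* comes back in the CQE's wr_id */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	/* dispatches to mthca_tavor_post_srq_recv() or mthca_arbel_post_srq_recv() */
	return ib_post_srq_recv(srq, &wr, &bad_wr);
}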
+11
drivers/infiniband/hw/mthca/mthca_user.h
@@ -69,6 +69,17 @@
 	__u32 reserved;
 };
 
+struct mthca_create_srq {
+	__u32 lkey;
+	__u32 db_index;
+	__u64 db_page;
+};
+
+struct mthca_create_srq_resp {
+	__u32 srqn;
+	__u32 reserved;
+};
+
 struct mthca_create_qp {
 	__u32 lkey;
 	__u32 reserved;
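
These two structures define the kernel/userspace ABI: the userspace driver passes the lkey of its already-registered SRQ buffer plus its doorbell record location, and gets the SRQ number back. Applications themselves only see the generic libibverbs entry points; a rough sketch of that usage (the app_* helpers are made-up names, only the ibv_* calls are real API):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Create an SRQ on an existing protection domain. */
static struct ibv_srq *app_create_srq(struct ibv_pd *pd)
{
	struct ibv_srq_init_attr attr = {
		.attr = {
			.max_wr  = 1024,
			.max_sge = 1,
		},
	};

	return ibv_create_srq(pd, &attr);
}

/* Post one receive buffer (already registered as mr) to the SRQ. */
static int app_post_recv(struct ibv_srq *srq, void *buf, uint32_t len,
			 struct ibv_mr *mr, uint64_t wr_id)
{
	struct ibv_sge sge = {
		.addr   = (uintptr_t) buf,
		.length = len,
		.lkey   = mr->lkey,
	};
	struct ibv_recv_wr wr = {
		.wr_id   = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ibv_recv_wr *bad_wr;

	return ibv_post_srq_recv(srq, &wr, &bad_wr);
}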