Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] IB: Add user-supplied context to userspace CM ABI

- Add user specified context to all uCM events. Users will not retrieve
any events associated with the context after destroying the corresponding
cm_id.
- Provide the ib_cm_init_qp_attr() call to userspace clients of the CM.
This call may be used to set QP attributes properly before modifying the QP.
- Fixes some error handling synchronization and cleanup issues.
- Performs some minor code cleanup.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

authored by

Sean Hefty and committed by
Roland Dreier
0b2b35f6 1d6801f9

+262 -110
+189 -100
drivers/infiniband/core/ucm.c
··· 72 72 73 73 static struct semaphore ctx_id_mutex; 74 74 static struct idr ctx_id_table; 75 - static int ctx_id_rover = 0; 76 75 77 76 static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id) 78 77 { ··· 96 97 wake_up(&ctx->wait); 97 98 } 98 99 99 - static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id) 100 + static inline int ib_ucm_new_cm_id(int event) 100 101 { 101 - struct ib_ucm_context *ctx; 102 + return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED; 103 + } 104 + 105 + static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx) 106 + { 102 107 struct ib_ucm_event *uevent; 103 108 104 - down(&ctx_id_mutex); 105 - ctx = idr_find(&ctx_id_table, id); 106 - if (!ctx) 107 - ctx = ERR_PTR(-ENOENT); 108 - else if (ctx->file != file) 109 - ctx = ERR_PTR(-EINVAL); 110 - else 111 - idr_remove(&ctx_id_table, ctx->id); 112 - up(&ctx_id_mutex); 113 - 114 - if (IS_ERR(ctx)) 115 - return PTR_ERR(ctx); 116 - 117 - atomic_dec(&ctx->ref); 118 - wait_event(ctx->wait, !atomic_read(&ctx->ref)); 119 - 120 - /* No new events will be generated after destroying the cm_id. */ 121 - if (!IS_ERR(ctx->cm_id)) 122 - ib_destroy_cm_id(ctx->cm_id); 123 - 124 - /* Cleanup events not yet reported to the user. */ 125 - down(&file->mutex); 109 + down(&ctx->file->mutex); 126 110 list_del(&ctx->file_list); 127 111 while (!list_empty(&ctx->events)) { 128 112 ··· 115 133 list_del(&uevent->ctx_list); 116 134 117 135 /* clear incoming connections. 
*/ 118 - if (uevent->cm_id) 136 + if (ib_ucm_new_cm_id(uevent->resp.event)) 119 137 ib_destroy_cm_id(uevent->cm_id); 120 138 121 139 kfree(uevent); 122 140 } 123 - up(&file->mutex); 124 - 125 - kfree(ctx); 126 - return 0; 141 + up(&ctx->file->mutex); 127 142 } 128 143 129 144 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) ··· 132 153 if (!ctx) 133 154 return NULL; 134 155 156 + memset(ctx, 0, sizeof *ctx); 135 157 atomic_set(&ctx->ref, 1); 136 158 init_waitqueue_head(&ctx->wait); 137 159 ctx->file = file; 138 - 139 160 INIT_LIST_HEAD(&ctx->events); 140 161 141 - list_add_tail(&ctx->file_list, &file->ctxs); 162 + do { 163 + result = idr_pre_get(&ctx_id_table, GFP_KERNEL); 164 + if (!result) 165 + goto error; 142 166 143 - ctx_id_rover = (ctx_id_rover + 1) & INT_MAX; 144 - retry: 145 - result = idr_pre_get(&ctx_id_table, GFP_KERNEL); 146 - if (!result) 147 - goto error; 167 + down(&ctx_id_mutex); 168 + result = idr_get_new(&ctx_id_table, ctx, &ctx->id); 169 + up(&ctx_id_mutex); 170 + } while (result == -EAGAIN); 148 171 149 - down(&ctx_id_mutex); 150 - result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id); 151 - up(&ctx_id_mutex); 152 - 153 - if (result == -EAGAIN) 154 - goto retry; 155 172 if (result) 156 173 goto error; 157 174 175 + list_add_tail(&ctx->file_list, &file->ctxs); 158 176 ucm_dbg("Allocated CM ID <%d>\n", ctx->id); 159 - 160 177 return ctx; 161 - error: 162 - list_del(&ctx->file_list); 163 - kfree(ctx); 164 178 179 + error: 180 + kfree(ctx); 165 181 return NULL; 166 182 } 167 183 /* ··· 193 219 kpath->packet_life_time_selector; 194 220 } 195 221 196 - static void ib_ucm_event_req_get(struct ib_ucm_context *ctx, 197 - struct ib_ucm_req_event_resp *ureq, 222 + static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq, 198 223 struct ib_cm_req_event_param *kreq) 199 224 { 200 - ureq->listen_id = ctx->id; 201 - 202 225 ureq->remote_ca_guid = kreq->remote_ca_guid; 203 226 ureq->remote_qkey = 
kreq->remote_qkey; 204 227 ureq->remote_qpn = kreq->remote_qpn; ··· 230 259 urep->srq = krep->srq; 231 260 } 232 261 233 - static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx, 234 - struct ib_ucm_sidr_req_event_resp *ureq, 235 - struct ib_cm_sidr_req_event_param *kreq) 236 - { 237 - ureq->listen_id = ctx->id; 238 - ureq->pkey = kreq->pkey; 239 - } 240 - 241 262 static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep, 242 263 struct ib_cm_sidr_rep_event_param *krep) 243 264 { ··· 238 275 urep->qpn = krep->qpn; 239 276 }; 240 277 241 - static int ib_ucm_event_process(struct ib_ucm_context *ctx, 242 - struct ib_cm_event *evt, 278 + static int ib_ucm_event_process(struct ib_cm_event *evt, 243 279 struct ib_ucm_event *uvt) 244 280 { 245 281 void *info = NULL; 246 282 247 283 switch (evt->event) { 248 284 case IB_CM_REQ_RECEIVED: 249 - ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp, 285 + ib_ucm_event_req_get(&uvt->resp.u.req_resp, 250 286 &evt->param.req_rcvd); 251 287 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; 252 288 uvt->resp.present = IB_UCM_PRES_PRIMARY; ··· 293 331 info = evt->param.apr_rcvd.apr_info; 294 332 break; 295 333 case IB_CM_SIDR_REQ_RECEIVED: 296 - ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp, 297 - &evt->param.sidr_req_rcvd); 334 + uvt->resp.u.sidr_req_resp.pkey = 335 + evt->param.sidr_req_rcvd.pkey; 298 336 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; 299 337 break; 300 338 case IB_CM_SIDR_REP_RECEIVED: ··· 340 378 struct ib_ucm_event *uevent; 341 379 struct ib_ucm_context *ctx; 342 380 int result = 0; 343 - int id; 344 381 345 382 ctx = cm_id->context; 346 - 347 - if (event->event == IB_CM_REQ_RECEIVED || 348 - event->event == IB_CM_SIDR_REQ_RECEIVED) 349 - id = IB_UCM_CM_ID_INVALID; 350 - else 351 - id = ctx->id; 352 383 353 384 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); 354 385 if (!uevent) 355 386 goto err1; 356 387 357 388 memset(uevent, 0, sizeof(*uevent)); 358 - uevent->resp.id = id; 
389 + uevent->ctx = ctx; 390 + uevent->cm_id = cm_id; 391 + uevent->resp.uid = ctx->uid; 392 + uevent->resp.id = ctx->id; 359 393 uevent->resp.event = event->event; 360 394 361 - result = ib_ucm_event_process(ctx, event, uevent); 395 + result = ib_ucm_event_process(event, uevent); 362 396 if (result) 363 397 goto err2; 364 - 365 - uevent->ctx = ctx; 366 - uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? cm_id : NULL; 367 398 368 399 down(&ctx->file->mutex); 369 400 list_add_tail(&uevent->file_list, &ctx->file->events); ··· 369 414 kfree(uevent); 370 415 err1: 371 416 /* Destroy new cm_id's */ 372 - return (id == IB_UCM_CM_ID_INVALID); 417 + return ib_ucm_new_cm_id(event->event); 373 418 } 374 419 375 420 static ssize_t ib_ucm_event(struct ib_ucm_file *file, ··· 378 423 { 379 424 struct ib_ucm_context *ctx; 380 425 struct ib_ucm_event_get cmd; 381 - struct ib_ucm_event *uevent = NULL; 426 + struct ib_ucm_event *uevent; 382 427 int result = 0; 383 428 DEFINE_WAIT(wait); 384 429 ··· 391 436 * wait 392 437 */ 393 438 down(&file->mutex); 394 - 395 439 while (list_empty(&file->events)) { 396 440 397 441 if (file->filp->f_flags & O_NONBLOCK) { ··· 417 463 418 464 uevent = list_entry(file->events.next, struct ib_ucm_event, file_list); 419 465 420 - if (!uevent->cm_id) 421 - goto user; 466 + if (ib_ucm_new_cm_id(uevent->resp.event)) { 467 + ctx = ib_ucm_ctx_alloc(file); 468 + if (!ctx) { 469 + result = -ENOMEM; 470 + goto done; 471 + } 422 472 423 - ctx = ib_ucm_ctx_alloc(file); 424 - if (!ctx) { 425 - result = -ENOMEM; 426 - goto done; 473 + ctx->cm_id = uevent->cm_id; 474 + ctx->cm_id->context = ctx; 475 + uevent->resp.id = ctx->id; 427 476 } 428 477 429 - ctx->cm_id = uevent->cm_id; 430 - ctx->cm_id->context = ctx; 431 - 432 - uevent->resp.id = ctx->id; 433 - 434 - user: 435 478 if (copy_to_user((void __user *)(unsigned long)cmd.response, 436 479 &uevent->resp, sizeof(uevent->resp))) { 437 480 result = -EFAULT; ··· 436 485 } 437 486 438 487 if (uevent->data) { 439 - 440 
488 if (cmd.data_len < uevent->data_len) { 441 489 result = -ENOMEM; 442 490 goto done; 443 491 } 444 - 445 492 if (copy_to_user((void __user *)(unsigned long)cmd.data, 446 493 uevent->data, uevent->data_len)) { 447 494 result = -EFAULT; ··· 448 499 } 449 500 450 501 if (uevent->info) { 451 - 452 502 if (cmd.info_len < uevent->info_len) { 453 503 result = -ENOMEM; 454 504 goto done; 455 505 } 456 - 457 506 if (copy_to_user((void __user *)(unsigned long)cmd.info, 458 507 uevent->info, uevent->info_len)) { 459 508 result = -EFAULT; ··· 461 514 462 515 list_del(&uevent->file_list); 463 516 list_del(&uevent->ctx_list); 517 + uevent->ctx->events_reported++; 464 518 465 519 kfree(uevent->data); 466 520 kfree(uevent->info); ··· 493 545 if (!ctx) 494 546 return -ENOMEM; 495 547 548 + ctx->uid = cmd.uid; 496 549 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx); 497 550 if (IS_ERR(ctx->cm_id)) { 498 551 result = PTR_ERR(ctx->cm_id); ··· 510 561 return 0; 511 562 512 563 err: 513 - ib_ucm_destroy_ctx(file, ctx->id); 564 + down(&ctx_id_mutex); 565 + idr_remove(&ctx_id_table, ctx->id); 566 + up(&ctx_id_mutex); 567 + 568 + if (!IS_ERR(ctx->cm_id)) 569 + ib_destroy_cm_id(ctx->cm_id); 570 + 571 + kfree(ctx); 514 572 return result; 515 573 } 516 574 ··· 526 570 int in_len, int out_len) 527 571 { 528 572 struct ib_ucm_destroy_id cmd; 573 + struct ib_ucm_destroy_id_resp resp; 574 + struct ib_ucm_context *ctx; 575 + int result = 0; 576 + 577 + if (out_len < sizeof(resp)) 578 + return -ENOSPC; 529 579 530 580 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 531 581 return -EFAULT; 532 582 533 - return ib_ucm_destroy_ctx(file, cmd.id); 583 + down(&ctx_id_mutex); 584 + ctx = idr_find(&ctx_id_table, cmd.id); 585 + if (!ctx) 586 + ctx = ERR_PTR(-ENOENT); 587 + else if (ctx->file != file) 588 + ctx = ERR_PTR(-EINVAL); 589 + else 590 + idr_remove(&ctx_id_table, ctx->id); 591 + up(&ctx_id_mutex); 592 + 593 + if (IS_ERR(ctx)) 594 + return PTR_ERR(ctx); 595 + 596 + atomic_dec(&ctx->ref); 
597 + wait_event(ctx->wait, !atomic_read(&ctx->ref)); 598 + 599 + /* No new events will be generated after destroying the cm_id. */ 600 + ib_destroy_cm_id(ctx->cm_id); 601 + /* Cleanup events not yet reported to the user. */ 602 + ib_ucm_cleanup_events(ctx); 603 + 604 + resp.events_reported = ctx->events_reported; 605 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 606 + &resp, sizeof(resp))) 607 + result = -EFAULT; 608 + 609 + kfree(ctx); 610 + return result; 534 611 } 535 612 536 613 static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, ··· 594 605 &resp, sizeof(resp))) 595 606 result = -EFAULT; 596 607 608 + ib_ucm_ctx_put(ctx); 609 + return result; 610 + } 611 + 612 + static void ib_ucm_copy_ah_attr(struct ib_ucm_ah_attr *dest_attr, 613 + struct ib_ah_attr *src_attr) 614 + { 615 + memcpy(dest_attr->grh_dgid, src_attr->grh.dgid.raw, 616 + sizeof src_attr->grh.dgid); 617 + dest_attr->grh_flow_label = src_attr->grh.flow_label; 618 + dest_attr->grh_sgid_index = src_attr->grh.sgid_index; 619 + dest_attr->grh_hop_limit = src_attr->grh.hop_limit; 620 + dest_attr->grh_traffic_class = src_attr->grh.traffic_class; 621 + 622 + dest_attr->dlid = src_attr->dlid; 623 + dest_attr->sl = src_attr->sl; 624 + dest_attr->src_path_bits = src_attr->src_path_bits; 625 + dest_attr->static_rate = src_attr->static_rate; 626 + dest_attr->is_global = (src_attr->ah_flags & IB_AH_GRH); 627 + dest_attr->port_num = src_attr->port_num; 628 + } 629 + 630 + static void ib_ucm_copy_qp_attr(struct ib_ucm_init_qp_attr_resp *dest_attr, 631 + struct ib_qp_attr *src_attr) 632 + { 633 + dest_attr->cur_qp_state = src_attr->cur_qp_state; 634 + dest_attr->path_mtu = src_attr->path_mtu; 635 + dest_attr->path_mig_state = src_attr->path_mig_state; 636 + dest_attr->qkey = src_attr->qkey; 637 + dest_attr->rq_psn = src_attr->rq_psn; 638 + dest_attr->sq_psn = src_attr->sq_psn; 639 + dest_attr->dest_qp_num = src_attr->dest_qp_num; 640 + dest_attr->qp_access_flags = src_attr->qp_access_flags; 641 
+ 642 + dest_attr->max_send_wr = src_attr->cap.max_send_wr; 643 + dest_attr->max_recv_wr = src_attr->cap.max_recv_wr; 644 + dest_attr->max_send_sge = src_attr->cap.max_send_sge; 645 + dest_attr->max_recv_sge = src_attr->cap.max_recv_sge; 646 + dest_attr->max_inline_data = src_attr->cap.max_inline_data; 647 + 648 + ib_ucm_copy_ah_attr(&dest_attr->ah_attr, &src_attr->ah_attr); 649 + ib_ucm_copy_ah_attr(&dest_attr->alt_ah_attr, &src_attr->alt_ah_attr); 650 + 651 + dest_attr->pkey_index = src_attr->pkey_index; 652 + dest_attr->alt_pkey_index = src_attr->alt_pkey_index; 653 + dest_attr->en_sqd_async_notify = src_attr->en_sqd_async_notify; 654 + dest_attr->sq_draining = src_attr->sq_draining; 655 + dest_attr->max_rd_atomic = src_attr->max_rd_atomic; 656 + dest_attr->max_dest_rd_atomic = src_attr->max_dest_rd_atomic; 657 + dest_attr->min_rnr_timer = src_attr->min_rnr_timer; 658 + dest_attr->port_num = src_attr->port_num; 659 + dest_attr->timeout = src_attr->timeout; 660 + dest_attr->retry_cnt = src_attr->retry_cnt; 661 + dest_attr->rnr_retry = src_attr->rnr_retry; 662 + dest_attr->alt_port_num = src_attr->alt_port_num; 663 + dest_attr->alt_timeout = src_attr->alt_timeout; 664 + } 665 + 666 + static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file, 667 + const char __user *inbuf, 668 + int in_len, int out_len) 669 + { 670 + struct ib_ucm_init_qp_attr_resp resp; 671 + struct ib_ucm_init_qp_attr cmd; 672 + struct ib_ucm_context *ctx; 673 + struct ib_qp_attr qp_attr; 674 + int result = 0; 675 + 676 + if (out_len < sizeof(resp)) 677 + return -ENOSPC; 678 + 679 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 680 + return -EFAULT; 681 + 682 + ctx = ib_ucm_ctx_get(file, cmd.id); 683 + if (IS_ERR(ctx)) 684 + return PTR_ERR(ctx); 685 + 686 + resp.qp_attr_mask = 0; 687 + memset(&qp_attr, 0, sizeof qp_attr); 688 + qp_attr.qp_state = cmd.qp_state; 689 + result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); 690 + if (result) 691 + goto out; 692 + 693 + 
ib_ucm_copy_qp_attr(&resp, &qp_attr); 694 + 695 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 696 + &resp, sizeof(resp))) 697 + result = -EFAULT; 698 + 699 + out: 597 700 ib_ucm_ctx_put(ctx); 598 701 return result; 599 702 } ··· 889 808 890 809 ctx = ib_ucm_ctx_get(file, cmd.id); 891 810 if (!IS_ERR(ctx)) { 811 + ctx->uid = cmd.uid; 892 812 result = ib_send_cm_rep(ctx->cm_id, &param); 893 813 ib_ucm_ctx_put(ctx); 894 814 } else ··· 1168 1086 [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req, 1169 1087 [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep, 1170 1088 [IB_USER_CM_CMD_EVENT] = ib_ucm_event, 1089 + [IB_USER_CM_CMD_INIT_QP_ATTR] = ib_ucm_init_qp_attr, 1171 1090 }; 1172 1091 1173 1092 static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, ··· 1244 1161 1245 1162 down(&file->mutex); 1246 1163 while (!list_empty(&file->ctxs)) { 1247 - 1248 1164 ctx = list_entry(file->ctxs.next, 1249 1165 struct ib_ucm_context, file_list); 1250 - 1251 1166 up(&file->mutex); 1252 - ib_ucm_destroy_ctx(file, ctx->id); 1167 + 1168 + down(&ctx_id_mutex); 1169 + idr_remove(&ctx_id_table, ctx->id); 1170 + up(&ctx_id_mutex); 1171 + 1172 + ib_destroy_cm_id(ctx->cm_id); 1173 + ib_ucm_cleanup_events(ctx); 1174 + kfree(ctx); 1175 + 1253 1176 down(&file->mutex); 1254 1177 } 1255 1178 up(&file->mutex);
+4 -7
drivers/infiniband/core/ucm.h
··· 1 1 /* 2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 44 43 #include <rdma/ib_cm.h> 45 44 #include <rdma/ib_user_cm.h> 46 45 47 - #define IB_UCM_CM_ID_INVALID 0xffffffff 48 - 49 46 struct ib_ucm_file { 50 47 struct semaphore mutex; 51 48 struct file *filp; ··· 57 58 int id; 58 59 wait_queue_head_t wait; 59 60 atomic_t ref; 61 + int events_reported; 60 62 61 63 struct ib_ucm_file *file; 62 64 struct ib_cm_id *cm_id; 65 + __u64 uid; 63 66 64 67 struct list_head events; /* list of pending events. */ 65 68 struct list_head file_list; /* member in file ctx list */ ··· 72 71 struct list_head file_list; /* member in file event list */ 73 72 struct list_head ctx_list; /* member in ctx event list */ 74 73 74 + struct ib_cm_id *cm_id; 75 75 struct ib_ucm_event_resp resp; 76 76 void *data; 77 77 void *info; 78 78 int data_len; 79 79 int info_len; 80 - /* 81 - * new connection identifiers needs to be saved until 82 - * userspace can get a handle on them. 83 - */ 84 - struct ib_cm_id *cm_id; 85 80 }; 86 81 87 82 #endif /* UCM_H */
+69 -3
include/rdma/ib_user_cm.h
··· 1 1 /* 2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 38 37 39 38 #include <linux/types.h> 40 39 41 - #define IB_USER_CM_ABI_VERSION 1 40 + #define IB_USER_CM_ABI_VERSION 2 42 41 43 42 enum { 44 43 IB_USER_CM_CMD_CREATE_ID, ··· 61 60 IB_USER_CM_CMD_SEND_SIDR_REP, 62 61 63 62 IB_USER_CM_CMD_EVENT, 63 + IB_USER_CM_CMD_INIT_QP_ATTR, 64 64 }; 65 65 /* 66 66 * command ABI structures. ··· 73 71 }; 74 72 75 73 struct ib_ucm_create_id { 74 + __u64 uid; 76 75 __u64 response; 77 76 }; 78 77 ··· 82 79 }; 83 80 84 81 struct ib_ucm_destroy_id { 82 + __u64 response; 85 83 __u32 id; 84 + }; 85 + 86 + struct ib_ucm_destroy_id_resp { 87 + __u32 events_reported; 86 88 }; 87 89 88 90 struct ib_ucm_attr_id { ··· 100 92 __be64 service_mask; 101 93 __be32 local_id; 102 94 __be32 remote_id; 95 + }; 96 + 97 + struct ib_ucm_init_qp_attr { 98 + __u64 response; 99 + __u32 id; 100 + __u32 qp_state; 101 + }; 102 + 103 + struct ib_ucm_ah_attr { 104 + __u8 grh_dgid[16]; 105 + __u32 grh_flow_label; 106 + __u16 dlid; 107 + __u16 reserved; 108 + __u8 grh_sgid_index; 109 + __u8 grh_hop_limit; 110 + __u8 grh_traffic_class; 111 + __u8 sl; 112 + __u8 src_path_bits; 113 + __u8 static_rate; 114 + __u8 is_global; 115 + __u8 port_num; 116 + }; 117 + 118 + struct ib_ucm_init_qp_attr_resp { 119 + __u32 qp_attr_mask; 120 + __u32 qp_state; 121 + __u32 cur_qp_state; 122 + __u32 path_mtu; 123 + __u32 path_mig_state; 124 + __u32 qkey; 125 + __u32 rq_psn; 126 + __u32 sq_psn; 127 + __u32 dest_qp_num; 128 + __u32 qp_access_flags; 129 + 130 + struct ib_ucm_ah_attr ah_attr; 131 + struct ib_ucm_ah_attr alt_ah_attr; 132 + 133 + /* ib_qp_cap */ 134 + __u32 max_send_wr; 135 + __u32 max_recv_wr; 136 + __u32 max_send_sge; 137 + __u32 max_recv_sge; 138 + __u32 max_inline_data; 139 + 
140 + __u16 pkey_index; 141 + __u16 alt_pkey_index; 142 + __u8 en_sqd_async_notify; 143 + __u8 sq_draining; 144 + __u8 max_rd_atomic; 145 + __u8 max_dest_rd_atomic; 146 + __u8 min_rnr_timer; 147 + __u8 port_num; 148 + __u8 timeout; 149 + __u8 retry_cnt; 150 + __u8 rnr_retry; 151 + __u8 alt_port_num; 152 + __u8 alt_timeout; 103 153 }; 104 154 105 155 struct ib_ucm_listen { ··· 223 157 }; 224 158 225 159 struct ib_ucm_rep { 160 + __u64 uid; 226 161 __u64 data; 227 162 __u32 id; 228 163 __u32 qpn; ··· 299 232 }; 300 233 301 234 struct ib_ucm_req_event_resp { 302 - __u32 listen_id; 303 235 /* device */ 304 236 /* port */ 305 237 struct ib_ucm_path_rec primary_path; ··· 353 287 }; 354 288 355 289 struct ib_ucm_sidr_req_event_resp { 356 - __u32 listen_id; 357 290 /* device */ 358 291 /* port */ 359 292 __u16 pkey; ··· 372 307 #define IB_UCM_PRES_ALTERNATE 0x08 373 308 374 309 struct ib_ucm_event_resp { 310 + __u64 uid; 375 311 __u32 id; 376 312 __u32 event; 377 313 __u32 present;