Merge master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

17 files changed, 590 insertions(+), 251 deletions(-)
drivers/infiniband/Kconfig | +18 -7
··· 8 8 any protocols you wish to use as well as drivers for your 9 9 InfiniBand hardware. 10 10 11 - config INFINIBAND_USER_VERBS 12 - tristate "InfiniBand userspace verbs support" 11 + config INFINIBAND_USER_MAD 12 + tristate "InfiniBand userspace MAD support" 13 13 depends on INFINIBAND 14 14 ---help--- 15 - Userspace InfiniBand verbs support. This is the kernel side 16 - of userspace verbs, which allows userspace processes to 17 - directly access InfiniBand hardware for fast-path 18 - operations. You will also need libibverbs and a hardware 19 - driver library from <http://www.openib.org>. 15 + Userspace InfiniBand Management Datagram (MAD) support. This 16 + is the kernel side of the userspace MAD support, which allows 17 + userspace processes to send and receive MADs. You will also 18 + need libibumad from <http://www.openib.org>. 19 + 20 + config INFINIBAND_USER_ACCESS 21 + tristate "InfiniBand userspace access (verbs and CM)" 22 + depends on INFINIBAND 23 + ---help--- 24 + Userspace InfiniBand access support. This enables the 25 + kernel side of userspace verbs and the userspace 26 + communication manager (CM). This allows userspace processes 27 + to set up connections and directly access InfiniBand 28 + hardware for fast-path operations. You will also need 29 + libibverbs, libibcm and a hardware driver library from 30 + <http://www.openib.org>. 20 31 21 32 source "drivers/infiniband/hw/mthca/Kconfig" 22 33
drivers/infiniband/core/Makefile | +3 -2
··· 1 1 obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ 2 - ib_cm.o ib_umad.o ib_ucm.o 3 - obj-$(CONFIG_INFINIBAND_USER_VERBS) += ib_uverbs.o 2 + ib_cm.o 3 + obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o 4 + obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o 4 5 5 6 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ 6 7 device.o fmr_pool.o cache.o
drivers/infiniband/core/cm.c | +4 -1
··· 173 173 if (IS_ERR(ah)) 174 174 return PTR_ERR(ah); 175 175 176 - m = ib_create_send_mad(mad_agent, 1, cm_id_priv->av.pkey_index, 176 + m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 177 + cm_id_priv->av.pkey_index, 177 178 ah, 0, sizeof(struct ib_mad_hdr), 178 179 sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr), 179 180 GFP_ATOMIC); ··· 537 536 cm_id_priv->id.state = IB_CM_IDLE; 538 537 cm_id_priv->id.cm_handler = cm_handler; 539 538 cm_id_priv->id.context = context; 539 + cm_id_priv->id.remote_cm_qpn = 1; 540 540 ret = cm_alloc_id(cm_id_priv); 541 541 if (ret) 542 542 goto error; ··· 1315 1313 cm_deref_id(listen_cm_id_priv); 1316 1314 cm_cleanup_timewait(cm_id_priv->timewait_info); 1317 1315 error2: kfree(cm_id_priv->timewait_info); 1316 + cm_id_priv->timewait_info = NULL; 1318 1317 error1: ib_destroy_cm_id(&cm_id_priv->id); 1319 1318 return ret; 1320 1319 }
drivers/infiniband/core/mad_rmpp.c | +3 -1
··· 593 593 rmpp_mad->rmpp_hdr.paylen_newwin = 594 594 cpu_to_be32(mad_send_wr->total_seg * 595 595 (sizeof(struct ib_rmpp_mad) - 596 - offsetof(struct ib_rmpp_mad, data))); 596 + offsetof(struct ib_rmpp_mad, data)) - 597 + mad_send_wr->pad); 597 598 mad_send_wr->sg_list[0].length = sizeof(struct ib_rmpp_mad); 598 599 } else { 599 600 mad_send_wr->send_wr.num_sge = 2; ··· 603 602 mad_send_wr->sg_list[1].length = sizeof(struct ib_rmpp_mad) - 604 603 mad_send_wr->data_offset; 605 604 mad_send_wr->sg_list[1].lkey = mad_send_wr->sg_list[0].lkey; 605 + rmpp_mad->rmpp_hdr.paylen_newwin = 0; 606 606 } 607 607 608 608 if (mad_send_wr->seg_num == mad_send_wr->total_seg) {
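The mad_rmpp.c change subtracts the last segment's padding from the PayloadLength advertised in the first RMPP segment and initializes it to zero for the later segments (the final segment's value is set separately, outside this hunk). As a rough illustration only, the arithmetic looks like the stand-alone sketch below; RMPP_SEG_PAYLOAD stands in for sizeof(struct ib_rmpp_mad) - offsetof(struct ib_rmpp_mad, data), and the 220-byte value and the inputs in main() are assumed examples, not taken from the diff.

/*
 * Stand-alone sketch (not kernel code) of the PayloadLength arithmetic.
 */
#include <stdio.h>

#define RMPP_SEG_PAYLOAD 220u   /* assumed per-segment payload in bytes */

/* First segment: advertise the total payload, i.e. every segment's
 * payload minus the padding that only the last segment carries.
 * Before the fix the "- pad" term was missing. */
static unsigned int first_seg_paylen(unsigned int total_seg, unsigned int pad)
{
	return total_seg * RMPP_SEG_PAYLOAD - pad;
}

/* Non-first segments now start out advertising 0, per the second hunk. */
static unsigned int mid_seg_paylen(void)
{
	return 0;
}

int main(void)
{
	/* e.g. 500 bytes of payload in 3 segments: 3 * 220 = 660 bytes
	 * of segment space, of which 160 bytes are trailing padding. */
	printf("first segment paylen_newwin = %u\n", first_seg_paylen(3, 160));
	printf("middle segment paylen_newwin = %u\n", mid_seg_paylen());
	return 0;
}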
drivers/infiniband/core/sa_query.c | +2 -28
··· 113 113 static spinlock_t tid_lock; 114 114 static u32 tid; 115 115 116 - enum { 117 - IB_SA_ATTR_CLASS_PORTINFO = 0x01, 118 - IB_SA_ATTR_NOTICE = 0x02, 119 - IB_SA_ATTR_INFORM_INFO = 0x03, 120 - IB_SA_ATTR_NODE_REC = 0x11, 121 - IB_SA_ATTR_PORT_INFO_REC = 0x12, 122 - IB_SA_ATTR_SL2VL_REC = 0x13, 123 - IB_SA_ATTR_SWITCH_REC = 0x14, 124 - IB_SA_ATTR_LINEAR_FDB_REC = 0x15, 125 - IB_SA_ATTR_RANDOM_FDB_REC = 0x16, 126 - IB_SA_ATTR_MCAST_FDB_REC = 0x17, 127 - IB_SA_ATTR_SM_INFO_REC = 0x18, 128 - IB_SA_ATTR_LINK_REC = 0x20, 129 - IB_SA_ATTR_GUID_INFO_REC = 0x30, 130 - IB_SA_ATTR_SERVICE_REC = 0x31, 131 - IB_SA_ATTR_PARTITION_REC = 0x33, 132 - IB_SA_ATTR_RANGE_REC = 0x34, 133 - IB_SA_ATTR_PATH_REC = 0x35, 134 - IB_SA_ATTR_VL_ARB_REC = 0x36, 135 - IB_SA_ATTR_MC_GROUP_REC = 0x37, 136 - IB_SA_ATTR_MC_MEMBER_REC = 0x38, 137 - IB_SA_ATTR_TRACE_REC = 0x39, 138 - IB_SA_ATTR_MULTI_PATH_REC = 0x3a, 139 - IB_SA_ATTR_SERVICE_ASSOC_REC = 0x3b 140 - }; 141 - 142 116 #define PATH_REC_FIELD(field) \ 143 117 .struct_offset_bytes = offsetof(struct ib_sa_path_rec, field), \ 144 118 .struct_size_bytes = sizeof ((struct ib_sa_path_rec *) 0)->field, \ ··· 405 431 event->event == IB_EVENT_LID_CHANGE || 406 432 event->event == IB_EVENT_PKEY_CHANGE || 407 433 event->event == IB_EVENT_SM_CHANGE) { 408 - struct ib_sa_device *sa_dev = 409 - ib_get_client_data(event->device, &sa_client); 434 + struct ib_sa_device *sa_dev; 435 + sa_dev = container_of(handler, typeof(*sa_dev), event_handler); 410 436 411 437 schedule_work(&sa_dev->port[event->element.port_num - 412 438 sa_dev->start_port].update_task);
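The sa_query.c event handler now recovers its ib_sa_device from the embedded ib_event_handler with container_of() instead of an ib_get_client_data() lookup. A minimal user-space illustration of that pattern follows; the struct names and the trivial callback are stand-ins, and only the container_of() idiom itself matches the kernel change.

/*
 * User-space illustration of the container_of() pattern; all types here
 * are stand-ins, not the InfiniBand types.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct event_handler {                 /* stands in for ib_event_handler */
	void (*fn)(struct event_handler *handler);
};

struct sa_device {                     /* stands in for ib_sa_device */
	int start_port;
	struct event_handler event_handler;
};

static void on_event(struct event_handler *handler)
{
	/* Recover the enclosing device from the embedded member, the way
	 * the sa_query.c handler now does, with no client-data lookup. */
	struct sa_device *sa_dev =
		container_of(handler, struct sa_device, event_handler);

	printf("start_port = %d\n", sa_dev->start_port);
}

int main(void)
{
	struct sa_device dev = {
		.start_port    = 1,
		.event_handler = { .fn = on_event },
	};

	dev.event_handler.fn(&dev.event_handler);
	return 0;
}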
drivers/infiniband/core/ucm.c | +189 -100
··· 72 72 73 73 static struct semaphore ctx_id_mutex; 74 74 static struct idr ctx_id_table; 75 - static int ctx_id_rover = 0; 76 75 77 76 static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id) 78 77 { ··· 96 97 wake_up(&ctx->wait); 97 98 } 98 99 99 - static ssize_t ib_ucm_destroy_ctx(struct ib_ucm_file *file, int id) 100 + static inline int ib_ucm_new_cm_id(int event) 100 101 { 101 - struct ib_ucm_context *ctx; 102 + return event == IB_CM_REQ_RECEIVED || event == IB_CM_SIDR_REQ_RECEIVED; 103 + } 104 + 105 + static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx) 106 + { 102 107 struct ib_ucm_event *uevent; 103 108 104 - down(&ctx_id_mutex); 105 - ctx = idr_find(&ctx_id_table, id); 106 - if (!ctx) 107 - ctx = ERR_PTR(-ENOENT); 108 - else if (ctx->file != file) 109 - ctx = ERR_PTR(-EINVAL); 110 - else 111 - idr_remove(&ctx_id_table, ctx->id); 112 - up(&ctx_id_mutex); 113 - 114 - if (IS_ERR(ctx)) 115 - return PTR_ERR(ctx); 116 - 117 - atomic_dec(&ctx->ref); 118 - wait_event(ctx->wait, !atomic_read(&ctx->ref)); 119 - 120 - /* No new events will be generated after destroying the cm_id. */ 121 - if (!IS_ERR(ctx->cm_id)) 122 - ib_destroy_cm_id(ctx->cm_id); 123 - 124 - /* Cleanup events not yet reported to the user. */ 125 - down(&file->mutex); 109 + down(&ctx->file->mutex); 126 110 list_del(&ctx->file_list); 127 111 while (!list_empty(&ctx->events)) { 128 112 ··· 115 133 list_del(&uevent->ctx_list); 116 134 117 135 /* clear incoming connections. */ 118 - if (uevent->cm_id) 136 + if (ib_ucm_new_cm_id(uevent->resp.event)) 119 137 ib_destroy_cm_id(uevent->cm_id); 120 138 121 139 kfree(uevent); 122 140 } 123 - up(&file->mutex); 124 - 125 - kfree(ctx); 126 - return 0; 141 + up(&ctx->file->mutex); 127 142 } 128 143 129 144 static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file) ··· 132 153 if (!ctx) 133 154 return NULL; 134 155 156 + memset(ctx, 0, sizeof *ctx); 135 157 atomic_set(&ctx->ref, 1); 136 158 init_waitqueue_head(&ctx->wait); 137 159 ctx->file = file; 138 - 139 160 INIT_LIST_HEAD(&ctx->events); 140 161 141 - list_add_tail(&ctx->file_list, &file->ctxs); 162 + do { 163 + result = idr_pre_get(&ctx_id_table, GFP_KERNEL); 164 + if (!result) 165 + goto error; 142 166 143 - ctx_id_rover = (ctx_id_rover + 1) & INT_MAX; 144 - retry: 145 - result = idr_pre_get(&ctx_id_table, GFP_KERNEL); 146 - if (!result) 147 - goto error; 167 + down(&ctx_id_mutex); 168 + result = idr_get_new(&ctx_id_table, ctx, &ctx->id); 169 + up(&ctx_id_mutex); 170 + } while (result == -EAGAIN); 148 171 149 - down(&ctx_id_mutex); 150 - result = idr_get_new_above(&ctx_id_table, ctx, ctx_id_rover, &ctx->id); 151 - up(&ctx_id_mutex); 152 - 153 - if (result == -EAGAIN) 154 - goto retry; 155 172 if (result) 156 173 goto error; 157 174 175 + list_add_tail(&ctx->file_list, &file->ctxs); 158 176 ucm_dbg("Allocated CM ID <%d>\n", ctx->id); 159 - 160 177 return ctx; 161 - error: 162 - list_del(&ctx->file_list); 163 - kfree(ctx); 164 178 179 + error: 180 + kfree(ctx); 165 181 return NULL; 166 182 } 167 183 /* ··· 193 219 kpath->packet_life_time_selector; 194 220 } 195 221 196 - static void ib_ucm_event_req_get(struct ib_ucm_context *ctx, 197 - struct ib_ucm_req_event_resp *ureq, 222 + static void ib_ucm_event_req_get(struct ib_ucm_req_event_resp *ureq, 198 223 struct ib_cm_req_event_param *kreq) 199 224 { 200 - ureq->listen_id = ctx->id; 201 - 202 225 ureq->remote_ca_guid = kreq->remote_ca_guid; 203 226 ureq->remote_qkey = kreq->remote_qkey; 204 227 ureq->remote_qpn = kreq->remote_qpn; ··· 230 
259 urep->srq = krep->srq; 231 260 } 232 261 233 - static void ib_ucm_event_sidr_req_get(struct ib_ucm_context *ctx, 234 - struct ib_ucm_sidr_req_event_resp *ureq, 235 - struct ib_cm_sidr_req_event_param *kreq) 236 - { 237 - ureq->listen_id = ctx->id; 238 - ureq->pkey = kreq->pkey; 239 - } 240 - 241 262 static void ib_ucm_event_sidr_rep_get(struct ib_ucm_sidr_rep_event_resp *urep, 242 263 struct ib_cm_sidr_rep_event_param *krep) 243 264 { ··· 238 275 urep->qpn = krep->qpn; 239 276 }; 240 277 241 - static int ib_ucm_event_process(struct ib_ucm_context *ctx, 242 - struct ib_cm_event *evt, 278 + static int ib_ucm_event_process(struct ib_cm_event *evt, 243 279 struct ib_ucm_event *uvt) 244 280 { 245 281 void *info = NULL; 246 282 247 283 switch (evt->event) { 248 284 case IB_CM_REQ_RECEIVED: 249 - ib_ucm_event_req_get(ctx, &uvt->resp.u.req_resp, 285 + ib_ucm_event_req_get(&uvt->resp.u.req_resp, 250 286 &evt->param.req_rcvd); 251 287 uvt->data_len = IB_CM_REQ_PRIVATE_DATA_SIZE; 252 288 uvt->resp.present = IB_UCM_PRES_PRIMARY; ··· 293 331 info = evt->param.apr_rcvd.apr_info; 294 332 break; 295 333 case IB_CM_SIDR_REQ_RECEIVED: 296 - ib_ucm_event_sidr_req_get(ctx, &uvt->resp.u.sidr_req_resp, 297 - &evt->param.sidr_req_rcvd); 334 + uvt->resp.u.sidr_req_resp.pkey = 335 + evt->param.sidr_req_rcvd.pkey; 298 336 uvt->data_len = IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE; 299 337 break; 300 338 case IB_CM_SIDR_REP_RECEIVED: ··· 340 378 struct ib_ucm_event *uevent; 341 379 struct ib_ucm_context *ctx; 342 380 int result = 0; 343 - int id; 344 381 345 382 ctx = cm_id->context; 346 - 347 - if (event->event == IB_CM_REQ_RECEIVED || 348 - event->event == IB_CM_SIDR_REQ_RECEIVED) 349 - id = IB_UCM_CM_ID_INVALID; 350 - else 351 - id = ctx->id; 352 383 353 384 uevent = kmalloc(sizeof(*uevent), GFP_KERNEL); 354 385 if (!uevent) 355 386 goto err1; 356 387 357 388 memset(uevent, 0, sizeof(*uevent)); 358 - uevent->resp.id = id; 389 + uevent->ctx = ctx; 390 + uevent->cm_id = cm_id; 391 + uevent->resp.uid = ctx->uid; 392 + uevent->resp.id = ctx->id; 359 393 uevent->resp.event = event->event; 360 394 361 - result = ib_ucm_event_process(ctx, event, uevent); 395 + result = ib_ucm_event_process(event, uevent); 362 396 if (result) 363 397 goto err2; 364 - 365 - uevent->ctx = ctx; 366 - uevent->cm_id = (id == IB_UCM_CM_ID_INVALID) ? 
cm_id : NULL; 367 398 368 399 down(&ctx->file->mutex); 369 400 list_add_tail(&uevent->file_list, &ctx->file->events); ··· 369 414 kfree(uevent); 370 415 err1: 371 416 /* Destroy new cm_id's */ 372 - return (id == IB_UCM_CM_ID_INVALID); 417 + return ib_ucm_new_cm_id(event->event); 373 418 } 374 419 375 420 static ssize_t ib_ucm_event(struct ib_ucm_file *file, ··· 378 423 { 379 424 struct ib_ucm_context *ctx; 380 425 struct ib_ucm_event_get cmd; 381 - struct ib_ucm_event *uevent = NULL; 426 + struct ib_ucm_event *uevent; 382 427 int result = 0; 383 428 DEFINE_WAIT(wait); 384 429 ··· 391 436 * wait 392 437 */ 393 438 down(&file->mutex); 394 - 395 439 while (list_empty(&file->events)) { 396 440 397 441 if (file->filp->f_flags & O_NONBLOCK) { ··· 417 463 418 464 uevent = list_entry(file->events.next, struct ib_ucm_event, file_list); 419 465 420 - if (!uevent->cm_id) 421 - goto user; 466 + if (ib_ucm_new_cm_id(uevent->resp.event)) { 467 + ctx = ib_ucm_ctx_alloc(file); 468 + if (!ctx) { 469 + result = -ENOMEM; 470 + goto done; 471 + } 422 472 423 - ctx = ib_ucm_ctx_alloc(file); 424 - if (!ctx) { 425 - result = -ENOMEM; 426 - goto done; 473 + ctx->cm_id = uevent->cm_id; 474 + ctx->cm_id->context = ctx; 475 + uevent->resp.id = ctx->id; 427 476 } 428 477 429 - ctx->cm_id = uevent->cm_id; 430 - ctx->cm_id->context = ctx; 431 - 432 - uevent->resp.id = ctx->id; 433 - 434 - user: 435 478 if (copy_to_user((void __user *)(unsigned long)cmd.response, 436 479 &uevent->resp, sizeof(uevent->resp))) { 437 480 result = -EFAULT; ··· 436 485 } 437 486 438 487 if (uevent->data) { 439 - 440 488 if (cmd.data_len < uevent->data_len) { 441 489 result = -ENOMEM; 442 490 goto done; 443 491 } 444 - 445 492 if (copy_to_user((void __user *)(unsigned long)cmd.data, 446 493 uevent->data, uevent->data_len)) { 447 494 result = -EFAULT; ··· 448 499 } 449 500 450 501 if (uevent->info) { 451 - 452 502 if (cmd.info_len < uevent->info_len) { 453 503 result = -ENOMEM; 454 504 goto done; 455 505 } 456 - 457 506 if (copy_to_user((void __user *)(unsigned long)cmd.info, 458 507 uevent->info, uevent->info_len)) { 459 508 result = -EFAULT; ··· 461 514 462 515 list_del(&uevent->file_list); 463 516 list_del(&uevent->ctx_list); 517 + uevent->ctx->events_reported++; 464 518 465 519 kfree(uevent->data); 466 520 kfree(uevent->info); ··· 493 545 if (!ctx) 494 546 return -ENOMEM; 495 547 548 + ctx->uid = cmd.uid; 496 549 ctx->cm_id = ib_create_cm_id(ib_ucm_event_handler, ctx); 497 550 if (IS_ERR(ctx->cm_id)) { 498 551 result = PTR_ERR(ctx->cm_id); ··· 510 561 return 0; 511 562 512 563 err: 513 - ib_ucm_destroy_ctx(file, ctx->id); 564 + down(&ctx_id_mutex); 565 + idr_remove(&ctx_id_table, ctx->id); 566 + up(&ctx_id_mutex); 567 + 568 + if (!IS_ERR(ctx->cm_id)) 569 + ib_destroy_cm_id(ctx->cm_id); 570 + 571 + kfree(ctx); 514 572 return result; 515 573 } 516 574 ··· 526 570 int in_len, int out_len) 527 571 { 528 572 struct ib_ucm_destroy_id cmd; 573 + struct ib_ucm_destroy_id_resp resp; 574 + struct ib_ucm_context *ctx; 575 + int result = 0; 576 + 577 + if (out_len < sizeof(resp)) 578 + return -ENOSPC; 529 579 530 580 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 531 581 return -EFAULT; 532 582 533 - return ib_ucm_destroy_ctx(file, cmd.id); 583 + down(&ctx_id_mutex); 584 + ctx = idr_find(&ctx_id_table, cmd.id); 585 + if (!ctx) 586 + ctx = ERR_PTR(-ENOENT); 587 + else if (ctx->file != file) 588 + ctx = ERR_PTR(-EINVAL); 589 + else 590 + idr_remove(&ctx_id_table, ctx->id); 591 + up(&ctx_id_mutex); 592 + 593 + if (IS_ERR(ctx)) 594 + return PTR_ERR(ctx); 
595 + 596 + atomic_dec(&ctx->ref); 597 + wait_event(ctx->wait, !atomic_read(&ctx->ref)); 598 + 599 + /* No new events will be generated after destroying the cm_id. */ 600 + ib_destroy_cm_id(ctx->cm_id); 601 + /* Cleanup events not yet reported to the user. */ 602 + ib_ucm_cleanup_events(ctx); 603 + 604 + resp.events_reported = ctx->events_reported; 605 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 606 + &resp, sizeof(resp))) 607 + result = -EFAULT; 608 + 609 + kfree(ctx); 610 + return result; 534 611 } 535 612 536 613 static ssize_t ib_ucm_attr_id(struct ib_ucm_file *file, ··· 594 605 &resp, sizeof(resp))) 595 606 result = -EFAULT; 596 607 608 + ib_ucm_ctx_put(ctx); 609 + return result; 610 + } 611 + 612 + static void ib_ucm_copy_ah_attr(struct ib_ucm_ah_attr *dest_attr, 613 + struct ib_ah_attr *src_attr) 614 + { 615 + memcpy(dest_attr->grh_dgid, src_attr->grh.dgid.raw, 616 + sizeof src_attr->grh.dgid); 617 + dest_attr->grh_flow_label = src_attr->grh.flow_label; 618 + dest_attr->grh_sgid_index = src_attr->grh.sgid_index; 619 + dest_attr->grh_hop_limit = src_attr->grh.hop_limit; 620 + dest_attr->grh_traffic_class = src_attr->grh.traffic_class; 621 + 622 + dest_attr->dlid = src_attr->dlid; 623 + dest_attr->sl = src_attr->sl; 624 + dest_attr->src_path_bits = src_attr->src_path_bits; 625 + dest_attr->static_rate = src_attr->static_rate; 626 + dest_attr->is_global = (src_attr->ah_flags & IB_AH_GRH); 627 + dest_attr->port_num = src_attr->port_num; 628 + } 629 + 630 + static void ib_ucm_copy_qp_attr(struct ib_ucm_init_qp_attr_resp *dest_attr, 631 + struct ib_qp_attr *src_attr) 632 + { 633 + dest_attr->cur_qp_state = src_attr->cur_qp_state; 634 + dest_attr->path_mtu = src_attr->path_mtu; 635 + dest_attr->path_mig_state = src_attr->path_mig_state; 636 + dest_attr->qkey = src_attr->qkey; 637 + dest_attr->rq_psn = src_attr->rq_psn; 638 + dest_attr->sq_psn = src_attr->sq_psn; 639 + dest_attr->dest_qp_num = src_attr->dest_qp_num; 640 + dest_attr->qp_access_flags = src_attr->qp_access_flags; 641 + 642 + dest_attr->max_send_wr = src_attr->cap.max_send_wr; 643 + dest_attr->max_recv_wr = src_attr->cap.max_recv_wr; 644 + dest_attr->max_send_sge = src_attr->cap.max_send_sge; 645 + dest_attr->max_recv_sge = src_attr->cap.max_recv_sge; 646 + dest_attr->max_inline_data = src_attr->cap.max_inline_data; 647 + 648 + ib_ucm_copy_ah_attr(&dest_attr->ah_attr, &src_attr->ah_attr); 649 + ib_ucm_copy_ah_attr(&dest_attr->alt_ah_attr, &src_attr->alt_ah_attr); 650 + 651 + dest_attr->pkey_index = src_attr->pkey_index; 652 + dest_attr->alt_pkey_index = src_attr->alt_pkey_index; 653 + dest_attr->en_sqd_async_notify = src_attr->en_sqd_async_notify; 654 + dest_attr->sq_draining = src_attr->sq_draining; 655 + dest_attr->max_rd_atomic = src_attr->max_rd_atomic; 656 + dest_attr->max_dest_rd_atomic = src_attr->max_dest_rd_atomic; 657 + dest_attr->min_rnr_timer = src_attr->min_rnr_timer; 658 + dest_attr->port_num = src_attr->port_num; 659 + dest_attr->timeout = src_attr->timeout; 660 + dest_attr->retry_cnt = src_attr->retry_cnt; 661 + dest_attr->rnr_retry = src_attr->rnr_retry; 662 + dest_attr->alt_port_num = src_attr->alt_port_num; 663 + dest_attr->alt_timeout = src_attr->alt_timeout; 664 + } 665 + 666 + static ssize_t ib_ucm_init_qp_attr(struct ib_ucm_file *file, 667 + const char __user *inbuf, 668 + int in_len, int out_len) 669 + { 670 + struct ib_ucm_init_qp_attr_resp resp; 671 + struct ib_ucm_init_qp_attr cmd; 672 + struct ib_ucm_context *ctx; 673 + struct ib_qp_attr qp_attr; 674 + int result = 0; 675 + 676 + 
if (out_len < sizeof(resp)) 677 + return -ENOSPC; 678 + 679 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 680 + return -EFAULT; 681 + 682 + ctx = ib_ucm_ctx_get(file, cmd.id); 683 + if (IS_ERR(ctx)) 684 + return PTR_ERR(ctx); 685 + 686 + resp.qp_attr_mask = 0; 687 + memset(&qp_attr, 0, sizeof qp_attr); 688 + qp_attr.qp_state = cmd.qp_state; 689 + result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); 690 + if (result) 691 + goto out; 692 + 693 + ib_ucm_copy_qp_attr(&resp, &qp_attr); 694 + 695 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 696 + &resp, sizeof(resp))) 697 + result = -EFAULT; 698 + 699 + out: 597 700 ib_ucm_ctx_put(ctx); 598 701 return result; 599 702 } ··· 889 808 890 809 ctx = ib_ucm_ctx_get(file, cmd.id); 891 810 if (!IS_ERR(ctx)) { 811 + ctx->uid = cmd.uid; 892 812 result = ib_send_cm_rep(ctx->cm_id, &param); 893 813 ib_ucm_ctx_put(ctx); 894 814 } else ··· 1168 1086 [IB_USER_CM_CMD_SEND_SIDR_REQ] = ib_ucm_send_sidr_req, 1169 1087 [IB_USER_CM_CMD_SEND_SIDR_REP] = ib_ucm_send_sidr_rep, 1170 1088 [IB_USER_CM_CMD_EVENT] = ib_ucm_event, 1089 + [IB_USER_CM_CMD_INIT_QP_ATTR] = ib_ucm_init_qp_attr, 1171 1090 }; 1172 1091 1173 1092 static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, ··· 1244 1161 1245 1162 down(&file->mutex); 1246 1163 while (!list_empty(&file->ctxs)) { 1247 - 1248 1164 ctx = list_entry(file->ctxs.next, 1249 1165 struct ib_ucm_context, file_list); 1250 - 1251 1166 up(&file->mutex); 1252 - ib_ucm_destroy_ctx(file, ctx->id); 1167 + 1168 + down(&ctx_id_mutex); 1169 + idr_remove(&ctx_id_table, ctx->id); 1170 + up(&ctx_id_mutex); 1171 + 1172 + ib_destroy_cm_id(ctx->cm_id); 1173 + ib_ucm_cleanup_events(ctx); 1174 + kfree(ctx); 1175 + 1253 1176 down(&file->mutex); 1254 1177 } 1255 1178 up(&file->mutex);
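ib_ucm_ctx_alloc() now assigns context IDs with a plain idr_pre_get()/idr_get_new() loop that retries on -EAGAIN, dropping the old ctx_id_rover scheme. The sketch below shows only the retry shape of that pattern in user space; the idr structure and both functions are simplified stand-ins (including a one-shot simulated race), not the kernel idr API, and the locking that brackets idr_get_new() is reduced to comments.

#include <errno.h>
#include <stdio.h>

struct idr { int next_id; int preloaded; };   /* stand-in, not the kernel idr */

static int steal_preload_once = 1;     /* simulate one racing allocator */

static int idr_pre_get(struct idr *idr)
{
	idr->preloaded = 1;            /* preload memory outside the lock */
	return 1;                      /* 1 = success, 0 = out of memory */
}

static int idr_get_new(struct idr *idr, void *ptr, int *id)
{
	(void)ptr;
	if (steal_preload_once) {      /* another thread used our preload */
		steal_preload_once = 0;
		idr->preloaded = 0;
	}
	if (!idr->preloaded)
		return -EAGAIN;        /* caller must pre-get and retry */
	idr->preloaded = 0;
	*id = idr->next_id++;
	return 0;
}

int main(void)
{
	struct idr table = { 0, 0 };
	int id, result;

	do {
		if (!idr_pre_get(&table))
			return 1;      /* out of memory: give up */
		/* ctx_id_mutex would be taken here in ucm.c ... */
		result = idr_get_new(&table, NULL, &id);
		/* ... and released here */
	} while (result == -EAGAIN);

	printf("allocated id %d\n", id);
	return 0;
}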
drivers/infiniband/core/ucm.h | +4 -7
··· 1 1 /* 2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 44 43 #include <rdma/ib_cm.h> 45 44 #include <rdma/ib_user_cm.h> 46 45 47 - #define IB_UCM_CM_ID_INVALID 0xffffffff 48 - 49 46 struct ib_ucm_file { 50 47 struct semaphore mutex; 51 48 struct file *filp; ··· 57 58 int id; 58 59 wait_queue_head_t wait; 59 60 atomic_t ref; 61 + int events_reported; 60 62 61 63 struct ib_ucm_file *file; 62 64 struct ib_cm_id *cm_id; 65 + __u64 uid; 63 66 64 67 struct list_head events; /* list of pending events. */ 65 68 struct list_head file_list; /* member in file ctx list */ ··· 72 71 struct list_head file_list; /* member in file event list */ 73 72 struct list_head ctx_list; /* member in ctx event list */ 74 73 74 + struct ib_cm_id *cm_id; 75 75 struct ib_ucm_event_resp resp; 76 76 void *data; 77 77 void *info; 78 78 int data_len; 79 79 int info_len; 80 - /* 81 - * new connection identifiers needs to be saved until 82 - * userspace can get a handle on them. 83 - */ 84 - struct ib_cm_id *cm_id; 85 80 }; 86 81 87 82 #endif /* UCM_H */
drivers/infiniband/core/uverbs.h | +17 -9
··· 76 76 struct ib_uverbs_event_file comp_file[1]; 77 77 }; 78 78 79 - struct ib_uverbs_async_event { 80 - struct ib_uverbs_async_event_desc desc; 79 + struct ib_uverbs_event { 80 + union { 81 + struct ib_uverbs_async_event_desc async; 82 + struct ib_uverbs_comp_event_desc comp; 83 + } desc; 81 84 struct list_head list; 85 + struct list_head obj_list; 86 + u32 *counter; 82 87 }; 83 88 84 - struct ib_uverbs_comp_event { 85 - struct ib_uverbs_comp_event_desc desc; 86 - struct list_head list; 89 + struct ib_uevent_object { 90 + struct ib_uobject uobject; 91 + struct list_head event_list; 92 + u32 events_reported; 87 93 }; 88 94 89 - struct ib_uobject_mr { 90 - struct ib_uobject uobj; 91 - struct page *page_list; 92 - struct scatterlist *sg_list; 95 + struct ib_ucq_object { 96 + struct ib_uobject uobject; 97 + struct list_head comp_list; 98 + struct list_head async_list; 99 + u32 comp_events_reported; 100 + u32 async_events_reported; 93 101 }; 94 102 95 103 extern struct semaphore ib_uverbs_idr_mutex;
drivers/infiniband/core/uverbs_cmd.c | +112 -43
··· 590 590 struct ib_uverbs_create_cq cmd; 591 591 struct ib_uverbs_create_cq_resp resp; 592 592 struct ib_udata udata; 593 - struct ib_uobject *uobj; 593 + struct ib_ucq_object *uobj; 594 594 struct ib_cq *cq; 595 595 int ret; 596 596 ··· 611 611 if (!uobj) 612 612 return -ENOMEM; 613 613 614 - uobj->user_handle = cmd.user_handle; 615 - uobj->context = file->ucontext; 614 + uobj->uobject.user_handle = cmd.user_handle; 615 + uobj->uobject.context = file->ucontext; 616 + uobj->comp_events_reported = 0; 617 + uobj->async_events_reported = 0; 618 + INIT_LIST_HEAD(&uobj->comp_list); 619 + INIT_LIST_HEAD(&uobj->async_list); 616 620 617 621 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe, 618 622 file->ucontext, &udata); ··· 626 622 } 627 623 628 624 cq->device = file->device->ib_dev; 629 - cq->uobject = uobj; 625 + cq->uobject = &uobj->uobject; 630 626 cq->comp_handler = ib_uverbs_comp_handler; 631 627 cq->event_handler = ib_uverbs_cq_event_handler; 632 628 cq->cq_context = file; ··· 639 635 } 640 636 641 637 down(&ib_uverbs_idr_mutex); 642 - ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->id); 638 + ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id); 643 639 up(&ib_uverbs_idr_mutex); 644 640 645 641 if (ret == -EAGAIN) ··· 648 644 goto err_cq; 649 645 650 646 spin_lock_irq(&file->ucontext->lock); 651 - list_add_tail(&uobj->list, &file->ucontext->cq_list); 647 + list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list); 652 648 spin_unlock_irq(&file->ucontext->lock); 653 649 654 650 memset(&resp, 0, sizeof resp); 655 - resp.cq_handle = uobj->id; 651 + resp.cq_handle = uobj->uobject.id; 656 652 resp.cqe = cq->cqe; 657 653 658 654 if (copy_to_user((void __user *) (unsigned long) cmd.response, ··· 665 661 666 662 err_list: 667 663 spin_lock_irq(&file->ucontext->lock); 668 - list_del(&uobj->list); 664 + list_del(&uobj->uobject.list); 669 665 spin_unlock_irq(&file->ucontext->lock); 670 666 671 667 down(&ib_uverbs_idr_mutex); 672 - idr_remove(&ib_uverbs_cq_idr, uobj->id); 668 + idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id); 673 669 up(&ib_uverbs_idr_mutex); 674 670 675 671 err_cq: ··· 684 680 const char __user *buf, int in_len, 685 681 int out_len) 686 682 { 687 - struct ib_uverbs_destroy_cq cmd; 688 - struct ib_cq *cq; 689 - struct ib_uobject *uobj; 690 - int ret = -EINVAL; 683 + struct ib_uverbs_destroy_cq cmd; 684 + struct ib_uverbs_destroy_cq_resp resp; 685 + struct ib_cq *cq; 686 + struct ib_ucq_object *uobj; 687 + struct ib_uverbs_event *evt, *tmp; 688 + u64 user_handle; 689 + int ret = -EINVAL; 691 690 692 691 if (copy_from_user(&cmd, buf, sizeof cmd)) 693 692 return -EFAULT; 693 + 694 + memset(&resp, 0, sizeof resp); 694 695 695 696 down(&ib_uverbs_idr_mutex); 696 697 ··· 703 694 if (!cq || cq->uobject->context != file->ucontext) 704 695 goto out; 705 696 706 - uobj = cq->uobject; 697 + user_handle = cq->uobject->user_handle; 698 + uobj = container_of(cq->uobject, struct ib_ucq_object, uobject); 707 699 708 700 ret = ib_destroy_cq(cq); 709 701 if (ret) ··· 713 703 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle); 714 704 715 705 spin_lock_irq(&file->ucontext->lock); 716 - list_del(&uobj->list); 706 + list_del(&uobj->uobject.list); 717 707 spin_unlock_irq(&file->ucontext->lock); 718 708 709 + spin_lock_irq(&file->comp_file[0].lock); 710 + list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) { 711 + list_del(&evt->list); 712 + kfree(evt); 713 + } 714 + spin_unlock_irq(&file->comp_file[0].lock); 715 + 716 + spin_lock_irq(&file->async_file.lock); 717 + 
list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) { 718 + list_del(&evt->list); 719 + kfree(evt); 720 + } 721 + spin_unlock_irq(&file->async_file.lock); 722 + 723 + resp.comp_events_reported = uobj->comp_events_reported; 724 + resp.async_events_reported = uobj->async_events_reported; 725 + 719 726 kfree(uobj); 727 + 728 + if (copy_to_user((void __user *) (unsigned long) cmd.response, 729 + &resp, sizeof resp)) 730 + ret = -EFAULT; 720 731 721 732 out: 722 733 up(&ib_uverbs_idr_mutex); ··· 752 721 struct ib_uverbs_create_qp cmd; 753 722 struct ib_uverbs_create_qp_resp resp; 754 723 struct ib_udata udata; 755 - struct ib_uobject *uobj; 724 + struct ib_uevent_object *uobj; 756 725 struct ib_pd *pd; 757 726 struct ib_cq *scq, *rcq; 758 727 struct ib_srq *srq; ··· 803 772 attr.cap.max_recv_sge = cmd.max_recv_sge; 804 773 attr.cap.max_inline_data = cmd.max_inline_data; 805 774 806 - uobj->user_handle = cmd.user_handle; 807 - uobj->context = file->ucontext; 775 + uobj->uobject.user_handle = cmd.user_handle; 776 + uobj->uobject.context = file->ucontext; 777 + uobj->events_reported = 0; 778 + INIT_LIST_HEAD(&uobj->event_list); 808 779 809 780 qp = pd->device->create_qp(pd, &attr, &udata); 810 781 if (IS_ERR(qp)) { ··· 819 786 qp->send_cq = attr.send_cq; 820 787 qp->recv_cq = attr.recv_cq; 821 788 qp->srq = attr.srq; 822 - qp->uobject = uobj; 789 + qp->uobject = &uobj->uobject; 823 790 qp->event_handler = attr.event_handler; 824 791 qp->qp_context = attr.qp_context; 825 792 qp->qp_type = attr.qp_type; ··· 838 805 goto err_destroy; 839 806 } 840 807 841 - ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->id); 808 + ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id); 842 809 843 810 if (ret == -EAGAIN) 844 811 goto retry; 845 812 if (ret) 846 813 goto err_destroy; 847 814 848 - resp.qp_handle = uobj->id; 815 + resp.qp_handle = uobj->uobject.id; 849 816 850 817 spin_lock_irq(&file->ucontext->lock); 851 - list_add_tail(&uobj->list, &file->ucontext->qp_list); 818 + list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list); 852 819 spin_unlock_irq(&file->ucontext->lock); 853 820 854 821 if (copy_to_user((void __user *) (unsigned long) cmd.response, ··· 863 830 864 831 err_list: 865 832 spin_lock_irq(&file->ucontext->lock); 866 - list_del(&uobj->list); 833 + list_del(&uobj->uobject.list); 867 834 spin_unlock_irq(&file->ucontext->lock); 868 835 869 836 err_destroy: ··· 963 930 const char __user *buf, int in_len, 964 931 int out_len) 965 932 { 966 - struct ib_uverbs_destroy_qp cmd; 967 - struct ib_qp *qp; 968 - struct ib_uobject *uobj; 969 - int ret = -EINVAL; 933 + struct ib_uverbs_destroy_qp cmd; 934 + struct ib_uverbs_destroy_qp_resp resp; 935 + struct ib_qp *qp; 936 + struct ib_uevent_object *uobj; 937 + struct ib_uverbs_event *evt, *tmp; 938 + int ret = -EINVAL; 970 939 971 940 if (copy_from_user(&cmd, buf, sizeof cmd)) 972 941 return -EFAULT; 942 + 943 + memset(&resp, 0, sizeof resp); 973 944 974 945 down(&ib_uverbs_idr_mutex); 975 946 ··· 981 944 if (!qp || qp->uobject->context != file->ucontext) 982 945 goto out; 983 946 984 - uobj = qp->uobject; 947 + uobj = container_of(qp->uobject, struct ib_uevent_object, uobject); 985 948 986 949 ret = ib_destroy_qp(qp); 987 950 if (ret) ··· 990 953 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle); 991 954 992 955 spin_lock_irq(&file->ucontext->lock); 993 - list_del(&uobj->list); 956 + list_del(&uobj->uobject.list); 994 957 spin_unlock_irq(&file->ucontext->lock); 995 958 959 + spin_lock_irq(&file->async_file.lock); 960 + 
list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { 961 + list_del(&evt->list); 962 + kfree(evt); 963 + } 964 + spin_unlock_irq(&file->async_file.lock); 965 + 966 + resp.events_reported = uobj->events_reported; 967 + 996 968 kfree(uobj); 969 + 970 + if (copy_to_user((void __user *) (unsigned long) cmd.response, 971 + &resp, sizeof resp)) 972 + ret = -EFAULT; 997 973 998 974 out: 999 975 up(&ib_uverbs_idr_mutex); ··· 1065 1015 struct ib_uverbs_create_srq cmd; 1066 1016 struct ib_uverbs_create_srq_resp resp; 1067 1017 struct ib_udata udata; 1068 - struct ib_uobject *uobj; 1018 + struct ib_uevent_object *uobj; 1069 1019 struct ib_pd *pd; 1070 1020 struct ib_srq *srq; 1071 1021 struct ib_srq_init_attr attr; ··· 1100 1050 attr.attr.max_sge = cmd.max_sge; 1101 1051 attr.attr.srq_limit = cmd.srq_limit; 1102 1052 1103 - uobj->user_handle = cmd.user_handle; 1104 - uobj->context = file->ucontext; 1053 + uobj->uobject.user_handle = cmd.user_handle; 1054 + uobj->uobject.context = file->ucontext; 1055 + uobj->events_reported = 0; 1056 + INIT_LIST_HEAD(&uobj->event_list); 1105 1057 1106 1058 srq = pd->device->create_srq(pd, &attr, &udata); 1107 1059 if (IS_ERR(srq)) { ··· 1113 1061 1114 1062 srq->device = pd->device; 1115 1063 srq->pd = pd; 1116 - srq->uobject = uobj; 1064 + srq->uobject = &uobj->uobject; 1117 1065 srq->event_handler = attr.event_handler; 1118 1066 srq->srq_context = attr.srq_context; 1119 1067 atomic_inc(&pd->usecnt); ··· 1127 1075 goto err_destroy; 1128 1076 } 1129 1077 1130 - ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id); 1078 + ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id); 1131 1079 1132 1080 if (ret == -EAGAIN) 1133 1081 goto retry; 1134 1082 if (ret) 1135 1083 goto err_destroy; 1136 1084 1137 - resp.srq_handle = uobj->id; 1085 + resp.srq_handle = uobj->uobject.id; 1138 1086 1139 1087 spin_lock_irq(&file->ucontext->lock); 1140 - list_add_tail(&uobj->list, &file->ucontext->srq_list); 1088 + list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list); 1141 1089 spin_unlock_irq(&file->ucontext->lock); 1142 1090 1143 1091 if (copy_to_user((void __user *) (unsigned long) cmd.response, ··· 1152 1100 1153 1101 err_list: 1154 1102 spin_lock_irq(&file->ucontext->lock); 1155 - list_del(&uobj->list); 1103 + list_del(&uobj->uobject.list); 1156 1104 spin_unlock_irq(&file->ucontext->lock); 1157 1105 1158 1106 err_destroy: ··· 1201 1149 const char __user *buf, int in_len, 1202 1150 int out_len) 1203 1151 { 1204 - struct ib_uverbs_destroy_srq cmd; 1205 - struct ib_srq *srq; 1206 - struct ib_uobject *uobj; 1207 - int ret = -EINVAL; 1152 + struct ib_uverbs_destroy_srq cmd; 1153 + struct ib_uverbs_destroy_srq_resp resp; 1154 + struct ib_srq *srq; 1155 + struct ib_uevent_object *uobj; 1156 + struct ib_uverbs_event *evt, *tmp; 1157 + int ret = -EINVAL; 1208 1158 1209 1159 if (copy_from_user(&cmd, buf, sizeof cmd)) 1210 1160 return -EFAULT; 1211 1161 1212 1162 down(&ib_uverbs_idr_mutex); 1213 1163 1164 + memset(&resp, 0, sizeof resp); 1165 + 1214 1166 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle); 1215 1167 if (!srq || srq->uobject->context != file->ucontext) 1216 1168 goto out; 1217 1169 1218 - uobj = srq->uobject; 1170 + uobj = container_of(srq->uobject, struct ib_uevent_object, uobject); 1219 1171 1220 1172 ret = ib_destroy_srq(srq); 1221 1173 if (ret) ··· 1228 1172 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle); 1229 1173 1230 1174 spin_lock_irq(&file->ucontext->lock); 1231 - list_del(&uobj->list); 1175 + list_del(&uobj->uobject.list); 1232 1176 
spin_unlock_irq(&file->ucontext->lock); 1233 1177 1178 + spin_lock_irq(&file->async_file.lock); 1179 + list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) { 1180 + list_del(&evt->list); 1181 + kfree(evt); 1182 + } 1183 + spin_unlock_irq(&file->async_file.lock); 1184 + 1185 + resp.events_reported = uobj->events_reported; 1186 + 1234 1187 kfree(uobj); 1188 + 1189 + if (copy_to_user((void __user *) (unsigned long) cmd.response, 1190 + &resp, sizeof resp)) 1191 + ret = -EFAULT; 1235 1192 1236 1193 out: 1237 1194 up(&ib_uverbs_idr_mutex);
drivers/infiniband/core/uverbs_main.c | +62 -36
··· 120 120 idr_remove(&ib_uverbs_qp_idr, uobj->id); 121 121 ib_destroy_qp(qp); 122 122 list_del(&uobj->list); 123 - kfree(uobj); 123 + kfree(container_of(uobj, struct ib_uevent_object, uobject)); 124 124 } 125 125 126 126 list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) { ··· 128 128 idr_remove(&ib_uverbs_cq_idr, uobj->id); 129 129 ib_destroy_cq(cq); 130 130 list_del(&uobj->list); 131 - kfree(uobj); 131 + kfree(container_of(uobj, struct ib_ucq_object, uobject)); 132 132 } 133 133 134 134 list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) { ··· 136 136 idr_remove(&ib_uverbs_srq_idr, uobj->id); 137 137 ib_destroy_srq(srq); 138 138 list_del(&uobj->list); 139 - kfree(uobj); 139 + kfree(container_of(uobj, struct ib_uevent_object, uobject)); 140 140 } 141 141 142 142 /* XXX Free MWs */ ··· 182 182 size_t count, loff_t *pos) 183 183 { 184 184 struct ib_uverbs_event_file *file = filp->private_data; 185 - void *event; 185 + struct ib_uverbs_event *event; 186 186 int eventsz; 187 187 int ret = 0; 188 188 ··· 207 207 return -ENODEV; 208 208 } 209 209 210 - if (file->is_async) { 211 - event = list_entry(file->event_list.next, 212 - struct ib_uverbs_async_event, list); 210 + event = list_entry(file->event_list.next, struct ib_uverbs_event, list); 211 + 212 + if (file->is_async) 213 213 eventsz = sizeof (struct ib_uverbs_async_event_desc); 214 - } else { 215 - event = list_entry(file->event_list.next, 216 - struct ib_uverbs_comp_event, list); 214 + else 217 215 eventsz = sizeof (struct ib_uverbs_comp_event_desc); 218 - } 219 216 220 217 if (eventsz > count) { 221 218 ret = -EINVAL; 222 219 event = NULL; 223 - } else 220 + } else { 224 221 list_del(file->event_list.next); 222 + if (event->counter) { 223 + ++(*event->counter); 224 + list_del(&event->obj_list); 225 + } 226 + } 225 227 226 228 spin_unlock_irq(&file->lock); 227 229 ··· 259 257 260 258 static void ib_uverbs_event_release(struct ib_uverbs_event_file *file) 261 259 { 262 - struct list_head *entry, *tmp; 260 + struct ib_uverbs_event *entry, *tmp; 263 261 264 262 spin_lock_irq(&file->lock); 265 263 if (file->fd != -1) { 266 264 file->fd = -1; 267 - list_for_each_safe(entry, tmp, &file->event_list) 268 - if (file->is_async) 269 - kfree(list_entry(entry, struct ib_uverbs_async_event, list)); 270 - else 271 - kfree(list_entry(entry, struct ib_uverbs_comp_event, list)); 265 + list_for_each_entry_safe(entry, tmp, &file->event_list, list) 266 + kfree(entry); 272 267 } 273 268 spin_unlock_irq(&file->lock); 274 269 } ··· 303 304 304 305 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context) 305 306 { 306 - struct ib_uverbs_file *file = cq_context; 307 - struct ib_uverbs_comp_event *entry; 308 - unsigned long flags; 307 + struct ib_uverbs_file *file = cq_context; 308 + struct ib_ucq_object *uobj; 309 + struct ib_uverbs_event *entry; 310 + unsigned long flags; 309 311 310 312 entry = kmalloc(sizeof *entry, GFP_ATOMIC); 311 313 if (!entry) 312 314 return; 313 315 314 - entry->desc.cq_handle = cq->uobject->user_handle; 316 + uobj = container_of(cq->uobject, struct ib_ucq_object, uobject); 317 + 318 + entry->desc.comp.cq_handle = cq->uobject->user_handle; 319 + entry->counter = &uobj->comp_events_reported; 315 320 316 321 spin_lock_irqsave(&file->comp_file[0].lock, flags); 317 322 list_add_tail(&entry->list, &file->comp_file[0].event_list); 323 + list_add_tail(&entry->obj_list, &uobj->comp_list); 318 324 spin_unlock_irqrestore(&file->comp_file[0].lock, flags); 319 325 320 326 
wake_up_interruptible(&file->comp_file[0].poll_wait); ··· 327 323 } 328 324 329 325 static void ib_uverbs_async_handler(struct ib_uverbs_file *file, 330 - __u64 element, __u64 event) 326 + __u64 element, __u64 event, 327 + struct list_head *obj_list, 328 + u32 *counter) 331 329 { 332 - struct ib_uverbs_async_event *entry; 330 + struct ib_uverbs_event *entry; 333 331 unsigned long flags; 334 332 335 333 entry = kmalloc(sizeof *entry, GFP_ATOMIC); 336 334 if (!entry) 337 335 return; 338 336 339 - entry->desc.element = element; 340 - entry->desc.event_type = event; 337 + entry->desc.async.element = element; 338 + entry->desc.async.event_type = event; 339 + entry->counter = counter; 341 340 342 341 spin_lock_irqsave(&file->async_file.lock, flags); 343 342 list_add_tail(&entry->list, &file->async_file.event_list); 343 + if (obj_list) 344 + list_add_tail(&entry->obj_list, obj_list); 344 345 spin_unlock_irqrestore(&file->async_file.lock, flags); 345 346 346 347 wake_up_interruptible(&file->async_file.poll_wait); ··· 354 345 355 346 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr) 356 347 { 357 - ib_uverbs_async_handler(context_ptr, 358 - event->element.cq->uobject->user_handle, 359 - event->event); 348 + struct ib_ucq_object *uobj; 349 + 350 + uobj = container_of(event->element.cq->uobject, 351 + struct ib_ucq_object, uobject); 352 + 353 + ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, 354 + event->event, &uobj->async_list, 355 + &uobj->async_events_reported); 356 + 360 357 } 361 358 362 359 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr) 363 360 { 364 - ib_uverbs_async_handler(context_ptr, 365 - event->element.qp->uobject->user_handle, 366 - event->event); 361 + struct ib_uevent_object *uobj; 362 + 363 + uobj = container_of(event->element.qp->uobject, 364 + struct ib_uevent_object, uobject); 365 + 366 + ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, 367 + event->event, &uobj->event_list, 368 + &uobj->events_reported); 367 369 } 368 370 369 371 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr) 370 372 { 371 - ib_uverbs_async_handler(context_ptr, 372 - event->element.srq->uobject->user_handle, 373 - event->event); 373 + struct ib_uevent_object *uobj; 374 + 375 + uobj = container_of(event->element.srq->uobject, 376 + struct ib_uevent_object, uobject); 377 + 378 + ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle, 379 + event->event, &uobj->event_list, 380 + &uobj->events_reported); 374 381 } 375 382 376 383 static void ib_uverbs_event_handler(struct ib_event_handler *handler, ··· 395 370 struct ib_uverbs_file *file = 396 371 container_of(handler, struct ib_uverbs_file, event_handler); 397 372 398 - ib_uverbs_async_handler(file, event->element.port_num, event->event); 373 + ib_uverbs_async_handler(file, event->element.port_num, event->event, 374 + NULL, NULL); 399 375 } 400 376 401 377 static int ib_uverbs_event_init(struct ib_uverbs_event_file *file,
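Across uverbs, completion and async events are now one ib_uverbs_event type linked both on the file-wide queue that read() drains and on the owning object's list, with a per-object count of events actually delivered; destroying the object frees whatever is still queued and hands the count back to user space. The following user-space sketch mirrors that bookkeeping in miniature; every type and helper in it is a stand-in, not the kernel code.

#include <stdio.h>
#include <stdlib.h>

struct uobj;

struct event {
	struct event *file_next;     /* file-wide queue (what read() pops) */
	struct event *obj_next;      /* per-object list (used at destroy) */
	struct uobj  *obj;           /* owning object */
};

struct file { struct event *events; };
struct uobj { struct event *events; unsigned int events_reported; };

static void post_event(struct file *f, struct uobj *o)
{
	struct event *e = calloc(1, sizeof(*e));
	if (!e)
		return;
	e->obj = o;
	e->file_next = f->events; f->events = e;   /* LIFO, for brevity */
	e->obj_next  = o->events; o->events = e;
}

static void unlink_from(struct event **head, struct event *e, int obj_list)
{
	while (*head && *head != e)
		head = obj_list ? &(*head)->obj_next : &(*head)->file_next;
	if (*head)
		*head = obj_list ? e->obj_next : e->file_next;
}

static void read_event(struct file *f)
{
	struct event *e = f->events;
	if (!e)
		return;
	f->events = e->file_next;
	e->obj->events_reported++;            /* delivered to user space */
	unlink_from(&e->obj->events, e, 1);   /* drop from the object list */
	free(e);
}

static unsigned int destroy_obj(struct file *f, struct uobj *o)
{
	while (o->events) {                   /* events never delivered */
		struct event *e = o->events;
		o->events = e->obj_next;
		unlink_from(&f->events, e, 0);
		free(e);
	}
	return o->events_reported;            /* reported back on destroy */
}

int main(void)
{
	struct file f = { 0 };
	struct uobj o = { 0, 0 };

	post_event(&f, &o);
	post_event(&f, &o);
	read_event(&f);                       /* user space reads one event */
	printf("events_reported = %u\n", destroy_obj(&f, &o));   /* prints 1 */
	return 0;
}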
drivers/infiniband/hw/mthca/mthca_qp.c | +33 -12
··· 220 220 (PAGE_SIZE - 1)); 221 221 } 222 222 223 + static void mthca_wq_init(struct mthca_wq *wq) 224 + { 225 + spin_lock_init(&wq->lock); 226 + wq->next_ind = 0; 227 + wq->last_comp = wq->max - 1; 228 + wq->head = 0; 229 + wq->tail = 0; 230 + wq->last = NULL; 231 + } 232 + 223 233 void mthca_qp_event(struct mthca_dev *dev, u32 qpn, 224 234 enum ib_event_type event_type) 225 235 { ··· 843 833 store_attrs(to_msqp(qp), attr, attr_mask); 844 834 845 835 /* 846 - * If we are moving QP0 to RTR, bring the IB link up; if we 847 - * are moving QP0 to RESET or ERROR, bring the link back down. 836 + * If we moved QP0 to RTR, bring the IB link up; if we moved 837 + * QP0 to RESET or ERROR, bring the link back down. 848 838 */ 849 839 if (is_qp0(dev, qp)) { 850 840 if (cur_state != IB_QPS_RTR && ··· 856 846 (new_state == IB_QPS_RESET || 857 847 new_state == IB_QPS_ERR)) 858 848 mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status); 849 + } 850 + 851 + /* 852 + * If we moved a kernel QP to RESET, clean up all old CQ 853 + * entries and reinitialize the QP. 854 + */ 855 + if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) { 856 + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, 857 + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 858 + if (qp->ibqp.send_cq != qp->ibqp.recv_cq) 859 + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, 860 + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); 861 + 862 + mthca_wq_init(&qp->sq); 863 + mthca_wq_init(&qp->rq); 864 + 865 + if (mthca_is_memfree(dev)) { 866 + *qp->sq.db = 0; 867 + *qp->rq.db = 0; 868 + } 859 869 } 860 870 861 871 return err; ··· 1033 1003 } 1034 1004 } 1035 1005 1036 - static void mthca_wq_init(struct mthca_wq* wq) 1037 - { 1038 - spin_lock_init(&wq->lock); 1039 - wq->next_ind = 0; 1040 - wq->last_comp = wq->max - 1; 1041 - wq->head = 0; 1042 - wq->tail = 0; 1043 - wq->last = NULL; 1044 - } 1045 - 1046 1006 static int mthca_alloc_qp_common(struct mthca_dev *dev, 1047 1007 struct mthca_pd *pd, 1048 1008 struct mthca_cq *send_cq, ··· 1044 1024 int i; 1045 1025 1046 1026 atomic_set(&qp->refcount, 1); 1027 + init_waitqueue_head(&qp->wait); 1047 1028 qp->state = IB_QPS_RESET; 1048 1029 qp->atomic_rd_en = 0; 1049 1030 qp->resp_depth = 0;
drivers/infiniband/ulp/ipoib/ipoib_main.c | +2
··· 1062 1062 ipoib_dev_cleanup(priv->dev); 1063 1063 free_netdev(priv->dev); 1064 1064 } 1065 + 1066 + kfree(dev_list); 1065 1067 } 1066 1068 1067 1069 static int __init ipoib_init_module(void)
include/rdma/ib_cm.h | +1
··· 290 290 enum ib_cm_lap_state lap_state; /* internal CM/debug use */ 291 291 __be32 local_id; 292 292 __be32 remote_id; 293 + u32 remote_cm_qpn; /* 1 unless redirected */ 293 294 }; 294 295 295 296 /**
include/rdma/ib_mad.h | +21
··· 173 173 u8 data[216]; 174 174 }; 175 175 176 + struct ib_class_port_info 177 + { 178 + u8 base_version; 179 + u8 class_version; 180 + __be16 capability_mask; 181 + u8 reserved[3]; 182 + u8 resp_time_value; 183 + u8 redirect_gid[16]; 184 + __be32 redirect_tcslfl; 185 + __be16 redirect_lid; 186 + __be16 redirect_pkey; 187 + __be32 redirect_qp; 188 + __be32 redirect_qkey; 189 + u8 trap_gid[16]; 190 + __be32 trap_tcslfl; 191 + __be16 trap_lid; 192 + __be16 trap_pkey; 193 + __be32 trap_hlqp; 194 + __be32 trap_qkey; 195 + }; 196 + 176 197 /** 177 198 * ib_mad_send_buf - MAD data buffer and work request for sends. 178 199 * @mad: References an allocated MAD data buffer. The size of the data
include/rdma/ib_sa.h | +30 -1
··· 46 46 47 47 IB_SA_METHOD_GET_TABLE = 0x12, 48 48 IB_SA_METHOD_GET_TABLE_RESP = 0x92, 49 - IB_SA_METHOD_DELETE = 0x15 49 + IB_SA_METHOD_DELETE = 0x15, 50 + IB_SA_METHOD_DELETE_RESP = 0x95, 51 + IB_SA_METHOD_GET_MULTI = 0x14, 52 + IB_SA_METHOD_GET_MULTI_RESP = 0x94, 53 + IB_SA_METHOD_GET_TRACE_TBL = 0x13 54 + }; 55 + 56 + enum { 57 + IB_SA_ATTR_CLASS_PORTINFO = 0x01, 58 + IB_SA_ATTR_NOTICE = 0x02, 59 + IB_SA_ATTR_INFORM_INFO = 0x03, 60 + IB_SA_ATTR_NODE_REC = 0x11, 61 + IB_SA_ATTR_PORT_INFO_REC = 0x12, 62 + IB_SA_ATTR_SL2VL_REC = 0x13, 63 + IB_SA_ATTR_SWITCH_REC = 0x14, 64 + IB_SA_ATTR_LINEAR_FDB_REC = 0x15, 65 + IB_SA_ATTR_RANDOM_FDB_REC = 0x16, 66 + IB_SA_ATTR_MCAST_FDB_REC = 0x17, 67 + IB_SA_ATTR_SM_INFO_REC = 0x18, 68 + IB_SA_ATTR_LINK_REC = 0x20, 69 + IB_SA_ATTR_GUID_INFO_REC = 0x30, 70 + IB_SA_ATTR_SERVICE_REC = 0x31, 71 + IB_SA_ATTR_PARTITION_REC = 0x33, 72 + IB_SA_ATTR_PATH_REC = 0x35, 73 + IB_SA_ATTR_VL_ARB_REC = 0x36, 74 + IB_SA_ATTR_MC_MEMBER_REC = 0x38, 75 + IB_SA_ATTR_TRACE_REC = 0x39, 76 + IB_SA_ATTR_MULTI_PATH_REC = 0x3a, 77 + IB_SA_ATTR_SERVICE_ASSOC_REC = 0x3b, 78 + IB_SA_ATTR_INFORM_INFO_REC = 0xf3 50 79 }; 51 80 52 81 enum ib_sa_selector {
include/rdma/ib_user_cm.h | +69 -3
··· 1 1 /* 2 2 * Copyright (c) 2005 Topspin Communications. All rights reserved. 3 + * Copyright (c) 2005 Intel Corporation. All rights reserved. 3 4 * 4 5 * This software is available to you under a choice of one of two 5 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 38 37 39 38 #include <linux/types.h> 40 39 41 - #define IB_USER_CM_ABI_VERSION 1 40 + #define IB_USER_CM_ABI_VERSION 2 42 41 43 42 enum { 44 43 IB_USER_CM_CMD_CREATE_ID, ··· 61 60 IB_USER_CM_CMD_SEND_SIDR_REP, 62 61 63 62 IB_USER_CM_CMD_EVENT, 63 + IB_USER_CM_CMD_INIT_QP_ATTR, 64 64 }; 65 65 /* 66 66 * command ABI structures. ··· 73 71 }; 74 72 75 73 struct ib_ucm_create_id { 74 + __u64 uid; 76 75 __u64 response; 77 76 }; 78 77 ··· 82 79 }; 83 80 84 81 struct ib_ucm_destroy_id { 82 + __u64 response; 85 83 __u32 id; 84 + }; 85 + 86 + struct ib_ucm_destroy_id_resp { 87 + __u32 events_reported; 86 88 }; 87 89 88 90 struct ib_ucm_attr_id { ··· 100 92 __be64 service_mask; 101 93 __be32 local_id; 102 94 __be32 remote_id; 95 + }; 96 + 97 + struct ib_ucm_init_qp_attr { 98 + __u64 response; 99 + __u32 id; 100 + __u32 qp_state; 101 + }; 102 + 103 + struct ib_ucm_ah_attr { 104 + __u8 grh_dgid[16]; 105 + __u32 grh_flow_label; 106 + __u16 dlid; 107 + __u16 reserved; 108 + __u8 grh_sgid_index; 109 + __u8 grh_hop_limit; 110 + __u8 grh_traffic_class; 111 + __u8 sl; 112 + __u8 src_path_bits; 113 + __u8 static_rate; 114 + __u8 is_global; 115 + __u8 port_num; 116 + }; 117 + 118 + struct ib_ucm_init_qp_attr_resp { 119 + __u32 qp_attr_mask; 120 + __u32 qp_state; 121 + __u32 cur_qp_state; 122 + __u32 path_mtu; 123 + __u32 path_mig_state; 124 + __u32 qkey; 125 + __u32 rq_psn; 126 + __u32 sq_psn; 127 + __u32 dest_qp_num; 128 + __u32 qp_access_flags; 129 + 130 + struct ib_ucm_ah_attr ah_attr; 131 + struct ib_ucm_ah_attr alt_ah_attr; 132 + 133 + /* ib_qp_cap */ 134 + __u32 max_send_wr; 135 + __u32 max_recv_wr; 136 + __u32 max_send_sge; 137 + __u32 max_recv_sge; 138 + __u32 max_inline_data; 139 + 140 + __u16 pkey_index; 141 + __u16 alt_pkey_index; 142 + __u8 en_sqd_async_notify; 143 + __u8 sq_draining; 144 + __u8 max_rd_atomic; 145 + __u8 max_dest_rd_atomic; 146 + __u8 min_rnr_timer; 147 + __u8 port_num; 148 + __u8 timeout; 149 + __u8 retry_cnt; 150 + __u8 rnr_retry; 151 + __u8 alt_port_num; 152 + __u8 alt_timeout; 103 153 }; 104 154 105 155 struct ib_ucm_listen { ··· 223 157 }; 224 158 225 159 struct ib_ucm_rep { 160 + __u64 uid; 226 161 __u64 data; 227 162 __u32 id; 228 163 __u32 qpn; ··· 299 232 }; 300 233 301 234 struct ib_ucm_req_event_resp { 302 - __u32 listen_id; 303 235 /* device */ 304 236 /* port */ 305 237 struct ib_ucm_path_rec primary_path; ··· 353 287 }; 354 288 355 289 struct ib_ucm_sidr_req_event_resp { 356 - __u32 listen_id; 357 290 /* device */ 358 291 /* port */ 359 292 __u16 pkey; ··· 372 307 #define IB_UCM_PRES_ALTERNATE 0x08 373 308 374 309 struct ib_ucm_event_resp { 310 + __u64 uid; 375 311 __u32 id; 376 312 __u32 event; 377 313 __u32 present;
include/rdma/ib_user_verbs.h | +20 -1
··· 42 42 * Increment this value if any changes that break userspace ABI 43 43 * compatibility are made. 44 44 */ 45 - #define IB_USER_VERBS_ABI_VERSION 1 45 + #define IB_USER_VERBS_ABI_VERSION 2 46 46 47 47 enum { 48 48 IB_USER_VERBS_CMD_QUERY_PARAMS, ··· 292 292 }; 293 293 294 294 struct ib_uverbs_destroy_cq { 295 + __u64 response; 295 296 __u32 cq_handle; 297 + __u32 reserved; 298 + }; 299 + 300 + struct ib_uverbs_destroy_cq_resp { 301 + __u32 comp_events_reported; 302 + __u32 async_events_reported; 296 303 }; 297 304 298 305 struct ib_uverbs_create_qp { ··· 379 372 }; 380 373 381 374 struct ib_uverbs_destroy_qp { 375 + __u64 response; 382 376 __u32 qp_handle; 377 + __u32 reserved; 378 + }; 379 + 380 + struct ib_uverbs_destroy_qp_resp { 381 + __u32 events_reported; 383 382 }; 384 383 385 384 struct ib_uverbs_attach_mcast { ··· 429 416 }; 430 417 431 418 struct ib_uverbs_destroy_srq { 419 + __u64 response; 432 420 __u32 srq_handle; 421 + __u32 reserved; 422 + }; 423 + 424 + struct ib_uverbs_destroy_srq_resp { 425 + __u32 events_reported; 433 426 }; 434 427 435 428 #endif /* IB_USER_VERBS_H */
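The new destroy requests in ib_user_verbs.h add a response pointer and an explicit reserved word after the 32-bit handle. A plausible reason for the padding, stated here as an assumption rather than anything in the diff, is keeping the structure the same size for 32- and 64-bit user space; the stand-alone check below mirrors the layout with stdint types in place of __u32/__u64.

#include <stdint.h>
#include <stdio.h>

struct destroy_cq_req {          /* mirrors struct ib_uverbs_destroy_cq */
	uint64_t response;       /* user-space pointer for the response */
	uint32_t cq_handle;
	uint32_t reserved;       /* explicit tail padding: 16 bytes whether
	                            64-bit fields align to 4 or to 8 */
};

struct destroy_cq_resp {         /* mirrors struct ib_uverbs_destroy_cq_resp */
	uint32_t comp_events_reported;
	uint32_t async_events_reported;
};

int main(void)
{
	printf("request %zu bytes, response %zu bytes\n",
	       sizeof(struct destroy_cq_req),
	       sizeof(struct destroy_cq_resp));
	return 0;
}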