Make sure that userspace does not retrieve stale asynchronous or completion events after destroying a CQ, QP or SRQ. We do this by sweeping the event lists before returning from a destroy call, and by returning the number of events already reported before the destroy call. This allows userspace to wait until it has processed all the events the kernel reported for an object before it frees its context for the object.
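
For illustration, a minimal sketch of the userspace side this enables. The helper and field names here are hypothetical; the real logic lives in libibverbs:

	/*
	 * Hypothetical userspace bookkeeping built on the new return value.
	 * The library counts the events it has handed to the application for
	 * a CQ; destroy tells it how many the kernel already reported, so it
	 * waits for the difference before freeing the per-CQ context.
	 */
	#include <pthread.h>
	#include <stdint.h>

	struct cq_context {
		pthread_mutex_t mutex;
		pthread_cond_t  cond;
		uint32_t        events_acked;	/* events the app has consumed */
	};

	/* Call after the destroy-CQ command returns comp_events_reported. */
	static void wait_for_reported_events(struct cq_context *ctx,
					     uint32_t comp_events_reported)
	{
		pthread_mutex_lock(&ctx->mutex);
		while (ctx->events_acked < comp_events_reported)
			pthread_cond_wait(&ctx->cond, &ctx->mutex);
		pthread_mutex_unlock(&ctx->mutex);
		/* No thread still holds an event for this CQ, so freeing
		 * the context is now safe. */
	}

The event-consuming path would bump events_acked and signal the condition variable; the sketch shows only the destroy side.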

The ABI of the destroy CQ, destroy QP and destroy SRQ commands has to
change to return the event count, so bump the ABI version from 1 to 2.
The userspace libibverbs library has already been updated to handle
both the old and new ABI versions.
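
A hedged sketch of how a userspace library can detect which ABI the kernel speaks. The sysfs attribute is the one the uverbs module exports; everything else is illustrative:

	#include <stdio.h>

	/* Read the uverbs ABI version published by the kernel.  With ABI 1
	 * the destroy commands carry no response buffer; with ABI 2 the
	 * library must pass one and gets back the event counts. */
	static int read_abi_version(void)
	{
		FILE *f = fopen("/sys/class/infiniband_verbs/abi_version", "r");
		int abi = -1;

		if (!f)
			return -1;
		if (fscanf(f, "%d", &abi) != 1)
			abi = -1;
		fclose(f);
		return abi;
	}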

Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored and committed by Roland Dreier (63aaf647 2e9f7cb7)

4 files changed, 211 insertions(+), 89 deletions(-)

drivers/infiniband/core/uverbs.h (+17 -9)
···
 	struct ib_uverbs_event_file        comp_file[1];
 };
 
-struct ib_uverbs_async_event {
-	struct ib_uverbs_async_event_desc	desc;
+struct ib_uverbs_event {
+	union {
+		struct ib_uverbs_async_event_desc	async;
+		struct ib_uverbs_comp_event_desc	comp;
+	}					desc;
 	struct list_head			list;
+	struct list_head			obj_list;
+	u32				       *counter;
 };
 
-struct ib_uverbs_comp_event {
-	struct ib_uverbs_comp_event_desc	desc;
-	struct list_head			list;
+struct ib_uevent_object {
+	struct ib_uobject	uobject;
+	struct list_head	event_list;
+	u32			events_reported;
 };
 
-struct ib_uobject_mr {
-	struct ib_uobject	uobj;
-	struct page	       *page_list;
-	struct scatterlist     *sg_list;
+struct ib_ucq_object {
+	struct ib_uobject	uobject;
+	struct list_head	comp_list;
+	struct list_head	async_list;
+	u32			comp_events_reported;
+	u32			async_events_reported;
 };
 
 extern struct semaphore ib_uverbs_idr_mutex;
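
The new structures use the standard embedded-uobject pattern: struct ib_uobject sits inside the larger per-object struct, and container_of() recovers the outer object. A minimal kernel-style illustration (the helper name is made up; it is not part of the patch):

	#include <linux/kernel.h>	/* container_of() */

	static inline struct ib_ucq_object *
	to_ucq_object(struct ib_uobject *uobject)
	{
		/* Subtracts offsetof(struct ib_ucq_object, uobject) from
		 * the pointer, yielding the enclosing per-CQ object. */
		return container_of(uobject, struct ib_ucq_object, uobject);
	}
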
drivers/infiniband/core/uverbs_cmd.c (+112 -43)
···
 	struct ib_uverbs_create_cq      cmd;
 	struct ib_uverbs_create_cq_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uobject              *uobj;
+	struct ib_ucq_object           *uobj;
 	struct ib_cq                   *cq;
 	int                             ret;
···
 	if (!uobj)
 		return -ENOMEM;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle   = cmd.user_handle;
+	uobj->uobject.context       = file->ucontext;
+	uobj->comp_events_reported  = 0;
+	uobj->async_events_reported = 0;
+	INIT_LIST_HEAD(&uobj->comp_list);
+	INIT_LIST_HEAD(&uobj->async_list);
 
 	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
 					     file->ucontext, &udata);
···
 	}
 
 	cq->device        = file->device->ib_dev;
-	cq->uobject       = uobj;
+	cq->uobject       = &uobj->uobject;
 	cq->comp_handler  = ib_uverbs_comp_handler;
 	cq->event_handler = ib_uverbs_cq_event_handler;
 	cq->cq_context    = file;
···
 	}
 
 	down(&ib_uverbs_idr_mutex);
-	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
 	up(&ib_uverbs_idr_mutex);
 
 	if (ret == -EAGAIN)
···
 		goto err_cq;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->cq_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	memset(&resp, 0, sizeof resp);
-	resp.cq_handle = uobj->id;
+	resp.cq_handle = uobj->uobject.id;
 	resp.cqe       = cq->cqe;
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
···
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	down(&ib_uverbs_idr_mutex);
-	idr_remove(&ib_uverbs_cq_idr, uobj->id);
+	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
 	up(&ib_uverbs_idr_mutex);
 
 err_cq:
···
 			    const char __user *buf, int in_len,
 			    int out_len)
 {
-	struct ib_uverbs_destroy_cq cmd;
-	struct ib_cq               *cq;
-	struct ib_uobject          *uobj;
-	int                         ret = -EINVAL;
+	struct ib_uverbs_destroy_cq      cmd;
+	struct ib_uverbs_destroy_cq_resp resp;
+	struct ib_cq                    *cq;
+	struct ib_ucq_object            *uobj;
+	struct ib_uverbs_event          *evt, *tmp;
+	u64                              user_handle;
+	int                              ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
+
+	memset(&resp, 0, sizeof resp);
 
 	down(&ib_uverbs_idr_mutex);
···
 	if (!cq || cq->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = cq->uobject;
+	user_handle = cq->uobject->user_handle;
+	uobj        = container_of(cq->uobject, struct ib_ucq_object, uobject);
 
 	ret = ib_destroy_cq(cq);
 	if (ret)
···
 	idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->comp_file[0].lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->comp_file[0].lock);
+
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.comp_events_reported  = uobj->comp_events_reported;
+	resp.async_events_reported = uobj->async_events_reported;
+
 	kfree(uobj);
+
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
 
 out:
 	up(&ib_uverbs_idr_mutex);
···
 	struct ib_uverbs_create_qp      cmd;
 	struct ib_uverbs_create_qp_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uobject              *uobj;
+	struct ib_uevent_object        *uobj;
 	struct ib_pd                   *pd;
 	struct ib_cq                   *scq, *rcq;
 	struct ib_srq                  *srq;
···
 	attr.cap.max_recv_sge    = cmd.max_recv_sge;
 	attr.cap.max_inline_data = cmd.max_inline_data;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle = cmd.user_handle;
+	uobj->uobject.context     = file->ucontext;
+	uobj->events_reported     = 0;
+	INIT_LIST_HEAD(&uobj->event_list);
 
 	qp = pd->device->create_qp(pd, &attr, &udata);
 	if (IS_ERR(qp)) {
···
 	qp->send_cq       = attr.send_cq;
 	qp->recv_cq       = attr.recv_cq;
 	qp->srq           = attr.srq;
-	qp->uobject       = uobj;
+	qp->uobject       = &uobj->uobject;
 	qp->event_handler = attr.event_handler;
 	qp->qp_context    = attr.qp_context;
 	qp->qp_type       = attr.qp_type;
···
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.qp_handle = uobj->id;
+	resp.qp_handle = uobj->uobject.id;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->qp_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
···
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 err_destroy:
···
 			    const char __user *buf, int in_len,
 			    int out_len)
 {
-	struct ib_uverbs_destroy_qp cmd;
-	struct ib_qp               *qp;
-	struct ib_uobject          *uobj;
-	int                         ret = -EINVAL;
+	struct ib_uverbs_destroy_qp      cmd;
+	struct ib_uverbs_destroy_qp_resp resp;
+	struct ib_qp                    *qp;
+	struct ib_uevent_object         *uobj;
+	struct ib_uverbs_event          *evt, *tmp;
+	int                              ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
+
+	memset(&resp, 0, sizeof resp);
 
 	down(&ib_uverbs_idr_mutex);
···
 	if (!qp || qp->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = qp->uobject;
+	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);
 
 	ret = ib_destroy_qp(qp);
 	if (ret)
···
 	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.events_reported = uobj->events_reported;
+
 	kfree(uobj);
+
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
 
 out:
 	up(&ib_uverbs_idr_mutex);
···
 	struct ib_uverbs_create_srq      cmd;
 	struct ib_uverbs_create_srq_resp resp;
 	struct ib_udata                  udata;
-	struct ib_uobject               *uobj;
+	struct ib_uevent_object         *uobj;
 	struct ib_pd                    *pd;
 	struct ib_srq                   *srq;
 	struct ib_srq_init_attr          attr;
···
 	attr.attr.max_sge   = cmd.max_sge;
 	attr.attr.srq_limit = cmd.srq_limit;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle = cmd.user_handle;
+	uobj->uobject.context     = file->ucontext;
+	uobj->events_reported     = 0;
+	INIT_LIST_HEAD(&uobj->event_list);
 
 	srq = pd->device->create_srq(pd, &attr, &udata);
 	if (IS_ERR(srq)) {
···
 
 	srq->device        = pd->device;
 	srq->pd            = pd;
-	srq->uobject       = uobj;
+	srq->uobject       = &uobj->uobject;
 	srq->event_handler = attr.event_handler;
 	srq->srq_context   = attr.srq_context;
 	atomic_inc(&pd->usecnt);
···
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.srq_handle = uobj->id;
+	resp.srq_handle = uobj->uobject.id;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->srq_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
···
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 err_destroy:
···
 			     const char __user *buf, int in_len,
 			     int out_len)
 {
-	struct ib_uverbs_destroy_srq cmd;
-	struct ib_srq               *srq;
-	struct ib_uobject           *uobj;
-	int                          ret = -EINVAL;
+	struct ib_uverbs_destroy_srq      cmd;
+	struct ib_uverbs_destroy_srq_resp resp;
+	struct ib_srq                    *srq;
+	struct ib_uevent_object          *uobj;
+	struct ib_uverbs_event           *evt, *tmp;
+	int                               ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
 	down(&ib_uverbs_idr_mutex);
 
+	memset(&resp, 0, sizeof resp);
+
 	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
 	if (!srq || srq->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = srq->uobject;
+	uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);
 
 	ret = ib_destroy_srq(srq);
 	if (ret)
···
 	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.events_reported = uobj->events_reported;
+
 	kfree(uobj);
+
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
 
 out:
 	up(&ib_uverbs_idr_mutex);
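
For reference, a hedged sketch of how a userspace library might issue the new destroy-CQ command over the uverbs write() protocol. The command and response layouts mirror ib_user_verbs.h; the header layout and word counts follow the usual uverbs convention, and the struct and helper names are illustrative:

	#include <stdint.h>
	#include <unistd.h>

	struct destroy_cq_msg {
		/* uverbs command header: opcode plus sizes in 32-bit words */
		uint32_t command;
		uint16_t in_words;
		uint16_t out_words;
		/* mirrors struct ib_uverbs_destroy_cq */
		uint64_t response;	/* user VA the kernel copies resp to */
		uint32_t cq_handle;
		uint32_t reserved;
	};

	struct destroy_cq_resp {	/* mirrors ib_uverbs_destroy_cq_resp */
		uint32_t comp_events_reported;
		uint32_t async_events_reported;
	};

	/* Issue the command; on success *resp holds the counts of events
	 * already delivered for this CQ before it went away. */
	static int destroy_cq(int uverbs_fd, uint32_t destroy_cq_opcode,
			      uint32_t cq_handle, struct destroy_cq_resp *resp)
	{
		struct destroy_cq_msg msg = {
			.command   = destroy_cq_opcode,
			.in_words  = sizeof msg / 4,
			.out_words = sizeof *resp / 4,
			.response  = (uintptr_t) resp,
			.cq_handle = cq_handle,
		};

		return write(uverbs_fd, &msg, sizeof msg) == sizeof msg ? 0 : -1;
	}
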
drivers/infiniband/core/uverbs_main.c (+62 -36)
···
 		idr_remove(&ib_uverbs_qp_idr, uobj->id);
 		ib_destroy_qp(qp);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_uevent_object, uobject));
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
···
 		idr_remove(&ib_uverbs_cq_idr, uobj->id);
 		ib_destroy_cq(cq);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_ucq_object, uobject));
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
···
 		idr_remove(&ib_uverbs_srq_idr, uobj->id);
 		ib_destroy_srq(srq);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_uevent_object, uobject));
 	}
 
 	/* XXX Free MWs */
···
 				    size_t count, loff_t *pos)
 {
 	struct ib_uverbs_event_file *file = filp->private_data;
-	void *event;
+	struct ib_uverbs_event *event;
 	int eventsz;
 	int ret = 0;
···
 		return -ENODEV;
 	}
 
-	if (file->is_async) {
-		event = list_entry(file->event_list.next,
-				   struct ib_uverbs_async_event, list);
+	event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
+
+	if (file->is_async)
 		eventsz = sizeof (struct ib_uverbs_async_event_desc);
-	} else {
-		event = list_entry(file->event_list.next,
-				   struct ib_uverbs_comp_event, list);
+	else
 		eventsz = sizeof (struct ib_uverbs_comp_event_desc);
-	}
 
 	if (eventsz > count) {
 		ret   = -EINVAL;
 		event = NULL;
-	} else
+	} else {
 		list_del(file->event_list.next);
+		if (event->counter) {
+			++(*event->counter);
+			list_del(&event->obj_list);
+		}
+	}
 
 	spin_unlock_irq(&file->lock);
···
 static void ib_uverbs_event_release(struct ib_uverbs_event_file *file)
 {
-	struct list_head *entry, *tmp;
+	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&file->lock);
 	if (file->fd != -1) {
 		file->fd = -1;
-		list_for_each_safe(entry, tmp, &file->event_list)
-			if (file->is_async)
-				kfree(list_entry(entry, struct ib_uverbs_async_event, list));
-			else
-				kfree(list_entry(entry, struct ib_uverbs_comp_event, list));
+		list_for_each_entry_safe(entry, tmp, &file->event_list, list)
+			kfree(entry);
 	}
 	spin_unlock_irq(&file->lock);
 }
···
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 {
-	struct ib_uverbs_file       *file = cq_context;
-	struct ib_uverbs_comp_event *entry;
-	unsigned long                flags;
+	struct ib_uverbs_file  *file = cq_context;
+	struct ib_ucq_object   *uobj;
+	struct ib_uverbs_event *entry;
+	unsigned long           flags;
 
 	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 	if (!entry)
 		return;
 
-	entry->desc.cq_handle = cq->uobject->user_handle;
+	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
+
+	entry->desc.comp.cq_handle = cq->uobject->user_handle;
+	entry->counter             = &uobj->comp_events_reported;
 
 	spin_lock_irqsave(&file->comp_file[0].lock, flags);
 	list_add_tail(&entry->list, &file->comp_file[0].event_list);
+	list_add_tail(&entry->obj_list, &uobj->comp_list);
 	spin_unlock_irqrestore(&file->comp_file[0].lock, flags);
 
 	wake_up_interruptible(&file->comp_file[0].poll_wait);
···
 }
 
 static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
-				    __u64 element, __u64 event)
+				    __u64 element, __u64 event,
+				    struct list_head *obj_list,
+				    u32 *counter)
 {
-	struct ib_uverbs_async_event *entry;
+	struct ib_uverbs_event *entry;
 	unsigned long flags;
 
 	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 	if (!entry)
 		return;
 
-	entry->desc.element    = element;
-	entry->desc.event_type = event;
+	entry->desc.async.element    = element;
+	entry->desc.async.event_type = event;
+	entry->counter               = counter;
 
 	spin_lock_irqsave(&file->async_file.lock, flags);
 	list_add_tail(&entry->list, &file->async_file.event_list);
+	if (obj_list)
+		list_add_tail(&entry->obj_list, obj_list);
 	spin_unlock_irqrestore(&file->async_file.lock, flags);
 
 	wake_up_interruptible(&file->async_file.poll_wait);
···
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.cq->uobject->user_handle,
-				event->event);
+	struct ib_ucq_object *uobj;
+
+	uobj = container_of(event->element.cq->uobject,
+			    struct ib_ucq_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->async_list,
+				&uobj->async_events_reported);
 }
 
 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.qp->uobject->user_handle,
-				event->event);
+	struct ib_uevent_object *uobj;
+
+	uobj = container_of(event->element.qp->uobject,
+			    struct ib_uevent_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->event_list,
+				&uobj->events_reported);
 }
 
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.srq->uobject->user_handle,
-				event->event);
+	struct ib_uevent_object *uobj;
+
+	uobj = container_of(event->element.srq->uobject,
+			    struct ib_uevent_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->event_list,
+				&uobj->events_reported);
 }
 
 static void ib_uverbs_event_handler(struct ib_event_handler *handler,
···
 	struct ib_uverbs_file *file =
 		container_of(handler, struct ib_uverbs_file, event_handler);
 
-	ib_uverbs_async_handler(file, event->element.port_num, event->event,
+	ib_uverbs_async_handler(file, event->element.port_num, event->event,
+				NULL, NULL);
 }
 
 static int ib_uverbs_event_init(struct ib_uverbs_event_file *file,
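
A standalone model of the counter hookup, in plain userspace C and for illustration only: each queued event carries a pointer to its owner's events_reported counter, so the consumer can credit the right object without knowing its type.

	#include <assert.h>
	#include <stdint.h>

	struct event {
		uint32_t *counter;	/* owner's events_reported, or NULL */
	};

	static void consume(struct event *evt)
	{
		if (evt->counter)
			++(*evt->counter);	/* mirrors the read path above */
	}

	int main(void)
	{
		uint32_t events_reported = 0;
		struct event evt = { .counter = &events_reported };

		consume(&evt);
		assert(events_reported == 1);
		return 0;
	}

In the kernel the bump and the destroy-time sweep both run under the event file's lock, so the count returned by destroy is exact.
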
include/rdma/ib_user_verbs.h (+20 -1)
···
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define IB_USER_VERBS_ABI_VERSION	1
+#define IB_USER_VERBS_ABI_VERSION	2
 
 enum {
 	IB_USER_VERBS_CMD_QUERY_PARAMS,
···
 };
 
 struct ib_uverbs_destroy_cq {
+	__u64 response;
 	__u32 cq_handle;
+	__u32 reserved;
+};
+
+struct ib_uverbs_destroy_cq_resp {
+	__u32 comp_events_reported;
+	__u32 async_events_reported;
 };
 
 struct ib_uverbs_create_qp {
···
 };
 
 struct ib_uverbs_destroy_qp {
+	__u64 response;
 	__u32 qp_handle;
+	__u32 reserved;
+};
+
+struct ib_uverbs_destroy_qp_resp {
+	__u32 events_reported;
 };
 
 struct ib_uverbs_attach_mcast {
···
 };
 
 struct ib_uverbs_destroy_srq {
+	__u64 response;
 	__u32 srq_handle;
+	__u32 reserved;
+};
+
+struct ib_uverbs_destroy_srq_resp {
+	__u32 events_reported;
 };
 
 #endif /* IB_USER_VERBS_H */
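
One detail worth noting in the new command layouts: the explicit reserved field after the 32-bit handle keeps each struct free of implicit compiler padding, so the layout is identical for 32- and 64-bit userspace. A standalone illustration of the property (struct name invented here):

	#include <stdint.h>

	struct destroy_qp_cmd {		/* mirrors ib_uverbs_destroy_qp */
		uint64_t response;
		uint32_t qp_handle;
		uint32_t reserved;	/* explicit, instead of a hidden hole */
	};

	/* 16 bytes on every ABI: no padding that could differ per compiler. */
	_Static_assert(sizeof(struct destroy_qp_cmd) == 16,
		       "destroy_qp command layout must be stable");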