Make sure that userspace does not retrieve stale asynchronous or completion events after destroying a CQ, QP or SRQ. We do this by sweeping the event lists before returning from a destroy call, and by returning the number of events already reported before the destroy call. This allows userspace to wait until it has processed all of an object's events returned from the kernel before it frees its context for that object.

The ABI of the destroy CQ, destroy QP and destroy SRQ commands has to
change to return the event count, so bump the ABI version from 1 to 2.
The userspace libibverbs library has already been updated to handle
both the old and new ABI versions.
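
As an illustration of what the returned count enables on the userspace
side, here is a minimal sketch of the bookkeeping a library could build
on it. The names (struct ucq, ucq_event_delivered, ucq_wait_then_free)
are illustrative only, not libibverbs' actual implementation:

#include <pthread.h>
#include <stdlib.h>

/* Illustrative userspace bookkeeping only -- not libibverbs' actual
 * code.  The library counts completion events it has already delivered
 * to the application; once the destroy command returns the kernel's
 * comp_events_reported, it waits for the two counts to meet before
 * freeing the per-CQ context. */
struct ucq {
	pthread_mutex_t mutex;
	pthread_cond_t  cond;
	unsigned int    comp_events_completed; /* delivered to the app */
};

/* Called by the event path each time a completion event is handed out. */
static void ucq_event_delivered(struct ucq *cq)
{
	pthread_mutex_lock(&cq->mutex);
	++cq->comp_events_completed;
	pthread_cond_signal(&cq->cond);
	pthread_mutex_unlock(&cq->mutex);
}

/* Called after the kernel destroy command replied with 'reported'. */
static void ucq_wait_then_free(struct ucq *cq, unsigned int reported)
{
	pthread_mutex_lock(&cq->mutex);
	while (cq->comp_events_completed < reported)
		pthread_cond_wait(&cq->cond, &cq->mutex);
	pthread_mutex_unlock(&cq->mutex);
	free(cq); /* safe: no undelivered event still names this CQ */
}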

Signed-off-by: Roland Dreier <rolandd@cisco.com>


4 files changed: +211 -89

drivers/infiniband/core/uverbs.h (+17 -9)
···
 	struct ib_uverbs_event_file	comp_file[1];
 };
 
-struct ib_uverbs_async_event {
-	struct ib_uverbs_async_event_desc	desc;
+struct ib_uverbs_event {
+	union {
+		struct ib_uverbs_async_event_desc	async;
+		struct ib_uverbs_comp_event_desc	comp;
+	} desc;
 	struct list_head	list;
+	struct list_head	obj_list;
+	u32		       *counter;
 };
 
-struct ib_uverbs_comp_event {
-	struct ib_uverbs_comp_event_desc	desc;
-	struct list_head			list;
+struct ib_uevent_object {
+	struct ib_uobject	uobject;
+	struct list_head	event_list;
+	u32			events_reported;
 };
 
-struct ib_uobject_mr {
-	struct ib_uobject	uobj;
-	struct page	       *page_list;
-	struct scatterlist     *sg_list;
+struct ib_ucq_object {
+	struct ib_uobject	uobject;
+	struct list_head	comp_list;
+	struct list_head	async_list;
+	u32			comp_events_reported;
+	u32			async_events_reported;
 };
 
 extern struct semaphore ib_uverbs_idr_mutex;
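
The new structures all embed struct ib_uobject and recover the
containing object with container_of(), as the .c changes below do. A
self-contained sketch of that pattern, using local stand-ins for the
kernel types:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): recover the enclosing
 * structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

struct ib_uobject { int id; };          /* mock, not the kernel type */

struct ib_ucq_object {
	struct ib_uobject uobject;      /* embedded base object */
	unsigned int comp_events_reported;
};

int main(void)
{
	struct ib_ucq_object ucq = { .uobject = { .id = 7 } };
	struct ib_uobject *uobj = &ucq.uobject;

	/* walk back from the embedded member to the wrapper */
	struct ib_ucq_object *back =
		container_of(uobj, struct ib_ucq_object, uobject);

	printf("id=%d same=%d\n", back->uobject.id, back == &ucq);
	return 0;
}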

drivers/infiniband/core/uverbs_cmd.c (+112 -43)
···
 	struct ib_uverbs_create_cq      cmd;
 	struct ib_uverbs_create_cq_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uobject              *uobj;
+	struct ib_ucq_object           *uobj;
 	struct ib_cq                   *cq;
 	int                             ret;
···
 	if (!uobj)
 		return -ENOMEM;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle   = cmd.user_handle;
+	uobj->uobject.context       = file->ucontext;
+	uobj->comp_events_reported  = 0;
+	uobj->async_events_reported = 0;
+	INIT_LIST_HEAD(&uobj->comp_list);
+	INIT_LIST_HEAD(&uobj->async_list);
 
 	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
 					     file->ucontext, &udata);
···
 	}
 
 	cq->device        = file->device->ib_dev;
-	cq->uobject       = uobj;
+	cq->uobject       = &uobj->uobject;
 	cq->comp_handler  = ib_uverbs_comp_handler;
 	cq->event_handler = ib_uverbs_cq_event_handler;
 	cq->cq_context    = file;
···
 	}
 
 	down(&ib_uverbs_idr_mutex);
-	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_cq_idr, cq, &uobj->uobject.id);
 	up(&ib_uverbs_idr_mutex);
 
 	if (ret == -EAGAIN)
···
 		goto err_cq;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->cq_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	memset(&resp, 0, sizeof resp);
-	resp.cq_handle = uobj->id;
+	resp.cq_handle = uobj->uobject.id;
 	resp.cqe       = cq->cqe;
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
···
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	down(&ib_uverbs_idr_mutex);
-	idr_remove(&ib_uverbs_cq_idr, uobj->id);
+	idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
 	up(&ib_uverbs_idr_mutex);
 
 err_cq:
···
 			    const char __user *buf, int in_len,
 			    int out_len)
 {
-	struct ib_uverbs_destroy_cq cmd;
-	struct ib_cq               *cq;
-	struct ib_uobject          *uobj;
-	int                         ret = -EINVAL;
+	struct ib_uverbs_destroy_cq      cmd;
+	struct ib_uverbs_destroy_cq_resp resp;
+	struct ib_cq                    *cq;
+	struct ib_ucq_object            *uobj;
+	struct ib_uverbs_event          *evt, *tmp;
+	u64                              user_handle;
+	int                              ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
+
+	memset(&resp, 0, sizeof resp);
 
 	down(&ib_uverbs_idr_mutex);
···
 	if (!cq || cq->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = cq->uobject;
+	user_handle = cq->uobject->user_handle;
+	uobj        = container_of(cq->uobject, struct ib_ucq_object, uobject);
 
 	ret = ib_destroy_cq(cq);
 	if (ret)
···
 	idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->comp_file[0].lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->comp_file[0].lock);
+
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.comp_events_reported  = uobj->comp_events_reported;
+	resp.async_events_reported = uobj->async_events_reported;
+
 	kfree(uobj);
+
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
 
 out:
 	up(&ib_uverbs_idr_mutex);
···
 	struct ib_uverbs_create_qp      cmd;
 	struct ib_uverbs_create_qp_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uobject              *uobj;
+	struct ib_uevent_object        *uobj;
 	struct ib_pd                   *pd;
 	struct ib_cq                   *scq, *rcq;
 	struct ib_srq                  *srq;
···
 	attr.cap.max_recv_sge    = cmd.max_recv_sge;
 	attr.cap.max_inline_data = cmd.max_inline_data;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle = cmd.user_handle;
+	uobj->uobject.context     = file->ucontext;
+	uobj->events_reported     = 0;
+	INIT_LIST_HEAD(&uobj->event_list);
 
 	qp = pd->device->create_qp(pd, &attr, &udata);
 	if (IS_ERR(qp)) {
···
 	qp->send_cq       = attr.send_cq;
 	qp->recv_cq       = attr.recv_cq;
 	qp->srq           = attr.srq;
-	qp->uobject       = uobj;
+	qp->uobject       = &uobj->uobject;
 	qp->event_handler = attr.event_handler;
 	qp->qp_context    = attr.qp_context;
 	qp->qp_type       = attr.qp_type;
···
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.qp_handle = uobj->id;
+	resp.qp_handle = uobj->uobject.id;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->qp_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
···
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 err_destroy:
···
 			    const char __user *buf, int in_len,
 			    int out_len)
 {
-	struct ib_uverbs_destroy_qp cmd;
-	struct ib_qp               *qp;
-	struct ib_uobject          *uobj;
-	int                         ret = -EINVAL;
+	struct ib_uverbs_destroy_qp      cmd;
+	struct ib_uverbs_destroy_qp_resp resp;
+	struct ib_qp                    *qp;
+	struct ib_uevent_object         *uobj;
+	struct ib_uverbs_event          *evt, *tmp;
+	int                              ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
+
+	memset(&resp, 0, sizeof resp);
 
 	down(&ib_uverbs_idr_mutex);
···
 	if (!qp || qp->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = qp->uobject;
+	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);
 
 	ret = ib_destroy_qp(qp);
 	if (ret)
···
 	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.events_reported = uobj->events_reported;
+
 	kfree(uobj);
+
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
 
 out:
 	up(&ib_uverbs_idr_mutex);
···
 	struct ib_uverbs_create_srq      cmd;
 	struct ib_uverbs_create_srq_resp resp;
 	struct ib_udata                  udata;
-	struct ib_uobject               *uobj;
+	struct ib_uevent_object         *uobj;
 	struct ib_pd                    *pd;
 	struct ib_srq                   *srq;
 	struct ib_srq_init_attr          attr;
···
 	attr.attr.max_sge   = cmd.max_sge;
 	attr.attr.srq_limit = cmd.srq_limit;
 
-	uobj->user_handle = cmd.user_handle;
-	uobj->context     = file->ucontext;
+	uobj->uobject.user_handle = cmd.user_handle;
+	uobj->uobject.context     = file->ucontext;
+	uobj->events_reported     = 0;
+	INIT_LIST_HEAD(&uobj->event_list);
 
 	srq = pd->device->create_srq(pd, &attr, &udata);
 	if (IS_ERR(srq)) {
···
 	srq->device        = pd->device;
 	srq->pd            = pd;
-	srq->uobject       = uobj;
+	srq->uobject       = &uobj->uobject;
 	srq->event_handler = attr.event_handler;
 	srq->srq_context   = attr.srq_context;
 	atomic_inc(&pd->usecnt);
···
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->id);
+	ret = idr_get_new(&ib_uverbs_srq_idr, srq, &uobj->uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.srq_handle = uobj->id;
+	resp.srq_handle = uobj->uobject.id;
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_add_tail(&uobj->list, &file->ucontext->srq_list);
+	list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 	if (copy_to_user((void __user *) (unsigned long) cmd.response,
···
 err_list:
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
 err_destroy:
···
 			     const char __user *buf, int in_len,
 			     int out_len)
 {
-	struct ib_uverbs_destroy_srq cmd;
-	struct ib_srq               *srq;
-	struct ib_uobject           *uobj;
-	int                          ret = -EINVAL;
+	struct ib_uverbs_destroy_srq      cmd;
+	struct ib_uverbs_destroy_srq_resp resp;
+	struct ib_srq                    *srq;
+	struct ib_uevent_object          *uobj;
+	struct ib_uverbs_event           *evt, *tmp;
+	int                               ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
 		return -EFAULT;
 
 	down(&ib_uverbs_idr_mutex);
 
+	memset(&resp, 0, sizeof resp);
+
 	srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
 	if (!srq || srq->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = srq->uobject;
+	uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);
 
 	ret = ib_destroy_srq(srq);
 	if (ret)
···
 	idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
 
 	spin_lock_irq(&file->ucontext->lock);
-	list_del(&uobj->list);
+	list_del(&uobj->uobject.list);
 	spin_unlock_irq(&file->ucontext->lock);
 
+	spin_lock_irq(&file->async_file.lock);
+	list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
+		list_del(&evt->list);
+		kfree(evt);
+	}
+	spin_unlock_irq(&file->async_file.lock);
+
+	resp.events_reported = uobj->events_reported;
+
 	kfree(uobj);
+
+	if (copy_to_user((void __user *) (unsigned long) cmd.response,
+			 &resp, sizeof resp))
+		ret = -EFAULT;
 
 out:
 	up(&ib_uverbs_idr_mutex);
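
The destroy paths above work because every queued event is linked
twice: on the event file's list, which read() consumes, and on the
owning object's obj_list, which the sweep walks. A self-contained
userspace mock of this unlink-from-both scheme (the tiny list type
stands in for the kernel's struct list_head):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list { struct list *prev, *next; };

static void list_init(struct list *h) { h->prev = h->next = h; }
static void list_add_tail(struct list *n, struct list *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
static void list_del(struct list *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	n->prev = n->next = NULL;
}

struct event {
	int         id;
	struct list list;      /* linkage on the file-wide event list */
	struct list obj_list;  /* linkage on the per-object list */
};

#define event_from(ptr, member) \
	((struct event *) ((char *) (ptr) - offsetof(struct event, member)))

int main(void)
{
	struct list file_events, cq_events;
	list_init(&file_events);
	list_init(&cq_events);

	for (int i = 0; i < 3; ++i) {
		struct event *e = malloc(sizeof *e);
		e->id = i;
		list_add_tail(&e->list, &file_events);    /* readable by userspace */
		list_add_tail(&e->obj_list, &cq_events);  /* owned by this CQ */
	}

	/* Destroy-time sweep: walk the CQ's own list and unlink each event
	 * from the file list too, so a later read can never return it. */
	for (struct list *p = cq_events.next, *n = p->next;
	     p != &cq_events; p = n, n = p->next) {
		struct event *e = event_from(p, obj_list);
		printf("dropping stale event %d\n", e->id);
		list_del(&e->list);
		free(e);
	}
	return 0;
}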

drivers/infiniband/core/uverbs_main.c (+62 -36)
···
 		idr_remove(&ib_uverbs_qp_idr, uobj->id);
 		ib_destroy_qp(qp);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_uevent_object, uobject));
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
···
 		idr_remove(&ib_uverbs_cq_idr, uobj->id);
 		ib_destroy_cq(cq);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_ucq_object, uobject));
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->srq_list, list) {
···
 		idr_remove(&ib_uverbs_srq_idr, uobj->id);
 		ib_destroy_srq(srq);
 		list_del(&uobj->list);
-		kfree(uobj);
+		kfree(container_of(uobj, struct ib_uevent_object, uobject));
 	}
 
 	/* XXX Free MWs */
···
 			     size_t count, loff_t *pos)
 {
 	struct ib_uverbs_event_file *file = filp->private_data;
-	void *event;
+	struct ib_uverbs_event *event;
 	int eventsz;
 	int ret = 0;
···
 		return -ENODEV;
 	}
 
-	if (file->is_async) {
-		event   = list_entry(file->event_list.next,
-				     struct ib_uverbs_async_event, list);
+	event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
+
+	if (file->is_async)
 		eventsz = sizeof (struct ib_uverbs_async_event_desc);
-	} else {
-		event   = list_entry(file->event_list.next,
-				     struct ib_uverbs_comp_event, list);
+	else
 		eventsz = sizeof (struct ib_uverbs_comp_event_desc);
-	}
 
 	if (eventsz > count) {
 		ret   = -EINVAL;
 		event = NULL;
-	} else
+	} else {
 		list_del(file->event_list.next);
+		if (event->counter) {
+			++(*event->counter);
+			list_del(&event->obj_list);
+		}
+	}
 
 	spin_unlock_irq(&file->lock);
···
 static void ib_uverbs_event_release(struct ib_uverbs_event_file *file)
 {
-	struct list_head *entry, *tmp;
+	struct ib_uverbs_event *entry, *tmp;
 
 	spin_lock_irq(&file->lock);
 	if (file->fd != -1) {
 		file->fd = -1;
-		list_for_each_safe(entry, tmp, &file->event_list)
-			if (file->is_async)
-				kfree(list_entry(entry, struct ib_uverbs_async_event, list));
-			else
-				kfree(list_entry(entry, struct ib_uverbs_comp_event, list));
+		list_for_each_entry_safe(entry, tmp, &file->event_list, list)
+			kfree(entry);
 	}
 	spin_unlock_irq(&file->lock);
 }
···
 void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 {
-	struct ib_uverbs_file       *file = cq_context;
-	struct ib_uverbs_comp_event *entry;
-	unsigned long                flags;
+	struct ib_uverbs_file  *file = cq_context;
+	struct ib_ucq_object   *uobj;
+	struct ib_uverbs_event *entry;
+	unsigned long           flags;
 
 	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 	if (!entry)
 		return;
 
-	entry->desc.cq_handle = cq->uobject->user_handle;
+	uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
+
+	entry->desc.comp.cq_handle = cq->uobject->user_handle;
+	entry->counter             = &uobj->comp_events_reported;
 
 	spin_lock_irqsave(&file->comp_file[0].lock, flags);
 	list_add_tail(&entry->list, &file->comp_file[0].event_list);
+	list_add_tail(&entry->obj_list, &uobj->comp_list);
 	spin_unlock_irqrestore(&file->comp_file[0].lock, flags);
 
 	wake_up_interruptible(&file->comp_file[0].poll_wait);
···
 }
 
 static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
-				    __u64 element, __u64 event)
+				    __u64 element, __u64 event,
+				    struct list_head *obj_list,
+				    u32 *counter)
 {
-	struct ib_uverbs_async_event *entry;
+	struct ib_uverbs_event *entry;
 	unsigned long flags;
 
 	entry = kmalloc(sizeof *entry, GFP_ATOMIC);
 	if (!entry)
 		return;
 
-	entry->desc.element    = element;
-	entry->desc.event_type = event;
+	entry->desc.async.element    = element;
+	entry->desc.async.event_type = event;
+	entry->counter               = counter;
 
 	spin_lock_irqsave(&file->async_file.lock, flags);
 	list_add_tail(&entry->list, &file->async_file.event_list);
+	if (obj_list)
+		list_add_tail(&entry->obj_list, obj_list);
 	spin_unlock_irqrestore(&file->async_file.lock, flags);
 
 	wake_up_interruptible(&file->async_file.poll_wait);
···
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.cq->uobject->user_handle,
-				event->event);
+	struct ib_ucq_object *uobj;
+
+	uobj = container_of(event->element.cq->uobject,
+			    struct ib_ucq_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->async_list,
+				&uobj->async_events_reported);
 }
 
 void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.qp->uobject->user_handle,
-				event->event);
+	struct ib_uevent_object *uobj;
+
+	uobj = container_of(event->element.qp->uobject,
+			    struct ib_uevent_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->event_list,
+				&uobj->events_reported);
 }
 
 void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	ib_uverbs_async_handler(context_ptr,
-				event->element.srq->uobject->user_handle,
-				event->event);
+	struct ib_uevent_object *uobj;
+
+	uobj = container_of(event->element.srq->uobject,
+			    struct ib_uevent_object, uobject);
+
+	ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
+				event->event, &uobj->event_list,
+				&uobj->events_reported);
 }
 
 static void ib_uverbs_event_handler(struct ib_event_handler *handler,
···
 	struct ib_uverbs_file *file =
 		container_of(handler, struct ib_uverbs_file, event_handler);
 
-	ib_uverbs_async_handler(file, event->element.port_num, event->event);
+	ib_uverbs_async_handler(file, event->element.port_num, event->event,
+				NULL, NULL);
 }
 
 static int ib_uverbs_event_init(struct ib_uverbs_event_file *file,
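
The counting itself happens in the read path shown above: delivering an
event bumps the owner's counter through event->counter and unlinks the
event from the object's list so it can no longer be swept. Condensed
from ib_uverbs_event_read() above, with locking and error handling
elided (port events pass counter == NULL and are never counted):

event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
list_del(&event->list);
if (event->counter) {
	++(*event->counter);         /* destroy reports this count */
	list_del(&event->obj_list);  /* no longer on the object's sweep list */
}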

include/rdma/ib_user_verbs.h (+20 -1)
···
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-#define IB_USER_VERBS_ABI_VERSION 1
+#define IB_USER_VERBS_ABI_VERSION 2
 
 enum {
 	IB_USER_VERBS_CMD_QUERY_PARAMS,
···
 };
 
 struct ib_uverbs_destroy_cq {
+	__u64 response;
 	__u32 cq_handle;
+	__u32 reserved;
+};
+
+struct ib_uverbs_destroy_cq_resp {
+	__u32 comp_events_reported;
+	__u32 async_events_reported;
 };
 
 struct ib_uverbs_create_qp {
···
 };
 
 struct ib_uverbs_destroy_qp {
+	__u64 response;
 	__u32 qp_handle;
+	__u32 reserved;
+};
+
+struct ib_uverbs_destroy_qp_resp {
+	__u32 events_reported;
 };
 
 struct ib_uverbs_attach_mcast {
···
 };
 
 struct ib_uverbs_destroy_srq {
+	__u64 response;
 	__u32 srq_handle;
+	__u32 reserved;
+};
+
+struct ib_uverbs_destroy_srq_resp {
+	__u32 events_reported;
 };
 
 #endif /* IB_USER_VERBS_H */
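
For completeness, a hedged sketch of how userspace might issue the v2
destroy-CQ command, assuming the usual uverbs write() framing with
struct ib_uverbs_cmd_hdr; the helper name destroy_cq_v2 and the error
handling are illustrative, not libibverbs code:

#include <stdint.h>
#include <unistd.h>
#include <rdma/ib_user_verbs.h>

/* Sketch only: build the command with the new response pointer and
 * write it to the uverbs command fd; the kernel fills the response
 * through copy_to_user() as in the destroy paths above. */
static int destroy_cq_v2(int cmd_fd, uint32_t cq_handle,
			 struct ib_uverbs_destroy_cq_resp *resp)
{
	struct {
		struct ib_uverbs_cmd_hdr    hdr;
		struct ib_uverbs_destroy_cq cmd;
	} req;

	req.hdr.command   = IB_USER_VERBS_CMD_DESTROY_CQ;
	req.hdr.in_words  = sizeof req / 4;
	req.hdr.out_words = sizeof *resp / 4;
	req.cmd.response  = (uintptr_t) resp;   /* kernel copy_to_user target */
	req.cmd.cq_handle = cq_handle;
	req.cmd.reserved  = 0;

	if (write(cmd_fd, &req, sizeof req) != (ssize_t) sizeof req)
		return -1;

	/* resp->comp_events_reported / resp->async_events_reported now
	 * hold the counts already delivered before the destroy. */
	return 0;
}

The caller would then wait until it has processed that many completion
and async events for this CQ before freeing its per-CQ state, as
sketched after the commit message above.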