IB/uverbs: track multicast group membership for userspace QPs

uverbs needs to track which multicast groups each qp is
attached to, so that it can properly detach from them when
cleanup is performed on device file close.

Signed-off-by: Jack Morgenstein <jackm@mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Jack Morgenstein and committed by Roland Dreier (f4e40156, e0ae9ecf)

+99 -23
+11 -0
drivers/infiniband/core/uverbs.h
···
 	u32			*counter;
 };
 
+struct ib_uverbs_mcast_entry {
+	struct list_head	list;
+	union ib_gid		gid;
+	u16			lid;
+};
+
 struct ib_uevent_object {
 	struct ib_uobject	uobject;
 	struct list_head	event_list;
 	u32			events_reported;
+};
+
+struct ib_uqp_object {
+	struct ib_uevent_object	uevent;
+	struct list_head	mcast_list;
 };
 
 struct ib_ucq_object {
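Note (not part of the patch): the new ib_uqp_object embeds the existing ib_uevent_object rather than replacing it, and the rest of the patch recovers the outer object from qp->uobject via container_of(..., struct ib_uqp_object, uevent.uobject). The userspace-only sketch below shows that recovery step in isolation; the struct definitions are simplified stand-ins for the uverbs ones, and container_of is spelled out by hand instead of coming from the kernel headers.

/* Standalone illustration (not kernel code): recovering the outer object
 * from a pointer to an embedded member, as the patch does with
 * container_of(qp->uobject, struct ib_uqp_object, uevent.uobject). */
#include <stdio.h>
#include <stddef.h>

/* Spelled-out equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

/* Simplified stand-ins for the uverbs structures above. */
struct uobject       { int id; };
struct uevent_object { struct uobject uobject; int events_reported; };
struct uqp_object    { struct uevent_object uevent; int num_mcast; };

int main(void)
{
	struct uqp_object qp_obj = {
		.uevent = { .uobject = { .id = 42 } },
	};

	/* qp->uobject in the kernel points at the innermost member... */
	struct uobject *inner = &qp_obj.uevent.uobject;

	/* ...and container_of() steps back out to the enclosing uqp_object,
	 * exactly as ib_uverbs_destroy_qp() and the cleanup path now do. */
	struct uqp_object *outer =
		container_of(inner, struct uqp_object, uevent.uobject);

	printf("id=%d, recovered the original object: %s\n",
	       outer->uevent.uobject.id, outer == &qp_obj ? "yes" : "no");
	return 0;
}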
+71 -19
drivers/infiniband/core/uverbs_cmd.c
···
 	struct ib_uverbs_create_qp      cmd;
 	struct ib_uverbs_create_qp_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uevent_object        *uobj;
+	struct ib_uqp_object           *uobj;
 	struct ib_pd                   *pd;
 	struct ib_cq                   *scq, *rcq;
 	struct ib_srq                  *srq;
···
 	attr.cap.max_recv_sge    = cmd.max_recv_sge;
 	attr.cap.max_inline_data = cmd.max_inline_data;
 
-	uobj->uobject.user_handle = cmd.user_handle;
-	uobj->uobject.context     = file->ucontext;
-	uobj->events_reported     = 0;
-	INIT_LIST_HEAD(&uobj->event_list);
+	uobj->uevent.uobject.user_handle = cmd.user_handle;
+	uobj->uevent.uobject.context     = file->ucontext;
+	uobj->uevent.events_reported     = 0;
+	INIT_LIST_HEAD(&uobj->uevent.event_list);
+	INIT_LIST_HEAD(&uobj->mcast_list);
 
 	qp = pd->device->create_qp(pd, &attr, &udata);
 	if (IS_ERR(qp)) {
···
 	qp->send_cq       = attr.send_cq;
 	qp->recv_cq       = attr.recv_cq;
 	qp->srq           = attr.srq;
-	qp->uobject       = &uobj->uobject;
+	qp->uobject       = &uobj->uevent.uobject;
 	qp->event_handler = attr.event_handler;
 	qp->qp_context    = attr.qp_context;
 	qp->qp_type       = attr.qp_type;
···
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
+	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uevent.uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.qp_handle       = uobj->uobject.id;
+	resp.qp_handle       = uobj->uevent.uobject.id;
 	resp.max_recv_sge    = attr.cap.max_recv_sge;
 	resp.max_send_sge    = attr.cap.max_send_sge;
 	resp.max_recv_wr     = attr.cap.max_recv_wr;
···
 	}
 
 	down(&file->mutex);
-	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
+	list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
 	up(&file->mutex);
 
 	up(&ib_uverbs_idr_mutex);
···
 	return in_len;
 
 err_idr:
-	idr_remove(&ib_uverbs_qp_idr, uobj->uobject.id);
+	idr_remove(&ib_uverbs_qp_idr, uobj->uevent.uobject.id);
 
 err_destroy:
 	ib_destroy_qp(qp);
···
 	struct ib_uverbs_destroy_qp      cmd;
 	struct ib_uverbs_destroy_qp_resp resp;
 	struct ib_qp                    *qp;
-	struct ib_uevent_object         *uobj;
+	struct ib_uqp_object            *uobj;
 	int                              ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
···
 	if (!qp || qp->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	if (!list_empty(&uobj->mcast_list)) {
+		ret = -EBUSY;
+		goto out;
+	}
 
 	ret = ib_destroy_qp(qp);
 	if (ret)
···
 	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
 
 	down(&file->mutex);
-	list_del(&uobj->uobject.list);
+	list_del(&uobj->uevent.uobject.list);
 	up(&file->mutex);
 
-	ib_uverbs_release_uevent(file, uobj);
+	ib_uverbs_release_uevent(file, &uobj->uevent);
 
-	resp.events_reported = uobj->events_reported;
+	resp.events_reported = uobj->uevent.events_reported;
 
 	kfree(uobj);
 
···
 {
 	struct ib_uverbs_attach_mcast cmd;
 	struct ib_qp                 *qp;
+	struct ib_uqp_object         *uobj;
+	struct ib_uverbs_mcast_entry *mcast;
 	int                           ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
···
 	down(&ib_uverbs_idr_mutex);
 
 	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
-	if (qp && qp->uobject->context == file->ucontext)
-		ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (!qp || qp->uobject->context != file->ucontext)
+		goto out;
 
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	list_for_each_entry(mcast, &uobj->mcast_list, list)
+		if (cmd.mlid == mcast->lid &&
+		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
+			ret = 0;
+			goto out;
+		}
+
+	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+	if (!mcast) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mcast->lid = cmd.mlid;
+	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
+
+	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
+	if (!ret) {
+		uobj = container_of(qp->uobject, struct ib_uqp_object,
+				    uevent.uobject);
+		list_add_tail(&mcast->list, &uobj->mcast_list);
+	} else
+		kfree(mcast);
+
+out:
 	up(&ib_uverbs_idr_mutex);
 
 	return ret ? ret : in_len;
···
 			      int out_len)
 {
 	struct ib_uverbs_detach_mcast cmd;
+	struct ib_uqp_object         *uobj;
 	struct ib_qp                 *qp;
+	struct ib_uverbs_mcast_entry *mcast;
 	int                           ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
···
 	down(&ib_uverbs_idr_mutex);
 
 	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
-	if (qp && qp->uobject->context == file->ucontext)
-		ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (!qp || qp->uobject->context != file->ucontext)
+		goto out;
 
+	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (ret)
+		goto out;
+
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	list_for_each_entry(mcast, &uobj->mcast_list, list)
+		if (cmd.mlid == mcast->lid &&
+		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
+			list_del(&mcast->list);
+			kfree(mcast);
+			break;
+		}
+
+out:
 	up(&ib_uverbs_idr_mutex);
 
 	return ret ? ret : in_len;
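For context (again not part of the patch), these ATTACH_MCAST / DETACH_MCAST / DESTROY_QP commands are what libibverbs issues under ibv_attach_mcast(), ibv_detach_mcast() and ibv_destroy_qp(). The sketch below walks that userspace sequence against the new kernel behaviour: a repeated attach of an identical (gid, mlid) pair is simply reported as success, and destroying a QP that is still attached is refused with EBUSY. The MGID/MLID values are placeholders (a real application would obtain them by joining the group through the SA, for example via rdma_cm), and error handling is abbreviated.

/* Userspace sketch, assuming a usable IB device; build with -libverbs. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs = ibv_get_device_list(NULL);
	if (!devs || !devs[0])
		return 1;

	struct ibv_context *ctx = ibv_open_device(devs[0]);
	if (!ctx)
		return 1;

	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	struct ibv_cq *cq = ibv_create_cq(ctx, 16, NULL, NULL, 0);

	struct ibv_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap     = { .max_send_wr = 1, .max_recv_wr = 1,
			     .max_send_sge = 1, .max_recv_sge = 1 },
		.qp_type = IBV_QPT_UD,	/* multicast attach needs a UD QP */
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
	if (!qp)
		return 1;

	/* Placeholder group; real code gets the MGID/MLID from an SA join. */
	union ibv_gid mgid;
	memset(&mgid, 0, sizeof mgid);
	mgid.raw[0] = 0xff;
	mgid.raw[1] = 0x12;
	uint16_t mlid = 0xc001;

	int qp_gone = 0;

	if (ibv_attach_mcast(qp, &mgid, mlid) == 0) {
		/* Re-attaching the same (gid, lid) pair is now found in the
		 * QP's mcast_list and simply reported as success. */
		ibv_attach_mcast(qp, &mgid, mlid);

		/* Destroying the QP while still attached is refused (EBUSY). */
		int busy = ibv_destroy_qp(qp);
		if (busy) {
			fprintf(stderr, "destroy while attached: %s\n",
				strerror(busy));
			ibv_detach_mcast(qp, &mgid, mlid);
		} else {
			qp_gone = 1;	/* pre-patch kernels allowed this */
		}
	}

	if (!qp_gone)
		ibv_destroy_qp(qp);	/* succeeds once nothing is attached */

	ibv_destroy_cq(cq);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	return 0;
}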
+17 -4
drivers/infiniband/core/uverbs_main.c
···
 	spin_unlock_irq(&file->async_file->lock);
 }
 
+static void ib_uverbs_detach_umcast(struct ib_qp *qp,
+				    struct ib_uqp_object *uobj)
+{
+	struct ib_uverbs_mcast_entry *mcast, *tmp;
+
+	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
+		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
+		list_del(&mcast->list);
+		kfree(mcast);
+	}
+}
+
 static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 				      struct ib_ucontext *context)
 {
···
 
 	list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
 		struct ib_qp *qp = idr_find(&ib_uverbs_qp_idr, uobj->id);
-		struct ib_uevent_object *uevent =
-			container_of(uobj, struct ib_uevent_object, uobject);
+		struct ib_uqp_object *uqp =
+			container_of(uobj, struct ib_uqp_object, uevent.uobject);
 		idr_remove(&ib_uverbs_qp_idr, uobj->id);
+		ib_uverbs_detach_umcast(qp, uqp);
 		ib_destroy_qp(qp);
 		list_del(&uobj->list);
-		ib_uverbs_release_uevent(file, uevent);
-		kfree(uevent);
+		ib_uverbs_release_uevent(file, &uqp->uevent);
+		kfree(uqp);
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
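A note on the cleanup path: ib_uverbs_cleanup_ucontext() now detaches any remaining groups with ib_uverbs_detach_umcast() before destroying each QP, and the walk uses list_for_each_entry_safe() because every entry is freed while the list is being traversed. The toy program below (an analogy on a plain singly linked list, not the kernel list API) shows the same iterate-and-free pattern: the successor pointer is saved before the current node is released.

/* Toy illustration (plain C, not kernel code) of the iterate-and-free
 * pattern that ib_uverbs_detach_umcast() relies on. */
#include <stdio.h>
#include <stdlib.h>

struct mcast_entry {			/* stand-in for ib_uverbs_mcast_entry */
	unsigned short      lid;
	struct mcast_entry *next;
};

/* Analogue of ib_uverbs_detach_umcast(): detach and free every entry. */
static void detach_all(struct mcast_entry **head)
{
	struct mcast_entry *entry = *head, *tmp;

	while (entry) {
		tmp = entry->next;	/* save successor before freeing */
		printf("detaching lid 0x%x\n", entry->lid);
		free(entry);		/* entry is invalid from here on */
		entry = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct mcast_entry *head = NULL;

	/* Mimic two attaches the process never detached before closing. */
	for (unsigned short lid = 0xc001; lid <= 0xc002; lid++) {
		struct mcast_entry *e = malloc(sizeof *e);
		if (!e)
			break;
		e->lid  = lid;
		e->next = head;
		head    = e;
	}

	detach_all(&head);	/* what device-file close now triggers per QP */
	return 0;
}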