Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

+170 -55
+22 -19
drivers/infiniband/core/user_mad.c
···
 	u8 method;
 	__be64 *tid;
 	int ret, length, hdr_len, copy_offset;
-	int rmpp_active = 0;
+	int rmpp_active, has_rmpp_header;
 
 	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
 		return -EINVAL;
···
 	}
 
 	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
-	if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
-		/* RMPP active */
-		if (!agent->rmpp_version) {
-			ret = -EINVAL;
-			goto err_ah;
-		}
-
-		/* Validate that the management class can support RMPP */
-		if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
-			hdr_len = IB_MGMT_SA_HDR;
-		} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
-			   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
-			hdr_len = IB_MGMT_VENDOR_HDR;
-		} else {
-			ret = -EINVAL;
-			goto err_ah;
-		}
-		rmpp_active = 1;
+	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
+		hdr_len = IB_MGMT_SA_HDR;
 		copy_offset = IB_MGMT_RMPP_HDR;
+		has_rmpp_header = 1;
+	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
+		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
+		hdr_len = IB_MGMT_VENDOR_HDR;
+		copy_offset = IB_MGMT_RMPP_HDR;
+		has_rmpp_header = 1;
 	} else {
 		hdr_len = IB_MGMT_MAD_HDR;
 		copy_offset = IB_MGMT_MAD_HDR;
+		has_rmpp_header = 0;
+	}
+
+	if (has_rmpp_header)
+		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+			      IB_MGMT_RMPP_FLAG_ACTIVE;
+	else
+		rmpp_active = 0;
+
+	/* Validate that the management class can support RMPP */
+	if (rmpp_active && !agent->rmpp_version) {
+		ret = -EINVAL;
+		goto err_ah;
 	}
 
 	packet->msg = ib_create_send_mad(agent,
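The rework above is subtler than it looks: the management class now selects the header layout first, and the RMPP flag is sampled only when that class actually carries an RMPP header, so a MAD without one can never be mistaken for RMPP-active. A minimal userspace sketch of the same decision logic; the constants and struct layout below are simplified stand-ins for the kernel's definitions in <rdma/ib_mad.h>, not the real ones:

#include <stdint.h>

/* Stand-in values; the kernel's real constants live in ib_mad.h. */
enum {
	MGMT_CLASS_SUBN_ADM      = 0x03,
	MGMT_CLASS_VENDOR2_START = 0x30,
	MGMT_CLASS_VENDOR2_END   = 0x4f,
	RMPP_FLAG_ACTIVE         = 1,
};

struct mad_hdr  { uint8_t mgmt_class; };
struct rmpp_mad {
	struct mad_hdr mad_hdr;
	uint8_t        rmpp_flags;	/* meaningful only if an RMPP header exists */
};

/* Classify first; classes without an RMPP header force rmpp_active
 * to 0 instead of interpreting payload bytes as RMPP flags. */
static int mad_rmpp_active(const struct rmpp_mad *mad)
{
	uint8_t c = mad->mad_hdr.mgmt_class;
	int has_rmpp_header =
		c == MGMT_CLASS_SUBN_ADM ||
		(c >= MGMT_CLASS_VENDOR2_START && c <= MGMT_CLASS_VENDOR2_END);

	return has_rmpp_header && (mad->rmpp_flags & RMPP_FLAG_ACTIVE);
}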
+11
drivers/infiniband/core/uverbs.h
···
 	u32	*counter;
 };
 
+struct ib_uverbs_mcast_entry {
+	struct list_head	list;
+	union ib_gid		gid;
+	u16			lid;
+};
+
 struct ib_uevent_object {
 	struct ib_uobject	uobject;
 	struct list_head	event_list;
 	u32			events_reported;
+};
+
+struct ib_uqp_object {
+	struct ib_uevent_object	uevent;
+	struct list_head	mcast_list;
 };
 
 struct ib_ucq_object {
+71 -19
drivers/infiniband/core/uverbs_cmd.c
···
 	struct ib_uverbs_create_qp      cmd;
 	struct ib_uverbs_create_qp_resp resp;
 	struct ib_udata                 udata;
-	struct ib_uevent_object        *uobj;
+	struct ib_uqp_object           *uobj;
 	struct ib_pd                   *pd;
 	struct ib_cq                   *scq, *rcq;
 	struct ib_srq                  *srq;
···
 	attr.cap.max_recv_sge    = cmd.max_recv_sge;
 	attr.cap.max_inline_data = cmd.max_inline_data;
 
-	uobj->uobject.user_handle = cmd.user_handle;
-	uobj->uobject.context = file->ucontext;
-	uobj->events_reported = 0;
-	INIT_LIST_HEAD(&uobj->event_list);
+	uobj->uevent.uobject.user_handle = cmd.user_handle;
+	uobj->uevent.uobject.context = file->ucontext;
+	uobj->uevent.events_reported = 0;
+	INIT_LIST_HEAD(&uobj->uevent.event_list);
+	INIT_LIST_HEAD(&uobj->mcast_list);
 
 	qp = pd->device->create_qp(pd, &attr, &udata);
 	if (IS_ERR(qp)) {
···
 	qp->send_cq       = attr.send_cq;
 	qp->recv_cq       = attr.recv_cq;
 	qp->srq           = attr.srq;
-	qp->uobject = &uobj->uobject;
+	qp->uobject = &uobj->uevent.uobject;
 	qp->event_handler = attr.event_handler;
 	qp->qp_context    = attr.qp_context;
 	qp->qp_type       = attr.qp_type;
···
 		goto err_destroy;
 	}
 
-	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uobject.id);
+	ret = idr_get_new(&ib_uverbs_qp_idr, qp, &uobj->uevent.uobject.id);
 
 	if (ret == -EAGAIN)
 		goto retry;
 	if (ret)
 		goto err_destroy;
 
-	resp.qp_handle = uobj->uobject.id;
+	resp.qp_handle = uobj->uevent.uobject.id;
 	resp.max_recv_sge = attr.cap.max_recv_sge;
 	resp.max_send_sge = attr.cap.max_send_sge;
 	resp.max_recv_wr = attr.cap.max_recv_wr;
···
 	}
 
 	down(&file->mutex);
-	list_add_tail(&uobj->uobject.list, &file->ucontext->qp_list);
+	list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
 	up(&file->mutex);
 
 	up(&ib_uverbs_idr_mutex);
 
 	return in_len;
 
 err_idr:
-	idr_remove(&ib_uverbs_qp_idr, uobj->uobject.id);
+	idr_remove(&ib_uverbs_qp_idr, uobj->uevent.uobject.id);
 
 err_destroy:
 	ib_destroy_qp(qp);
···
 	struct ib_uverbs_destroy_qp      cmd;
 	struct ib_uverbs_destroy_qp_resp resp;
 	struct ib_qp                    *qp;
-	struct ib_uevent_object         *uobj;
+	struct ib_uqp_object            *uobj;
 	int                              ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
···
 	if (!qp || qp->uobject->context != file->ucontext)
 		goto out;
 
-	uobj = container_of(qp->uobject, struct ib_uevent_object, uobject);
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	if (!list_empty(&uobj->mcast_list)) {
+		ret = -EBUSY;
+		goto out;
+	}
 
 	ret = ib_destroy_qp(qp);
 	if (ret)
···
 	idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
 
 	down(&file->mutex);
-	list_del(&uobj->uobject.list);
+	list_del(&uobj->uevent.uobject.list);
 	up(&file->mutex);
 
-	ib_uverbs_release_uevent(file, uobj);
+	ib_uverbs_release_uevent(file, &uobj->uevent);
 
-	resp.events_reported = uobj->events_reported;
+	resp.events_reported = uobj->uevent.events_reported;
 
 	kfree(uobj);
···
 {
 	struct ib_uverbs_attach_mcast cmd;
 	struct ib_qp                 *qp;
+	struct ib_uqp_object         *uobj;
+	struct ib_uverbs_mcast_entry *mcast;
 	int                           ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
···
 	down(&ib_uverbs_idr_mutex);
 
 	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
-	if (qp && qp->uobject->context == file->ucontext)
-		ret = ib_attach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (!qp || qp->uobject->context != file->ucontext)
+		goto out;
 
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	list_for_each_entry(mcast, &uobj->mcast_list, list)
+		if (cmd.mlid == mcast->lid &&
+		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
+			ret = 0;
+			goto out;
+		}
+
+	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+	if (!mcast) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mcast->lid = cmd.mlid;
+	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
+
+	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
+	if (!ret) {
+		uobj = container_of(qp->uobject, struct ib_uqp_object,
+				    uevent.uobject);
+		list_add_tail(&mcast->list, &uobj->mcast_list);
+	} else
+		kfree(mcast);
+
+out:
 	up(&ib_uverbs_idr_mutex);
 
 	return ret ? ret : in_len;
···
 			       int out_len)
 {
 	struct ib_uverbs_detach_mcast cmd;
+	struct ib_uqp_object         *uobj;
 	struct ib_qp                 *qp;
+	struct ib_uverbs_mcast_entry *mcast;
 	int                           ret = -EINVAL;
 
 	if (copy_from_user(&cmd, buf, sizeof cmd))
···
 	down(&ib_uverbs_idr_mutex);
 
 	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
-	if (qp && qp->uobject->context == file->ucontext)
-		ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (!qp || qp->uobject->context != file->ucontext)
+		goto out;
 
+	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
+	if (ret)
+		goto out;
+
+	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
+
+	list_for_each_entry(mcast, &uobj->mcast_list, list)
+		if (cmd.mlid == mcast->lid &&
+		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
+			list_del(&mcast->list);
+			kfree(mcast);
+			break;
+		}
+
+out:
 	up(&ib_uverbs_idr_mutex);
 
 	return ret ? ret : in_len;
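Two properties of the new bookkeeping are worth calling out: attach is idempotent (a (gid, mlid) pair already on the QP's mcast_list returns 0 without a second hardware attach), and destroy_qp now refuses with -EBUSY while groups remain attached. A hedged userspace sketch of that list discipline, with a hand-rolled singly linked list standing in for the kernel's list_head (all names here are hypothetical):

#include <stdlib.h>
#include <string.h>

struct mcast_entry {
	struct mcast_entry *next;
	unsigned char       gid[16];
	unsigned short      lid;
};

struct uqp {
	struct mcast_entry *mcast_list;
};

/* Idempotent attach: an already-tracked (gid, lid) pair succeeds
 * without a second hardware attach. */
static int uqp_attach_mcast(struct uqp *qp,
			    const unsigned char gid[16], unsigned short lid)
{
	struct mcast_entry *e;

	for (e = qp->mcast_list; e; e = e->next)
		if (e->lid == lid && !memcmp(e->gid, gid, 16))
			return 0;

	e = malloc(sizeof *e);
	if (!e)
		return -1;
	memcpy(e->gid, gid, 16);
	e->lid = lid;

	/* The hardware attach would happen here; track only on success. */
	e->next = qp->mcast_list;
	qp->mcast_list = e;
	return 0;
}

/* Detach unlinks and frees the matching entry, if any. */
static void uqp_detach_mcast(struct uqp *qp,
			     const unsigned char gid[16], unsigned short lid)
{
	struct mcast_entry **p, *e;

	for (p = &qp->mcast_list; (e = *p) != NULL; p = &e->next)
		if (e->lid == lid && !memcmp(e->gid, gid, 16)) {
			*p = e->next;
			free(e);
			return;
		}
}

With this list in place, the process-exit path can walk it and detach everything a process forgot, which is exactly what the uverbs_main.c change below does.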
+17 -4
drivers/infiniband/core/uverbs_main.c
···
 	spin_unlock_irq(&file->async_file->lock);
 }
 
+static void ib_uverbs_detach_umcast(struct ib_qp *qp,
+				    struct ib_uqp_object *uobj)
+{
+	struct ib_uverbs_mcast_entry *mcast, *tmp;
+
+	list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
+		ib_detach_mcast(qp, &mcast->gid, mcast->lid);
+		list_del(&mcast->list);
+		kfree(mcast);
+	}
+}
+
 static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
 				      struct ib_ucontext *context)
 {
···
 	list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
 		struct ib_qp *qp = idr_find(&ib_uverbs_qp_idr, uobj->id);
-		struct ib_uevent_object *uevent =
-			container_of(uobj, struct ib_uevent_object, uobject);
+		struct ib_uqp_object *uqp =
+			container_of(uobj, struct ib_uqp_object, uevent.uobject);
 		idr_remove(&ib_uverbs_qp_idr, uobj->id);
+		ib_uverbs_detach_umcast(qp, uqp);
 		ib_destroy_qp(qp);
 		list_del(&uobj->list);
-		ib_uverbs_release_uevent(file, uevent);
-		kfree(uevent);
+		ib_uverbs_release_uevent(file, &uqp->uevent);
+		kfree(uqp);
 	}
 
 	list_for_each_entry_safe(uobj, tmp, &context->cq_list, list) {
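Note the ordering in the cleanup loop: ib_uverbs_detach_umcast() runs before ib_destroy_qp(), so multicast groups a process was still attached to at exit are detached from the QP rather than leaked with it. This is the consumer of the mcast_list bookkeeping added in uverbs_cmd.c above.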
+32 -2
drivers/infiniband/hw/mthca/mthca_qp.c
···
 			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 
 	mthca_wq_init(&qp->sq);
+	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
+
 	mthca_wq_init(&qp->rq);
+	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
 
 	if (mthca_is_memfree(dev)) {
 		*qp->sq.db = 0;
···
 {
 	struct mthca_dev *dev = to_mdev(ibqp->device);
 	struct mthca_qp *qp = to_mqp(ibqp);
+	__be32 doorbell[2];
 	void *wqe;
 	void *prev_wqe;
 	unsigned long flags;
···
 	ind = qp->sq.head & (qp->sq.max - 1);
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
+		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
+			nreq = 0;
+
+			doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
+						  ((qp->sq.head & 0xffff) << 8) |
+						  f0 | op0);
+			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);
+
+			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
+			size0 = 0;
+
+			/*
+			 * Make sure that descriptors are written before
+			 * doorbell record.
+			 */
+			wmb();
+			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
+
+			/*
+			 * Make sure doorbell record is written before we
+			 * write MMIO send doorbell.
+			 */
+			wmb();
+			mthca_write64(doorbell,
+				      dev->kar + MTHCA_SEND_DOORBELL,
+				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+		}
+
 		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
 			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
 				  " %d max, %d nreq)\n", qp->qpn,
···
 
 out:
 	if (likely(nreq)) {
-		__be32 doorbell[2];
-
 		doorbell[0] = cpu_to_be32((nreq << 24) |
 					  ((qp->sq.head & 0xffff) << 8) |
 					  f0 | op0);
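Why flush every MTHCA_ARBEL_MAX_WQES_PER_SEND_DB requests? The request count is packed into the top byte of the first doorbell word (nreq << 24), so a single doorbell can announce at most 255 WQEs; a larger ib_post_send() batch must ring the doorbell mid-loop, with wmb() ordering descriptor writes before the doorbell record and the record before the MMIO write. A compressed userspace model of that batching skeleton, where write_wqe() and ring_doorbell() are stand-ins for the driver's descriptor build and wmb()-ordered doorbell sequence:

#include <stdio.h>

enum { MAX_WQES_PER_DB = 255 };	/* nreq must fit the doorbell's 8-bit field */

static void write_wqe(unsigned idx) { (void)idx; /* build descriptor at idx */ }

static void ring_doorbell(unsigned nreq, unsigned first)
{
	/* In the driver: wmb(); update the doorbell record;
	 * wmb(); mthca_write64() to the MMIO doorbell. */
	printf("doorbell: %u wqes starting at %u\n", nreq, first);
}

static void post_send(unsigned *head, unsigned count)
{
	unsigned nreq = 0;

	while (count--) {
		if (nreq == MAX_WQES_PER_DB) {	/* batch full: flush mid-loop */
			ring_doorbell(nreq, *head);
			*head += nreq;
			nreq = 0;
		}
		write_wqe((*head + nreq) & 0xffff);
		++nreq;
	}
	if (nreq) {				/* flush the remainder */
		ring_doorbell(nreq, *head);
		*head += nreq;
	}
}

int main(void)
{
	unsigned head = 0;

	post_send(&head, 600);	/* rings at 255 and 510, then the final 90 */
	return 0;
}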
+2 -1
drivers/infiniband/hw/mthca/mthca_wqe.h
···
 
 enum {
 	MTHCA_INVAL_LKEY                 = 0x100,
-	MTHCA_TAVOR_MAX_WQES_PER_RECV_DB = 256
+	MTHCA_TAVOR_MAX_WQES_PER_RECV_DB = 256,
+	MTHCA_ARBEL_MAX_WQES_PER_SEND_DB = 255
 };
 
 struct mthca_next_seg {
+4
drivers/infiniband/ulp/ipoib/ipoib_ib.c
···
 	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		ipoib_ib_dev_up(dev);
 
+	down(&priv->vlan_mutex);
+
 	/* Flush any child interfaces too */
 	list_for_each_entry(cpriv, &priv->child_intfs, list)
 		ipoib_ib_dev_flush(&cpriv->dev);
+
+	up(&priv->vlan_mutex);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
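A brief note on the new locking: vlan_mutex is presumably the same lock that guards priv->child_intfs in the vlan add/delete paths, so holding it here keeps the child-interface list from changing underneath the flush walk.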
+7 -4
drivers/infiniband/ulp/ipoib/ipoib_main.c
···
 	if (ipoib_ib_dev_open(dev))
 		return -EINVAL;
 
-	if (ipoib_ib_dev_up(dev))
+	if (ipoib_ib_dev_up(dev)) {
+		ipoib_ib_dev_stop(dev);
 		return -EINVAL;
+	}
 
 	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
 		struct ipoib_dev_priv *cpriv;
···
 			while ((skb = __skb_dequeue(&neigh->queue)))
 				__skb_queue_tail(&skqueue, skb);
 		}
-	} else
-		path->query = NULL;
+	}
 
+	path->query = NULL;
 	complete(&path->done);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
···
 	skb_queue_head_init(&path->queue);
 
 	INIT_LIST_HEAD(&path->neigh_list);
-	init_completion(&path->done);
 
 	memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
 	path->pathrec.sgid = priv->local_gid;
···
 
 	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
 		  IPOIB_GID_ARG(path->pathrec.dgid));
+
+	init_completion(&path->done);
 
 	path->query_id =
 		ib_sa_path_rec_get(priv->ca, priv->port,
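The completion change is the subtle one here: init_completion(&path->done) now runs immediately before ib_sa_path_rec_get() instead of once at path allocation, so a path that issues a second query cannot have its wait satisfied by the first query's stale completion; relatedly, the callback now clears path->query on success and failure alike. A bare-bones model of the re-arm-per-request pattern, with a stand-in for the kernel's completion type:

/* Userspace stand-in for the kernel's completion API. */
struct completion { int done; };

static void init_completion(struct completion *c) { c->done = 0; }
static void complete(struct completion *c)        { c->done = 1; }

struct path {
	struct completion done;
	int               query;	/* nonzero while a lookup is in flight */
};

static void path_query_callback(struct path *p)
{
	p->query = 0;			/* cleared on success and error alike */
	complete(&p->done);
}

static void path_rec_start(struct path *p)
{
	init_completion(&p->done);	/* re-arm just before each query */
	p->query = 1;
	/* ...start the asynchronous lookup; it ends in path_query_callback(),
	 * after which waiters on p->done may proceed... */
}

int main(void)
{
	struct path p;

	path_rec_start(&p);		/* first lookup */
	path_query_callback(&p);
	path_rec_start(&p);		/* reuse: p.done.done is 0 again here */
	return p.done.done;		/* 0: the second query has not completed */
}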
+4 -6
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
···
 	if (!mcast)
 		return NULL;
 
-	init_completion(&mcast->done);
-
 	mcast->dev = dev;
 	mcast->created = jiffies;
 	mcast->backoff = 1;
-	mcast->logcount = 0;
 
 	INIT_LIST_HEAD(&mcast->list);
 	INIT_LIST_HEAD(&mcast->neigh_list);
 	skb_queue_head_init(&mcast->pkt_queue);
-
-	mcast->ah = NULL;
-	mcast->query = NULL;
 
 	return mcast;
 }
···
 	rec.port_gid = priv->local_gid;
 	rec.pkey     = cpu_to_be16(priv->pkey);
 
+	init_completion(&mcast->done);
+
 	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
 				     IB_SA_MCMEMBER_REC_MGID |
 				     IB_SA_MCMEMBER_REC_PORT_GID |
···
 		rec.flow_label     = priv->broadcast->mcmember.flow_label;
 		rec.traffic_class  = priv->broadcast->mcmember.traffic_class;
 	}
+
+	init_completion(&mcast->done);
 
 	ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
 				     mcast->backoff * 1000, GFP_ATOMIC,
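This is the same re-arming pattern as the ipoib_main.c change above: init_completion(&mcast->done) moves out of ipoib_mcast_alloc() and runs immediately before each ib_sa_mcmember_rec_set() call, so a group that retries its join re-arms the completion per attempt rather than relying on the state left by a previous one. The dropped assignments in ipoib_mcast_alloc() (logcount, ah, query) suggest the allocation already returns zeroed memory, making that explicit zeroing redundant.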