Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Allocate priv->tx_ring with vmalloc()
  IPoIB/cm: Set tx_wr.num_sge in connected mode post_send()
  IPoIB: Don't drop multicast sends when they can be queued
  IB/ipath: Reset the retry counter for RDMA_READ_RESPONSE_MIDDLE packets
  IB/ipath: Fix error completion put on send CQ instead of recv CQ
  IB/ipath: Fix RC QP initialization
  IB/ipath: Fix potentially wrong RNR retry counter returned in ipath_query_qp()
  IB/ipath: Fix IB compliance problems with link state vs physical state

+41 -36
+1 -1
drivers/infiniband/hw/ipath/ipath_common.h
@@ -75,7 +75,7 @@
 #define IPATH_IB_LINKDOWN		0
 #define IPATH_IB_LINKARM		1
 #define IPATH_IB_LINKACTIVE		2
-#define IPATH_IB_LINKINIT		3
+#define IPATH_IB_LINKDOWN_ONLY		3
 #define IPATH_IB_LINKDOWN_SLEEP		4
 #define IPATH_IB_LINKDOWN_DISABLE	5
 #define IPATH_IB_LINK_LOOPBACK		6 /* enable local loopback */
+12 -16
drivers/infiniband/hw/ipath/ipath_driver.c
@@ -851,8 +851,7 @@
  * -ETIMEDOUT state can have multiple states set, for any of several
  * transitions.
  */
-static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
-				int msecs)
+int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
 {
 	dd->ipath_state_wanted = state;
 	wait_event_interruptible_timeout(ipath_state_wait,
@@ -1655,8 +1656,8 @@
 static void ipath_set_ib_lstate(struct ipath_devdata *dd, int which)
 {
 	static const char *what[4] = {
-		[0] = "DOWN",
-		[INFINIPATH_IBCC_LINKCMD_INIT] = "INIT",
+		[0] = "NOP",
+		[INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
 		[INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
 		[INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
 	};
@@ -1671,9 +1672,9 @@
 		   (dd, dd->ipath_kregs->kr_ibcstatus) >>
 		   INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
 		  INFINIPATH_IBCS_LINKTRAININGSTATE_MASK]);
-	/* flush all queued sends when going to DOWN or INIT, to be sure that
+	/* flush all queued sends when going to DOWN to be sure that
 	 * they don't block MAD packets */
-	if (!linkcmd || linkcmd == INFINIPATH_IBCC_LINKCMD_INIT)
+	if (linkcmd == INFINIPATH_IBCC_LINKCMD_DOWN)
 		ipath_cancel_sends(dd, 1);
 
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
@@ -1686,6 +1687,13 @@
 	int ret;
 
 	switch (newstate) {
+	case IPATH_IB_LINKDOWN_ONLY:
+		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN <<
+				    INFINIPATH_IBCC_LINKCMD_SHIFT);
+		/* don't wait */
+		ret = 0;
+		goto bail;
+
 	case IPATH_IB_LINKDOWN:
 		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKINITCMD_POLL <<
 				    INFINIPATH_IBCC_LINKINITCMD_SHIFT);
@@ -1714,16 +1708,6 @@
 		/* don't wait */
 		ret = 0;
 		goto bail;
-
-	case IPATH_IB_LINKINIT:
-		if (dd->ipath_flags & IPATH_LINKINIT) {
-			ret = 0;
-			goto bail;
-		}
-		ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_INIT <<
-				    INFINIPATH_IBCC_LINKCMD_SHIFT);
-		lstate = IPATH_LINKINIT;
-		break;
 
 	case IPATH_IB_LINKARM:
 		if (dd->ipath_flags & IPATH_LINKARMED) {
+1
drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -767,6 +767,7 @@
 int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
 int ipath_reset_device(int);
 void ipath_get_faststats(unsigned long);
+int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
 int ipath_set_linkstate(struct ipath_devdata *, u8);
 int ipath_set_mtu(struct ipath_devdata *, u16);
 int ipath_set_lid(struct ipath_devdata *, u32, u8);
+3 -4
drivers/infiniband/hw/ipath/ipath_mad.c
@@ -555,10 +555,7 @@
 		/* FALLTHROUGH */
 	case IB_PORT_DOWN:
 		if (lstate == 0)
-			if (get_linkdowndefaultstate(dd))
-				lstate = IPATH_IB_LINKDOWN_SLEEP;
-			else
-				lstate = IPATH_IB_LINKDOWN;
+			lstate = IPATH_IB_LINKDOWN_ONLY;
 		else if (lstate == 1)
 			lstate = IPATH_IB_LINKDOWN_SLEEP;
 		else if (lstate == 2)
@@ -565,6 +568,8 @@
 		else
 			goto err;
 		ipath_set_linkstate(dd, lstate);
+		ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
+				     IPATH_LINKACTIVE, 1000);
 		break;
 	case IB_PORT_ARMED:
 		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
+7 -6
drivers/infiniband/hw/ipath/ipath_qp.c
@@ -329,8 +329,9 @@
 /**
  * ipath_reset_qp - initialize the QP state to the reset state
  * @qp: the QP to reset
+ * @type: the QP type
  */
-static void ipath_reset_qp(struct ipath_qp *qp)
+static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
 {
 	qp->remote_qpn = 0;
 	qp->qkey = 0;
@@ -343,7 +342,7 @@
 	qp->s_psn = 0;
 	qp->r_psn = 0;
 	qp->r_msn = 0;
-	if (qp->ibqp.qp_type == IB_QPT_RC) {
+	if (type == IB_QPT_RC) {
 		qp->s_state = IB_OPCODE_RC_SEND_LAST;
 		qp->r_state = IB_OPCODE_RC_SEND_LAST;
 	} else {
@@ -415,7 +414,7 @@
 		wc.wr_id = qp->r_wr_id;
 		wc.opcode = IB_WC_RECV;
 		wc.status = err;
-		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
 	}
 	wc.status = IB_WC_WR_FLUSH_ERR;
 
@@ -535,7 +534,7 @@
 
 	switch (new_state) {
 	case IB_QPS_RESET:
-		ipath_reset_qp(qp);
+		ipath_reset_qp(qp, ibqp->qp_type);
 		break;
 
 	case IB_QPS_ERR:
@@ -648,7 +647,7 @@
 	attr->port_num = 1;
 	attr->timeout = qp->timeout;
 	attr->retry_cnt = qp->s_retry_cnt;
-	attr->rnr_retry = qp->s_rnr_retry;
+	attr->rnr_retry = qp->s_rnr_retry_cnt;
 	attr->alt_port_num = 0;
 	attr->alt_timeout = 0;
 
@@ -840,7 +839,7 @@
 			goto bail_qp;
 		}
 		qp->ip = NULL;
-		ipath_reset_qp(qp);
+		ipath_reset_qp(qp, init_attr->qp_type);
 		break;
 
 	default:
+4
drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1196,6 +1196,10 @@
 		list_move_tail(&qp->timerwait,
 			       &dev->pending[dev->pending_index]);
 		spin_unlock(&dev->pending_lock);
+
+		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
+			qp->s_retry = qp->s_retry_cnt;
+
 		/*
 		 * Update the RDMA receive state but do the copy w/o
 		 * holding the locks and blocking interrupts.
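
Read on its own, the hunk above encodes the rule named in the shortlog entry. A hedged restatement as a standalone sketch (note_read_progress() is a hypothetical name, not driver code): each RDMA_READ_RESPONSE_MIDDLE packet proves the responder is still streaming data back, so the requester refills its retry budget instead of letting timeouts accumulated earlier in a long read eventually abort the QP.

	/* Sketch only: forward progress on an RDMA read refills the
	 * transport retry count, so only consecutive unanswered timeouts
	 * are charged against s_retry_cnt. */
	static inline void note_read_progress(struct ipath_qp *qp, u32 opcode)
	{
		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;
	}
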
+1 -1
drivers/infiniband/hw/ipath/ipath_registers.h
@@ -185,7 +185,7 @@
 #define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
 #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
 #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKCMD_INIT 1		/* move to 0x11 */
+#define INFINIPATH_IBCC_LINKCMD_DOWN 1		/* move to 0x11 */
 #define INFINIPATH_IBCC_LINKCMD_ARMED 2		/* move to 0x21 */
 #define INFINIPATH_IBCC_LINKCMD_ACTIVE 3	/* move to 0x31 */
 #define INFINIPATH_IBCC_LINKCMD_SHIFT 18
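
A note on how these definitions are consumed (a minimal sketch, not driver code; the ibcctrl variable is illustrative, and the real driver writes the composed value to kr_ibcctrl via ipath_write_kreg()): the 2-bit link command (bits 19:18) and the 2-bit link-init command (bits 17:16) occupy adjacent fields of one IBC control register, so callers such as ipath_set_ib_lstate() shift each command value to its field offset before writing.

	u64 ibcctrl = 0;

	/* request a DOWN link command in bits 19:18 */
	ibcctrl |= (INFINIPATH_IBCC_LINKCMD_DOWN & INFINIPATH_IBCC_LINKCMD_MASK)
			<< INFINIPATH_IBCC_LINKCMD_SHIFT;

	/* independently request POLL link-init behaviour in bits 17:16 */
	ibcctrl |= (u64)INFINIPATH_IBCC_LINKINITCMD_POLL <<
			INFINIPATH_IBCC_LINKINITCMD_SHIFT;
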
+6 -3
drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -38,6 +38,7 @@
 #include <net/icmp.h>
 #include <linux/icmpv6.h>
 #include <linux/delay.h>
+#include <linux/vmalloc.h>
 
 #include "ipoib.h"
 
@@ -638,6 +637,7 @@
 	priv->tx_sge[0].addr = addr;
 	priv->tx_sge[0].length = len;
 
+	priv->tx_wr.num_sge = 1;
 	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
 
 	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
@@ -1032,13 +1030,13 @@
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	int ret;
 
-	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring,
-			     GFP_KERNEL);
+	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
 	if (!p->tx_ring) {
 		ipoib_warn(priv, "failed to allocate tx ring\n");
 		ret = -ENOMEM;
 		goto err_tx;
 	}
+	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
 	if (IS_ERR(p->qp)) {
@@ -1079,6 +1077,7 @@
 	ib_destroy_qp(p->qp);
 err_qp:
 	p->qp = NULL;
+	vfree(p->tx_ring);
 err_tx:
 	return ret;
 }
@@ -1130,7 +1127,7 @@
 	if (p->qp)
 		ib_destroy_qp(p->qp);
 
-	kfree(p->tx_ring);
+	vfree(p->tx_ring);
 	kfree(p);
 }
 
+5 -4
drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -41,6 +41,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/vmalloc.h>
 
 #include <linux/if_arp.h>	/* For ARPHRD_xxx */
 
@@ -888,12 +887,12 @@
 		goto out;
 	}
 
-	priv->tx_ring = kzalloc(ipoib_sendq_size * sizeof *priv->tx_ring,
-				GFP_KERNEL);
+	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
 	if (!priv->tx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
 		       ca->name, ipoib_sendq_size);
 		goto out_rx_ring_cleanup;
 	}
+	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
 
 	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
 
@@ -904,7 +903,7 @@
 	return 0;
 
 out_tx_ring_cleanup:
-	kfree(priv->tx_ring);
+	vfree(priv->tx_ring);
 
 out_rx_ring_cleanup:
 	kfree(priv->rx_ring);
@@ -929,7 +928,7 @@
 	ipoib_ib_dev_cleanup(dev);
 
 	kfree(priv->rx_ring);
-	kfree(priv->tx_ring);
+	vfree(priv->tx_ring);
 
 	priv->rx_ring = NULL;
 	priv->tx_ring = NULL;
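
The kzalloc()-to-vmalloc() conversions in ipoib_cm.c and ipoib_main.c follow one pattern, sketched in isolation below (alloc_zeroed_ring() is a hypothetical helper, not part of the patch): with large ipoib_sendq_size values the TX ring can exceed what the page allocator will hand out as physically contiguous memory, so the driver switches to vmalloc(), which needs only virtually contiguous pages, and zeroes the buffer by hand. The matching frees must become vfree(), which is exactly what the error-path and cleanup hunks change.

	#include <linux/vmalloc.h>
	#include <linux/string.h>

	/* Hypothetical helper showing the allocation pattern used above:
	 * vmalloc() succeeds where a large kzalloc() may fail on fragmented
	 * memory, and the explicit memset() replaces kzalloc()'s zeroing.
	 * Pair with vfree(), never kfree(). */
	static void *alloc_zeroed_ring(size_t entries, size_t entry_size)
	{
		void *ring = vmalloc(entries * entry_size);

		if (!ring)
			return NULL;
		memset(ring, 0, entries * entry_size);
		return ring;
	}
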
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -650,7 +650,7 @@
 	 */
 	spin_lock(&priv->lock);
 
-	if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
+	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
 	    !priv->broadcast ||
 	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
 		++dev->stats.tx_dropped;