Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
"Second round of -rc fixes for 4.10.

This -rc cycle has been slow for the rdma subsystem. I had already
sent you the first batch before the Holiday break. After that, we only
got a few here and there, up until this week, when I got a drop of 13
patches for one driver (qedr). So, here are the -rc patches I have. I
currently have none held in reserve, so unless something new comes in,
this is it until the next merge window opens.

Summary:

- series of iw_cxgb4 fixes to make it work with the drain cq API

- one or two patches each to: srp, iser, cxgb3, vmw_pvrdma, umem,
rxe, and ipoib

- one big series (13 patches) for the new qedr driver"
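
The iw_cxgb4 drain fixes above target the generic drain helpers in the
RDMA core. As a rough sketch of why they matter, a kernel ULP typically
drains a QP during connection teardown roughly like this (the function
and variable names below are illustrative, not taken from this series,
and assume the CQs were allocated with ib_alloc_cq() using a polling
context other than IB_POLL_DIRECT):

    #include <rdma/ib_verbs.h>

    /*
     * Illustrative teardown path: ib_drain_qp() moves the QP to the error
     * state, posts marker work requests on both queues, and waits for
     * their completions, so no SQ/RQ work is still in flight when the QP
     * is destroyed.  The iw_cxgb4 patches in this pull make that work by
     * completing the drain WRs through software CQEs instead of the old
     * sq_drained/rq_drained completion waits.
     */
    static void example_teardown(struct ib_qp *qp, struct ib_cq *cq)
    {
            ib_drain_qp(qp);        /* drains both the SQ and the RQ */
            ib_destroy_qp(qp);
            ib_free_cq(cq);
    }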

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (27 commits)
RDMA/cma: Fix unknown symbol when CONFIG_IPV6 is not enabled
IB/rxe: Prevent from completer to operate on non valid QP
IB/rxe: Fix rxe dev insertion to rxe_dev_list
IB/umem: Release pid in error and ODP flow
RDMA/qedr: Dispatch port active event from qedr_add
RDMA/qedr: Fix and simplify memory leak in PD alloc
RDMA/qedr: Fix RDMA CM loopback
RDMA/qedr: Fix formatting
RDMA/qedr: Mark three functions as static
RDMA/qedr: Don't reset QP when queues aren't flushed
RDMA/qedr: Don't spam dmesg if QP is in error state
RDMA/qedr: Remove CQ spinlock from CM completion handlers
RDMA/qedr: Return max inline data in QP query result
RDMA/qedr: Return success when not changing QP state
RDMA/qedr: Add uapi header qedr-abi.h
RDMA/qedr: Fix MTU returned from QP query
RDMA/core: Add the function ib_mtu_int_to_enum
IB/vmw_pvrdma: Fix incorrect cleanup on pvrdma_pci_probe error path
IB/vmw_pvrdma: Don't leak info from alloc_ucontext
IB/cxgb3: fix misspelling in header guard
...

27 files changed, 269 insertions(+), 189 deletions(-)
+2 -1
drivers/infiniband/core/cma.c
··· 2811 if (!src_addr || !src_addr->sa_family) { 2812 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2813 src_addr->sa_family = dst_addr->sa_family; 2814 - if (dst_addr->sa_family == AF_INET6) { 2815 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2816 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; 2817 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
··· 2811 if (!src_addr || !src_addr->sa_family) { 2812 src_addr = (struct sockaddr *) &id->route.addr.src_addr; 2813 src_addr->sa_family = dst_addr->sa_family; 2814 + if (IS_ENABLED(CONFIG_IPV6) && 2815 + dst_addr->sa_family == AF_INET6) { 2816 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr; 2817 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr; 2818 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+2
drivers/infiniband/core/umem.c
··· 134 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); 135 136 if (access & IB_ACCESS_ON_DEMAND) { 137 ret = ib_umem_odp_get(context, umem); 138 if (ret) { 139 kfree(umem); ··· 150 151 page_list = (struct page **) __get_free_page(GFP_KERNEL); 152 if (!page_list) { 153 kfree(umem); 154 return ERR_PTR(-ENOMEM); 155 }
··· 134 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND)); 135 136 if (access & IB_ACCESS_ON_DEMAND) { 137 + put_pid(umem->pid); 138 ret = ib_umem_odp_get(context, umem); 139 if (ret) { 140 kfree(umem); ··· 149 150 page_list = (struct page **) __get_free_page(GFP_KERNEL); 151 if (!page_list) { 152 + put_pid(umem->pid); 153 kfree(umem); 154 return ERR_PTR(-ENOMEM); 155 }
+1 -10
drivers/infiniband/hw/cxgb3/iwch_provider.c
··· 1135 1136 memset(props, 0, sizeof(struct ib_port_attr)); 1137 props->max_mtu = IB_MTU_4096; 1138 - if (netdev->mtu >= 4096) 1139 - props->active_mtu = IB_MTU_4096; 1140 - else if (netdev->mtu >= 2048) 1141 - props->active_mtu = IB_MTU_2048; 1142 - else if (netdev->mtu >= 1024) 1143 - props->active_mtu = IB_MTU_1024; 1144 - else if (netdev->mtu >= 512) 1145 - props->active_mtu = IB_MTU_512; 1146 - else 1147 - props->active_mtu = IB_MTU_256; 1148 1149 if (!netif_carrier_ok(netdev)) 1150 props->state = IB_PORT_DOWN;
··· 1135 1136 memset(props, 0, sizeof(struct ib_port_attr)); 1137 props->max_mtu = IB_MTU_4096; 1138 + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); 1139 1140 if (!netif_carrier_ok(netdev)) 1141 props->state = IB_PORT_DOWN;
+4 -3
drivers/infiniband/hw/cxgb4/cm.c
··· 1804 skb_trim(skb, dlen); 1805 mutex_lock(&ep->com.mutex); 1806 1807 - /* update RX credits */ 1808 - update_rx_credits(ep, dlen); 1809 - 1810 switch (ep->com.state) { 1811 case MPA_REQ_SENT: 1812 ep->rcv_seq += dlen; 1813 disconnect = process_mpa_reply(ep, skb); 1814 break; 1815 case MPA_REQ_WAIT: 1816 ep->rcv_seq += dlen; 1817 disconnect = process_mpa_request(ep, skb); 1818 break; 1819 case FPDU_MODE: { 1820 struct c4iw_qp_attributes attrs; 1821 BUG_ON(!ep->com.qp); 1822 if (status) 1823 pr_err("%s Unexpected streaming data." \
··· 1804 skb_trim(skb, dlen); 1805 mutex_lock(&ep->com.mutex); 1806 1807 switch (ep->com.state) { 1808 case MPA_REQ_SENT: 1809 + update_rx_credits(ep, dlen); 1810 ep->rcv_seq += dlen; 1811 disconnect = process_mpa_reply(ep, skb); 1812 break; 1813 case MPA_REQ_WAIT: 1814 + update_rx_credits(ep, dlen); 1815 ep->rcv_seq += dlen; 1816 disconnect = process_mpa_request(ep, skb); 1817 break; 1818 case FPDU_MODE: { 1819 struct c4iw_qp_attributes attrs; 1820 + 1821 + update_rx_credits(ep, dlen); 1822 BUG_ON(!ep->com.qp); 1823 if (status) 1824 pr_err("%s Unexpected streaming data." \
+13 -8
drivers/infiniband/hw/cxgb4/cq.c
··· 505 } 506 507 /* 508 * Gotta tweak READ completions: 509 * 1) the cqe doesn't contain the sq_wptr from the wr. 510 * 2) opcode not reflected from the wr. ··· 762 c4iw_invalidate_mr(qhp->rhp, 763 CQE_WRID_FR_STAG(&cqe)); 764 break; 765 default: 766 printk(KERN_ERR MOD "Unexpected opcode %d " 767 "in the CQE received for QPID=0x%0x\n", ··· 829 } 830 } 831 out: 832 - if (wq) { 833 - if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) { 834 - if (t4_sq_empty(wq)) 835 - complete(&qhp->sq_drained); 836 - if (t4_rq_empty(wq)) 837 - complete(&qhp->rq_drained); 838 - } 839 spin_unlock(&qhp->lock); 840 - } 841 return ret; 842 } 843
··· 505 } 506 507 /* 508 + * Special cqe for drain WR completions... 509 + */ 510 + if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) { 511 + *cookie = CQE_DRAIN_COOKIE(hw_cqe); 512 + *cqe = *hw_cqe; 513 + goto skip_cqe; 514 + } 515 + 516 + /* 517 * Gotta tweak READ completions: 518 * 1) the cqe doesn't contain the sq_wptr from the wr. 519 * 2) opcode not reflected from the wr. ··· 753 c4iw_invalidate_mr(qhp->rhp, 754 CQE_WRID_FR_STAG(&cqe)); 755 break; 756 + case C4IW_DRAIN_OPCODE: 757 + wc->opcode = IB_WC_SEND; 758 + break; 759 default: 760 printk(KERN_ERR MOD "Unexpected opcode %d " 761 "in the CQE received for QPID=0x%0x\n", ··· 817 } 818 } 819 out: 820 + if (wq) 821 spin_unlock(&qhp->lock); 822 return ret; 823 } 824
+9
drivers/infiniband/hw/cxgb4/device.c
··· 846 } 847 } 848 849 rdev->status_page->db_off = 0; 850 851 return 0; 852 destroy_ocqp_pool: 853 c4iw_ocqp_pool_destroy(rdev); 854 destroy_rqtpool: ··· 870 871 static void c4iw_rdev_close(struct c4iw_rdev *rdev) 872 { 873 kfree(rdev->wr_log); 874 free_page((unsigned long)rdev->status_page); 875 c4iw_pblpool_destroy(rdev);
··· 846 } 847 } 848 849 + rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free"); 850 + if (!rdev->free_workq) { 851 + err = -ENOMEM; 852 + goto err_free_status_page; 853 + } 854 + 855 rdev->status_page->db_off = 0; 856 857 return 0; 858 + err_free_status_page: 859 + free_page((unsigned long)rdev->status_page); 860 destroy_ocqp_pool: 861 c4iw_ocqp_pool_destroy(rdev); 862 destroy_rqtpool: ··· 862 863 static void c4iw_rdev_close(struct c4iw_rdev *rdev) 864 { 865 + destroy_workqueue(rdev->free_workq); 866 kfree(rdev->wr_log); 867 free_page((unsigned long)rdev->status_page); 868 c4iw_pblpool_destroy(rdev);
+20 -4
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 45 #include <linux/kref.h> 46 #include <linux/timer.h> 47 #include <linux/io.h> 48 49 #include <asm/byteorder.h> 50 ··· 108 struct list_head qpids; 109 struct list_head cqids; 110 struct mutex lock; 111 }; 112 113 enum c4iw_rdev_flags { ··· 185 atomic_t wr_log_idx; 186 struct wr_log_entry *wr_log; 187 int wr_log_size; 188 }; 189 190 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) ··· 483 wait_queue_head_t wait; 484 struct timer_list timer; 485 int sq_sig_all; 486 - struct completion rq_drained; 487 - struct completion sq_drained; 488 }; 489 490 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) ··· 498 u32 key; 499 spinlock_t mmap_lock; 500 struct list_head mmaps; 501 }; 502 503 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) 504 { 505 return container_of(c, struct c4iw_ucontext, ibucontext); 506 } 507 508 struct c4iw_mm_entry { ··· 630 } 631 return IB_QPS_ERR; 632 } 633 634 static inline u32 c4iw_ib_to_tpt_access(int a) 635 { ··· 1015 extern int db_fc_threshold; 1016 extern int db_coalescing_threshold; 1017 extern int use_dsgl; 1018 - void c4iw_drain_rq(struct ib_qp *qp); 1019 - void c4iw_drain_sq(struct ib_qp *qp); 1020 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); 1021 1022 #endif
··· 45 #include <linux/kref.h> 46 #include <linux/timer.h> 47 #include <linux/io.h> 48 + #include <linux/workqueue.h> 49 50 #include <asm/byteorder.h> 51 ··· 107 struct list_head qpids; 108 struct list_head cqids; 109 struct mutex lock; 110 + struct kref kref; 111 }; 112 113 enum c4iw_rdev_flags { ··· 183 atomic_t wr_log_idx; 184 struct wr_log_entry *wr_log; 185 int wr_log_size; 186 + struct workqueue_struct *free_workq; 187 }; 188 189 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) ··· 480 wait_queue_head_t wait; 481 struct timer_list timer; 482 int sq_sig_all; 483 + struct work_struct free_work; 484 + struct c4iw_ucontext *ucontext; 485 }; 486 487 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) ··· 495 u32 key; 496 spinlock_t mmap_lock; 497 struct list_head mmaps; 498 + struct kref kref; 499 }; 500 501 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c) 502 { 503 return container_of(c, struct c4iw_ucontext, ibucontext); 504 + } 505 + 506 + void _c4iw_free_ucontext(struct kref *kref); 507 + 508 + static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext) 509 + { 510 + kref_put(&ucontext->kref, _c4iw_free_ucontext); 511 + } 512 + 513 + static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext) 514 + { 515 + kref_get(&ucontext->kref); 516 } 517 518 struct c4iw_mm_entry { ··· 614 } 615 return IB_QPS_ERR; 616 } 617 + 618 + #define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN 619 620 static inline u32 c4iw_ib_to_tpt_access(int a) 621 { ··· 997 extern int db_fc_threshold; 998 extern int db_coalescing_threshold; 999 extern int use_dsgl; 1000 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); 1001 1002 #endif
+17 -16
drivers/infiniband/hw/cxgb4/provider.c
··· 93 return -ENOSYS; 94 } 95 96 - static int c4iw_dealloc_ucontext(struct ib_ucontext *context) 97 { 98 - struct c4iw_dev *rhp = to_c4iw_dev(context->device); 99 - struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); 100 struct c4iw_mm_entry *mm, *tmp; 101 102 - PDBG("%s context %p\n", __func__, context); 103 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) 104 kfree(mm); 105 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); 106 kfree(ucontext); 107 return 0; 108 } 109 ··· 138 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); 139 INIT_LIST_HEAD(&context->mmaps); 140 spin_lock_init(&context->mmap_lock); 141 142 if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { 143 if (!warned++) ··· 373 374 memset(props, 0, sizeof(struct ib_port_attr)); 375 props->max_mtu = IB_MTU_4096; 376 - if (netdev->mtu >= 4096) 377 - props->active_mtu = IB_MTU_4096; 378 - else if (netdev->mtu >= 2048) 379 - props->active_mtu = IB_MTU_2048; 380 - else if (netdev->mtu >= 1024) 381 - props->active_mtu = IB_MTU_1024; 382 - else if (netdev->mtu >= 512) 383 - props->active_mtu = IB_MTU_512; 384 - else 385 - props->active_mtu = IB_MTU_256; 386 387 if (!netif_carrier_ok(netdev)) 388 props->state = IB_PORT_DOWN; ··· 610 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; 611 dev->ibdev.get_port_immutable = c4iw_port_immutable; 612 dev->ibdev.get_dev_fw_str = get_dev_fw_str; 613 - dev->ibdev.drain_sq = c4iw_drain_sq; 614 - dev->ibdev.drain_rq = c4iw_drain_rq; 615 616 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); 617 if (!dev->ibdev.iwcm)
··· 93 return -ENOSYS; 94 } 95 96 + void _c4iw_free_ucontext(struct kref *kref) 97 { 98 + struct c4iw_ucontext *ucontext; 99 + struct c4iw_dev *rhp; 100 struct c4iw_mm_entry *mm, *tmp; 101 102 + ucontext = container_of(kref, struct c4iw_ucontext, kref); 103 + rhp = to_c4iw_dev(ucontext->ibucontext.device); 104 + 105 + PDBG("%s ucontext %p\n", __func__, ucontext); 106 list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry) 107 kfree(mm); 108 c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx); 109 kfree(ucontext); 110 + } 111 + 112 + static int c4iw_dealloc_ucontext(struct ib_ucontext *context) 113 + { 114 + struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context); 115 + 116 + PDBG("%s context %p\n", __func__, context); 117 + c4iw_put_ucontext(ucontext); 118 return 0; 119 } 120 ··· 127 c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx); 128 INIT_LIST_HEAD(&context->mmaps); 129 spin_lock_init(&context->mmap_lock); 130 + kref_init(&context->kref); 131 132 if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) { 133 if (!warned++) ··· 361 362 memset(props, 0, sizeof(struct ib_port_attr)); 363 props->max_mtu = IB_MTU_4096; 364 + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); 365 366 if (!netif_carrier_ok(netdev)) 367 props->state = IB_PORT_DOWN; ··· 607 dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; 608 dev->ibdev.get_port_immutable = c4iw_port_immutable; 609 dev->ibdev.get_dev_fw_str = get_dev_fw_str; 610 611 dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); 612 if (!dev->ibdev.iwcm)
+94 -53
drivers/infiniband/hw/cxgb4/qp.c
··· 715 return 0; 716 } 717 718 - static void _free_qp(struct kref *kref) 719 { 720 struct c4iw_qp *qhp; 721 722 qhp = container_of(kref, struct c4iw_qp, kref); 723 PDBG("%s qhp %p\n", __func__, qhp); 724 - kfree(qhp); 725 } 726 727 void c4iw_qp_add_ref(struct ib_qp *qp) ··· 752 void c4iw_qp_rem_ref(struct ib_qp *qp) 753 { 754 PDBG("%s ib_qp %p\n", __func__, qp); 755 - kref_put(&to_c4iw_qp(qp)->kref, _free_qp); 756 } 757 758 static void add_to_fc_list(struct list_head *head, struct list_head *entry) ··· 795 return 0; 796 } 797 798 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 799 struct ib_send_wr **bad_wr) 800 { ··· 871 spin_lock_irqsave(&qhp->lock, flag); 872 if (t4_wq_in_error(&qhp->wq)) { 873 spin_unlock_irqrestore(&qhp->lock, flag); 874 - *bad_wr = wr; 875 - return -EINVAL; 876 } 877 num_wrs = t4_sq_avail(&qhp->wq); 878 if (num_wrs == 0) { ··· 1014 spin_lock_irqsave(&qhp->lock, flag); 1015 if (t4_wq_in_error(&qhp->wq)) { 1016 spin_unlock_irqrestore(&qhp->lock, flag); 1017 - *bad_wr = wr; 1018 - return -EINVAL; 1019 } 1020 num_wrs = t4_rq_avail(&qhp->wq); 1021 if (num_wrs == 0) { ··· 1627 } 1628 break; 1629 case C4IW_QP_STATE_CLOSING: 1630 - if (!internal) { 1631 ret = -EINVAL; 1632 goto out; 1633 } ··· 1725 struct c4iw_dev *rhp; 1726 struct c4iw_qp *qhp; 1727 struct c4iw_qp_attributes attrs; 1728 - struct c4iw_ucontext *ucontext; 1729 1730 qhp = to_c4iw_qp(ib_qp); 1731 rhp = qhp->rhp; ··· 1743 list_del_init(&qhp->db_fc_entry); 1744 spin_unlock_irq(&rhp->lock); 1745 free_ird(rhp, qhp->attr.max_ird); 1746 - 1747 - ucontext = ib_qp->uobject ? 1748 - to_c4iw_ucontext(ib_qp->uobject->context) : NULL; 1749 - destroy_qp(&rhp->rdev, &qhp->wq, 1750 - ucontext ? &ucontext->uctx : &rhp->rdev.uctx); 1751 1752 c4iw_qp_rem_ref(ib_qp); 1753 ··· 1839 qhp->attr.max_ird = 0; 1840 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; 1841 spin_lock_init(&qhp->lock); 1842 - init_completion(&qhp->sq_drained); 1843 - init_completion(&qhp->rq_drained); 1844 mutex_init(&qhp->mutex); 1845 init_waitqueue_head(&qhp->wait); 1846 kref_init(&qhp->kref); 1847 1848 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); 1849 if (ret) ··· 1929 ma_sync_key_mm->len = PAGE_SIZE; 1930 insert_mmap(ucontext, ma_sync_key_mm); 1931 } 1932 } 1933 qhp->ibqp.qp_num = qhp->wq.sq.qid; 1934 init_timer(&(qhp->timer)); ··· 2035 init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; 2036 init_attr->sq_sig_type = qhp->sq_sig_all ? 
IB_SIGNAL_ALL_WR : 0; 2037 return 0; 2038 - } 2039 - 2040 - static void move_qp_to_err(struct c4iw_qp *qp) 2041 - { 2042 - struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR }; 2043 - 2044 - (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2045 - } 2046 - 2047 - void c4iw_drain_sq(struct ib_qp *ibqp) 2048 - { 2049 - struct c4iw_qp *qp = to_c4iw_qp(ibqp); 2050 - unsigned long flag; 2051 - bool need_to_wait; 2052 - 2053 - move_qp_to_err(qp); 2054 - spin_lock_irqsave(&qp->lock, flag); 2055 - need_to_wait = !t4_sq_empty(&qp->wq); 2056 - spin_unlock_irqrestore(&qp->lock, flag); 2057 - 2058 - if (need_to_wait) 2059 - wait_for_completion(&qp->sq_drained); 2060 - } 2061 - 2062 - void c4iw_drain_rq(struct ib_qp *ibqp) 2063 - { 2064 - struct c4iw_qp *qp = to_c4iw_qp(ibqp); 2065 - unsigned long flag; 2066 - bool need_to_wait; 2067 - 2068 - move_qp_to_err(qp); 2069 - spin_lock_irqsave(&qp->lock, flag); 2070 - need_to_wait = !t4_rq_empty(&qp->wq); 2071 - spin_unlock_irqrestore(&qp->lock, flag); 2072 - 2073 - if (need_to_wait) 2074 - wait_for_completion(&qp->rq_drained); 2075 }
··· 715 return 0; 716 } 717 718 + static void free_qp_work(struct work_struct *work) 719 + { 720 + struct c4iw_ucontext *ucontext; 721 + struct c4iw_qp *qhp; 722 + struct c4iw_dev *rhp; 723 + 724 + qhp = container_of(work, struct c4iw_qp, free_work); 725 + ucontext = qhp->ucontext; 726 + rhp = qhp->rhp; 727 + 728 + PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext); 729 + destroy_qp(&rhp->rdev, &qhp->wq, 730 + ucontext ? &ucontext->uctx : &rhp->rdev.uctx); 731 + 732 + if (ucontext) 733 + c4iw_put_ucontext(ucontext); 734 + kfree(qhp); 735 + } 736 + 737 + static void queue_qp_free(struct kref *kref) 738 { 739 struct c4iw_qp *qhp; 740 741 qhp = container_of(kref, struct c4iw_qp, kref); 742 PDBG("%s qhp %p\n", __func__, qhp); 743 + queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work); 744 } 745 746 void c4iw_qp_add_ref(struct ib_qp *qp) ··· 733 void c4iw_qp_rem_ref(struct ib_qp *qp) 734 { 735 PDBG("%s ib_qp %p\n", __func__, qp); 736 + kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free); 737 } 738 739 static void add_to_fc_list(struct list_head *head, struct list_head *entry) ··· 776 return 0; 777 } 778 779 + static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr) 780 + { 781 + struct t4_cqe cqe = {}; 782 + struct c4iw_cq *schp; 783 + unsigned long flag; 784 + struct t4_cq *cq; 785 + 786 + schp = to_c4iw_cq(qhp->ibqp.send_cq); 787 + cq = &schp->cq; 788 + 789 + cqe.u.drain_cookie = wr->wr_id; 790 + cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | 791 + CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | 792 + CQE_TYPE_V(1) | 793 + CQE_SWCQE_V(1) | 794 + CQE_QPID_V(qhp->wq.sq.qid)); 795 + 796 + spin_lock_irqsave(&schp->lock, flag); 797 + cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); 798 + cq->sw_queue[cq->sw_pidx] = cqe; 799 + t4_swcq_produce(cq); 800 + spin_unlock_irqrestore(&schp->lock, flag); 801 + 802 + spin_lock_irqsave(&schp->comp_handler_lock, flag); 803 + (*schp->ibcq.comp_handler)(&schp->ibcq, 804 + schp->ibcq.cq_context); 805 + spin_unlock_irqrestore(&schp->comp_handler_lock, flag); 806 + } 807 + 808 + static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr) 809 + { 810 + struct t4_cqe cqe = {}; 811 + struct c4iw_cq *rchp; 812 + unsigned long flag; 813 + struct t4_cq *cq; 814 + 815 + rchp = to_c4iw_cq(qhp->ibqp.recv_cq); 816 + cq = &rchp->cq; 817 + 818 + cqe.u.drain_cookie = wr->wr_id; 819 + cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) | 820 + CQE_OPCODE_V(C4IW_DRAIN_OPCODE) | 821 + CQE_TYPE_V(0) | 822 + CQE_SWCQE_V(1) | 823 + CQE_QPID_V(qhp->wq.sq.qid)); 824 + 825 + spin_lock_irqsave(&rchp->lock, flag); 826 + cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen)); 827 + cq->sw_queue[cq->sw_pidx] = cqe; 828 + t4_swcq_produce(cq); 829 + spin_unlock_irqrestore(&rchp->lock, flag); 830 + 831 + spin_lock_irqsave(&rchp->comp_handler_lock, flag); 832 + (*rchp->ibcq.comp_handler)(&rchp->ibcq, 833 + rchp->ibcq.cq_context); 834 + spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); 835 + } 836 + 837 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 838 struct ib_send_wr **bad_wr) 839 { ··· 794 spin_lock_irqsave(&qhp->lock, flag); 795 if (t4_wq_in_error(&qhp->wq)) { 796 spin_unlock_irqrestore(&qhp->lock, flag); 797 + complete_sq_drain_wr(qhp, wr); 798 + return err; 799 } 800 num_wrs = t4_sq_avail(&qhp->wq); 801 if (num_wrs == 0) { ··· 937 spin_lock_irqsave(&qhp->lock, flag); 938 if (t4_wq_in_error(&qhp->wq)) { 939 spin_unlock_irqrestore(&qhp->lock, flag); 940 + complete_rq_drain_wr(qhp, wr); 941 + return err; 942 } 943 
num_wrs = t4_rq_avail(&qhp->wq); 944 if (num_wrs == 0) { ··· 1550 } 1551 break; 1552 case C4IW_QP_STATE_CLOSING: 1553 + 1554 + /* 1555 + * Allow kernel users to move to ERROR for qp draining. 1556 + */ 1557 + if (!internal && (qhp->ibqp.uobject || attrs->next_state != 1558 + C4IW_QP_STATE_ERROR)) { 1559 ret = -EINVAL; 1560 goto out; 1561 } ··· 1643 struct c4iw_dev *rhp; 1644 struct c4iw_qp *qhp; 1645 struct c4iw_qp_attributes attrs; 1646 1647 qhp = to_c4iw_qp(ib_qp); 1648 rhp = qhp->rhp; ··· 1662 list_del_init(&qhp->db_fc_entry); 1663 spin_unlock_irq(&rhp->lock); 1664 free_ird(rhp, qhp->attr.max_ird); 1665 1666 c4iw_qp_rem_ref(ib_qp); 1667 ··· 1763 qhp->attr.max_ird = 0; 1764 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; 1765 spin_lock_init(&qhp->lock); 1766 mutex_init(&qhp->mutex); 1767 init_waitqueue_head(&qhp->wait); 1768 kref_init(&qhp->kref); 1769 + INIT_WORK(&qhp->free_work, free_qp_work); 1770 1771 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); 1772 if (ret) ··· 1854 ma_sync_key_mm->len = PAGE_SIZE; 1855 insert_mmap(ucontext, ma_sync_key_mm); 1856 } 1857 + 1858 + c4iw_get_ucontext(ucontext); 1859 + qhp->ucontext = ucontext; 1860 } 1861 qhp->ibqp.qp_num = qhp->wq.sq.qid; 1862 init_timer(&(qhp->timer)); ··· 1957 init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; 1958 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; 1959 return 0; 1960 }
+2
drivers/infiniband/hw/cxgb4/t4.h
··· 179 __be32 wrid_hi; 180 __be32 wrid_low; 181 } gen; 182 } u; 183 __be64 reserved; 184 __be64 bits_type_ts; ··· 239 /* generic accessor macros */ 240 #define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) 241 #define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) 242 243 /* macros for flit 3 of the cqe */ 244 #define CQE_GENBIT_S 63
··· 179 __be32 wrid_hi; 180 __be32 wrid_low; 181 } gen; 182 + u64 drain_cookie; 183 } u; 184 __be64 reserved; 185 __be64 bits_type_ts; ··· 238 /* generic accessor macros */ 239 #define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) 240 #define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) 241 + #define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie) 242 243 /* macros for flit 3 of the cqe */ 244 #define CQE_GENBIT_S 63
+1 -10
drivers/infiniband/hw/i40iw/i40iw_verbs.c
··· 100 memset(props, 0, sizeof(*props)); 101 102 props->max_mtu = IB_MTU_4096; 103 - if (netdev->mtu >= 4096) 104 - props->active_mtu = IB_MTU_4096; 105 - else if (netdev->mtu >= 2048) 106 - props->active_mtu = IB_MTU_2048; 107 - else if (netdev->mtu >= 1024) 108 - props->active_mtu = IB_MTU_1024; 109 - else if (netdev->mtu >= 512) 110 - props->active_mtu = IB_MTU_512; 111 - else 112 - props->active_mtu = IB_MTU_256; 113 114 props->lid = 1; 115 if (netif_carrier_ok(iwdev->netdev))
··· 100 memset(props, 0, sizeof(*props)); 101 102 props->max_mtu = IB_MTU_4096; 103 + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); 104 105 props->lid = 1; 106 if (netif_carrier_ok(iwdev->netdev))
+1 -11
drivers/infiniband/hw/nes/nes_verbs.c
··· 478 memset(props, 0, sizeof(*props)); 479 480 props->max_mtu = IB_MTU_4096; 481 - 482 - if (netdev->mtu >= 4096) 483 - props->active_mtu = IB_MTU_4096; 484 - else if (netdev->mtu >= 2048) 485 - props->active_mtu = IB_MTU_2048; 486 - else if (netdev->mtu >= 1024) 487 - props->active_mtu = IB_MTU_1024; 488 - else if (netdev->mtu >= 512) 489 - props->active_mtu = IB_MTU_512; 490 - else 491 - props->active_mtu = IB_MTU_256; 492 493 props->lid = 1; 494 props->lmc = 0;
··· 478 memset(props, 0, sizeof(*props)); 479 480 props->max_mtu = IB_MTU_4096; 481 + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); 482 483 props->lid = 1; 484 props->lmc = 0;
+15 -8
drivers/infiniband/hw/qedr/main.c
··· 576 return 0; 577 } 578 579 - void qedr_unaffiliated_event(void *context, 580 - u8 event_code) 581 { 582 pr_err("unaffiliated event not implemented yet\n"); 583 } ··· 791 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) 792 goto sysfs_err; 793 794 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); 795 return dev; 796 ··· 826 ib_dealloc_device(&dev->ibdev); 827 } 828 829 - static int qedr_close(struct qedr_dev *dev) 830 { 831 - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); 832 - 833 - return 0; 834 } 835 836 static void qedr_shutdown(struct qedr_dev *dev) 837 { 838 qedr_close(dev); 839 qedr_remove(dev); 840 } 841 842 static void qedr_mac_address_change(struct qedr_dev *dev) ··· 870 871 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); 872 873 - qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE); 874 875 if (rc) 876 DP_ERR(dev, "Error updating mac filter\n"); ··· 884 { 885 switch (event) { 886 case QEDE_UP: 887 - qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); 888 break; 889 case QEDE_DOWN: 890 qedr_close(dev);
··· 576 return 0; 577 } 578 579 + void qedr_unaffiliated_event(void *context, u8 event_code) 580 { 581 pr_err("unaffiliated event not implemented yet\n"); 582 } ··· 792 if (device_create_file(&dev->ibdev.dev, qedr_attributes[i])) 793 goto sysfs_err; 794 795 + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) 796 + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); 797 + 798 DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n"); 799 return dev; 800 ··· 824 ib_dealloc_device(&dev->ibdev); 825 } 826 827 + static void qedr_close(struct qedr_dev *dev) 828 { 829 + if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) 830 + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR); 831 } 832 833 static void qedr_shutdown(struct qedr_dev *dev) 834 { 835 qedr_close(dev); 836 qedr_remove(dev); 837 + } 838 + 839 + static void qedr_open(struct qedr_dev *dev) 840 + { 841 + if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state)) 842 + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE); 843 } 844 845 static void qedr_mac_address_change(struct qedr_dev *dev) ··· 863 864 ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr); 865 866 + qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE); 867 868 if (rc) 869 DP_ERR(dev, "Error updating mac filter\n"); ··· 877 { 878 switch (event) { 879 case QEDE_UP: 880 + qedr_open(dev); 881 break; 882 case QEDE_DOWN: 883 qedr_close(dev);
+5 -3
drivers/infiniband/hw/qedr/qedr.h
··· 113 struct qed_rdma_events events; 114 }; 115 116 struct qedr_dev { 117 struct ib_device ibdev; 118 struct qed_dev *cdev; ··· 155 struct qedr_cq *gsi_sqcq; 156 struct qedr_cq *gsi_rqcq; 157 struct qedr_qp *gsi_qp; 158 }; 159 160 #define QEDR_MAX_SQ_PBL (0x8000) ··· 192 #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) 193 194 #define QEDR_MAX_PORT (1) 195 196 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 197 ··· 255 u32 sig; 256 257 u16 icid; 258 - 259 - /* Lock to protect completion handler */ 260 - spinlock_t comp_handler_lock; 261 262 /* Lock to protect multiplem CQ's */ 263 spinlock_t cq_lock;
··· 113 struct qed_rdma_events events; 114 }; 115 116 + #define QEDR_ENET_STATE_BIT (0) 117 + 118 struct qedr_dev { 119 struct ib_device ibdev; 120 struct qed_dev *cdev; ··· 153 struct qedr_cq *gsi_sqcq; 154 struct qedr_cq *gsi_rqcq; 155 struct qedr_qp *gsi_qp; 156 + 157 + unsigned long enet_state; 158 }; 159 160 #define QEDR_MAX_SQ_PBL (0x8000) ··· 188 #define QEDR_ROCE_MAX_CNQ_SIZE (0x4000) 189 190 #define QEDR_MAX_PORT (1) 191 + #define QEDR_PORT (1) 192 193 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 194 ··· 250 u32 sig; 251 252 u16 icid; 253 254 /* Lock to protect multiplem CQ's */ 255 spinlock_t cq_lock;
+4 -10
drivers/infiniband/hw/qedr/qedr_cm.c
··· 87 qedr_inc_sw_gsi_cons(&qp->sq); 88 spin_unlock_irqrestore(&qp->q_lock, flags); 89 90 - if (cq->ibcq.comp_handler) { 91 - spin_lock_irqsave(&cq->comp_handler_lock, flags); 92 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 93 - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); 94 - } 95 } 96 97 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, ··· 110 111 spin_unlock_irqrestore(&qp->q_lock, flags); 112 113 - if (cq->ibcq.comp_handler) { 114 - spin_lock_irqsave(&cq->comp_handler_lock, flags); 115 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 116 - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); 117 - } 118 } 119 120 static void qedr_destroy_gsi_cq(struct qedr_dev *dev, ··· 398 } 399 400 if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) 401 - packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; 402 - else 403 packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; 404 405 packet->roce_mode = roce_mode; 406 memcpy(packet->header.vaddr, ud_header_buffer, header_size);
··· 87 qedr_inc_sw_gsi_cons(&qp->sq); 88 spin_unlock_irqrestore(&qp->q_lock, flags); 89 90 + if (cq->ibcq.comp_handler) 91 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 92 } 93 94 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt, ··· 113 114 spin_unlock_irqrestore(&qp->q_lock, flags); 115 116 + if (cq->ibcq.comp_handler) 117 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 118 } 119 120 static void qedr_destroy_gsi_cq(struct qedr_dev *dev, ··· 404 } 405 406 if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h)) 407 packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB; 408 + else 409 + packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW; 410 411 packet->roce_mode = roce_mode; 412 memcpy(packet->header.vaddr, ud_header_buffer, header_size);
+41 -21
drivers/infiniband/hw/qedr/verbs.c
··· 471 struct ib_ucontext *context, struct ib_udata *udata) 472 { 473 struct qedr_dev *dev = get_qedr_dev(ibdev); 474 - struct qedr_ucontext *uctx = NULL; 475 - struct qedr_alloc_pd_uresp uresp; 476 struct qedr_pd *pd; 477 u16 pd_id; 478 int rc; ··· 487 if (!pd) 488 return ERR_PTR(-ENOMEM); 489 490 - dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); 491 492 - uresp.pd_id = pd_id; 493 pd->pd_id = pd_id; 494 495 if (udata && context) { 496 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 497 - if (rc) 498 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); 499 - uctx = get_qedr_ucontext(context); 500 - uctx->pd = pd; 501 - pd->uctx = uctx; 502 } 503 504 return &pd->ibpd; 505 } 506 507 int qedr_dealloc_pd(struct ib_pd *ibpd) ··· 1610 return ERR_PTR(-EFAULT); 1611 } 1612 1613 - enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) 1614 { 1615 switch (qp_state) { 1616 case QED_ROCE_QP_STATE_RESET: ··· 1631 return IB_QPS_ERR; 1632 } 1633 1634 - enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) 1635 { 1636 switch (qp_state) { 1637 case IB_QPS_RESET: ··· 1668 int status = 0; 1669 1670 if (new_state == qp->state) 1671 - return 1; 1672 1673 switch (qp->state) { 1674 case QED_ROCE_QP_STATE_RESET: ··· 1744 /* ERR->XXX */ 1745 switch (new_state) { 1746 case QED_ROCE_QP_STATE_RESET: 1747 break; 1748 default: 1749 status = -EINVAL; ··· 1884 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); 1885 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", 1886 qp_params.remote_mac_addr); 1887 - ; 1888 1889 qp_params.mtu = qp->mtu; 1890 qp_params.lb_indication = false; ··· 2034 2035 qp_attr->qp_state = qedr_get_ibqp_state(params.state); 2036 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); 2037 - qp_attr->path_mtu = iboe_get_mtu(params.mtu); 2038 qp_attr->path_mig_state = IB_MIG_MIGRATED; 2039 qp_attr->rq_psn = params.rq_psn; 2040 qp_attr->sq_psn = params.sq_psn; ··· 2046 qp_attr->cap.max_recv_wr = qp->rq.max_wr; 2047 qp_attr->cap.max_send_sge = qp->sq.max_sges; 2048 qp_attr->cap.max_recv_sge = qp->rq.max_sges; 2049 - qp_attr->cap.max_inline_data = qp->max_inline_data; 2050 qp_init_attr->cap = qp_attr->cap; 2051 2052 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0], ··· 2320 return rc; 2321 } 2322 2323 - struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len) 2324 { 2325 struct qedr_pd *pd = get_qedr_pd(ibpd); 2326 struct qedr_dev *dev = get_qedr_dev(ibpd->device); ··· 2723 return 0; 2724 } 2725 2726 - enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) 2727 { 2728 switch (opcode) { 2729 case IB_WR_RDMA_WRITE: ··· 2748 } 2749 } 2750 2751 - inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) 2752 { 2753 int wq_is_full, err_wr, pbl_is_full; 2754 struct qedr_dev *dev = qp->dev; ··· 2785 return true; 2786 } 2787 2788 - int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 2789 struct ib_send_wr **bad_wr) 2790 { 2791 struct qedr_dev *dev = get_qedr_dev(ibqp->device); ··· 3253 IB_WC_SUCCESS, 0); 3254 break; 3255 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: 3256 - DP_ERR(dev, 3257 - "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", 3258 - cq->icid, qp->icid); 3259 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, 3260 IB_WC_WR_FLUSH_ERR, 1); 3261 break;
··· 471 struct ib_ucontext *context, struct ib_udata *udata) 472 { 473 struct qedr_dev *dev = get_qedr_dev(ibdev); 474 struct qedr_pd *pd; 475 u16 pd_id; 476 int rc; ··· 489 if (!pd) 490 return ERR_PTR(-ENOMEM); 491 492 + rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id); 493 + if (rc) 494 + goto err; 495 496 pd->pd_id = pd_id; 497 498 if (udata && context) { 499 + struct qedr_alloc_pd_uresp uresp; 500 + 501 + uresp.pd_id = pd_id; 502 + 503 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 504 + if (rc) { 505 DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); 506 + dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); 507 + goto err; 508 + } 509 + 510 + pd->uctx = get_qedr_ucontext(context); 511 + pd->uctx->pd = pd; 512 } 513 514 return &pd->ibpd; 515 + 516 + err: 517 + kfree(pd); 518 + return ERR_PTR(rc); 519 } 520 521 int qedr_dealloc_pd(struct ib_pd *ibpd) ··· 1600 return ERR_PTR(-EFAULT); 1601 } 1602 1603 + static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) 1604 { 1605 switch (qp_state) { 1606 case QED_ROCE_QP_STATE_RESET: ··· 1621 return IB_QPS_ERR; 1622 } 1623 1624 + static enum qed_roce_qp_state qedr_get_state_from_ibqp( 1625 + enum ib_qp_state qp_state) 1626 { 1627 switch (qp_state) { 1628 case IB_QPS_RESET: ··· 1657 int status = 0; 1658 1659 if (new_state == qp->state) 1660 + return 0; 1661 1662 switch (qp->state) { 1663 case QED_ROCE_QP_STATE_RESET: ··· 1733 /* ERR->XXX */ 1734 switch (new_state) { 1735 case QED_ROCE_QP_STATE_RESET: 1736 + if ((qp->rq.prod != qp->rq.cons) || 1737 + (qp->sq.prod != qp->sq.cons)) { 1738 + DP_NOTICE(dev, 1739 + "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n", 1740 + qp->rq.prod, qp->rq.cons, qp->sq.prod, 1741 + qp->sq.cons); 1742 + status = -EINVAL; 1743 + } 1744 break; 1745 default: 1746 status = -EINVAL; ··· 1865 qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); 1866 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", 1867 qp_params.remote_mac_addr); 1868 1869 qp_params.mtu = qp->mtu; 1870 qp_params.lb_indication = false; ··· 2016 2017 qp_attr->qp_state = qedr_get_ibqp_state(params.state); 2018 qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); 2019 + qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu); 2020 qp_attr->path_mig_state = IB_MIG_MIGRATED; 2021 qp_attr->rq_psn = params.rq_psn; 2022 qp_attr->sq_psn = params.sq_psn; ··· 2028 qp_attr->cap.max_recv_wr = qp->rq.max_wr; 2029 qp_attr->cap.max_send_sge = qp->sq.max_sges; 2030 qp_attr->cap.max_recv_sge = qp->rq.max_sges; 2031 + qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE; 2032 qp_init_attr->cap = qp_attr->cap; 2033 2034 memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0], ··· 2302 return rc; 2303 } 2304 2305 + static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, 2306 + int max_page_list_len) 2307 { 2308 struct qedr_pd *pd = get_qedr_pd(ibpd); 2309 struct qedr_dev *dev = get_qedr_dev(ibpd->device); ··· 2704 return 0; 2705 } 2706 2707 + static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode) 2708 { 2709 switch (opcode) { 2710 case IB_WR_RDMA_WRITE: ··· 2729 } 2730 } 2731 2732 + static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr) 2733 { 2734 int wq_is_full, err_wr, pbl_is_full; 2735 struct qedr_dev *dev = qp->dev; ··· 2766 return true; 2767 } 2768 2769 + static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, 2770 struct ib_send_wr **bad_wr) 2771 { 2772 struct qedr_dev *dev = get_qedr_dev(ibqp->device); ··· 3234 IB_WC_SUCCESS, 0); 3235 break; 
3236 case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR: 3237 + if (qp->state != QED_ROCE_QP_STATE_ERR) 3238 + DP_ERR(dev, 3239 + "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n", 3240 + cq->icid, qp->icid); 3241 cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons, 3242 IB_WC_WR_FLUSH_ERR, 1); 3243 break;
+1 -3
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
··· 1029 if (ret) { 1030 dev_err(&pdev->dev, "failed to allocate interrupts\n"); 1031 ret = -ENOMEM; 1032 - goto err_netdevice; 1033 } 1034 1035 /* Allocate UAR table. */ ··· 1092 err_free_intrs: 1093 pvrdma_free_irq(dev); 1094 pvrdma_disable_msi_all(dev); 1095 - err_netdevice: 1096 - unregister_netdevice_notifier(&dev->nb_netdev); 1097 err_free_cq_ring: 1098 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); 1099 err_free_async_ring:
··· 1029 if (ret) { 1030 dev_err(&pdev->dev, "failed to allocate interrupts\n"); 1031 ret = -ENOMEM; 1032 + goto err_free_cq_ring; 1033 } 1034 1035 /* Allocate UAR table. */ ··· 1092 err_free_intrs: 1093 pvrdma_free_irq(dev); 1094 pvrdma_disable_msi_all(dev); 1095 err_free_cq_ring: 1096 pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); 1097 err_free_async_ring:
+1 -1
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
··· 306 union pvrdma_cmd_resp rsp; 307 struct pvrdma_cmd_create_uc *cmd = &req.create_uc; 308 struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; 309 - struct pvrdma_alloc_ucontext_resp uresp; 310 int ret; 311 void *ptr; 312
··· 306 union pvrdma_cmd_resp rsp; 307 struct pvrdma_cmd_create_uc *cmd = &req.create_uc; 308 struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp; 309 + struct pvrdma_alloc_ucontext_resp uresp = {0}; 310 int ret; 311 void *ptr; 312
+1 -1
drivers/infiniband/sw/rxe/rxe_net.c
··· 555 } 556 557 spin_lock_bh(&dev_list_lock); 558 - list_add_tail(&rxe_dev_list, &rxe->list); 559 spin_unlock_bh(&dev_list_lock); 560 return rxe; 561 }
··· 555 } 556 557 spin_lock_bh(&dev_list_lock); 558 + list_add_tail(&rxe->list, &rxe_dev_list); 559 spin_unlock_bh(&dev_list_lock); 560 return rxe; 561 }
+1 -2
drivers/infiniband/sw/rxe/rxe_qp.c
··· 813 del_timer_sync(&qp->rnr_nak_timer); 814 815 rxe_cleanup_task(&qp->req.task); 816 - if (qp_type(qp) == IB_QPT_RC) 817 - rxe_cleanup_task(&qp->comp.task); 818 819 /* flush out any receive wr's or pending requests */ 820 __rxe_do_task(&qp->req.task);
··· 813 del_timer_sync(&qp->rnr_nak_timer); 814 815 rxe_cleanup_task(&qp->req.task); 816 + rxe_cleanup_task(&qp->comp.task); 817 818 /* flush out any receive wr's or pending requests */ 819 __rxe_do_task(&qp->req.task);
+4 -7
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 651 SHOST_DIX_GUARD_CRC); 652 } 653 654 - /* 655 - * Limit the sg_tablesize and max_sectors based on the device 656 - * max fastreg page list length. 657 - */ 658 - shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize, 659 - ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len); 660 - 661 if (iscsi_host_add(shost, 662 ib_conn->device->ib_device->dma_device)) { 663 mutex_unlock(&iser_conn->state_mutex); ··· 671 */ 672 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; 673 shost->max_sectors = min(iser_max_sectors, max_fr_sectors); 674 675 if (cmds_max > max_cmds) { 676 iser_info("cmds_max changed from %u to %u\n",
··· 651 SHOST_DIX_GUARD_CRC); 652 } 653 654 if (iscsi_host_add(shost, 655 ib_conn->device->ib_device->dma_device)) { 656 mutex_unlock(&iser_conn->state_mutex); ··· 678 */ 679 max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9; 680 shost->max_sectors = min(iser_max_sectors, max_fr_sectors); 681 + 682 + iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", 683 + iser_conn, shost->sg_tablesize, 684 + shost->max_sectors); 685 686 if (cmds_max > max_cmds) { 687 iser_info("cmds_max changed from %u to %u\n",
-2
drivers/infiniband/ulp/iser/iscsi_iser.h
··· 496 * @rx_descs: rx buffers array (cyclic buffer) 497 * @num_rx_descs: number of rx descriptors 498 * @scsi_sg_tablesize: scsi host sg_tablesize 499 - * @scsi_max_sectors: scsi host max sectors 500 */ 501 struct iser_conn { 502 struct ib_conn ib_conn; ··· 518 struct iser_rx_desc *rx_descs; 519 u32 num_rx_descs; 520 unsigned short scsi_sg_tablesize; 521 - unsigned int scsi_max_sectors; 522 bool snd_w_inv; 523 }; 524
··· 496 * @rx_descs: rx buffers array (cyclic buffer) 497 * @num_rx_descs: number of rx descriptors 498 * @scsi_sg_tablesize: scsi host sg_tablesize 499 */ 500 struct iser_conn { 501 struct ib_conn ib_conn; ··· 519 struct iser_rx_desc *rx_descs; 520 u32 num_rx_descs; 521 unsigned short scsi_sg_tablesize; 522 bool snd_w_inv; 523 }; 524
+1 -12
drivers/infiniband/ulp/iser/iser_verbs.c
··· 707 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, 708 device->ib_device->attrs.max_fast_reg_page_list_len); 709 710 - if (sg_tablesize > sup_sg_tablesize) { 711 - sg_tablesize = sup_sg_tablesize; 712 - iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512; 713 - } else { 714 - iser_conn->scsi_max_sectors = max_sectors; 715 - } 716 - 717 - iser_conn->scsi_sg_tablesize = sg_tablesize; 718 - 719 - iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n", 720 - iser_conn, iser_conn->scsi_sg_tablesize, 721 - iser_conn->scsi_max_sectors); 722 } 723 724 /**
··· 707 sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, 708 device->ib_device->attrs.max_fast_reg_page_list_len); 709 710 + iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); 711 } 712 713 /**
+13 -2
drivers/infiniband/ulp/srp/ib_srp.c
··· 371 struct srp_fr_desc *d; 372 struct ib_mr *mr; 373 int i, ret = -EINVAL; 374 375 if (pool_size <= 0) 376 goto err; ··· 385 spin_lock_init(&pool->lock); 386 INIT_LIST_HEAD(&pool->free_list); 387 388 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { 389 - mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 390 - max_page_list_len); 391 if (IS_ERR(mr)) { 392 ret = PTR_ERR(mr); 393 if (ret == -ENOMEM) ··· 3697 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", 3698 cmd_sg_entries); 3699 indirect_sg_entries = cmd_sg_entries; 3700 } 3701 3702 srp_remove_wq = create_workqueue("srp_remove");
··· 371 struct srp_fr_desc *d; 372 struct ib_mr *mr; 373 int i, ret = -EINVAL; 374 + enum ib_mr_type mr_type; 375 376 if (pool_size <= 0) 377 goto err; ··· 384 spin_lock_init(&pool->lock); 385 INIT_LIST_HEAD(&pool->free_list); 386 387 + if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) 388 + mr_type = IB_MR_TYPE_SG_GAPS; 389 + else 390 + mr_type = IB_MR_TYPE_MEM_REG; 391 + 392 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { 393 + mr = ib_alloc_mr(pd, mr_type, max_page_list_len); 394 if (IS_ERR(mr)) { 395 ret = PTR_ERR(mr); 396 if (ret == -ENOMEM) ··· 3692 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", 3693 cmd_sg_entries); 3694 indirect_sg_entries = cmd_sg_entries; 3695 + } 3696 + 3697 + if (indirect_sg_entries > SG_MAX_SEGMENTS) { 3698 + pr_warn("Clamping indirect_sg_entries to %u\n", 3699 + SG_MAX_SEGMENTS); 3700 + indirect_sg_entries = SG_MAX_SEGMENTS; 3701 } 3702 3703 srp_remove_wq = create_workqueue("srp_remove");
+14
include/rdma/ib_verbs.h
··· 352 } 353 } 354 355 enum ib_port_state { 356 IB_PORT_NOP = 0, 357 IB_PORT_DOWN = 1,
··· 352 } 353 } 354 355 + static inline enum ib_mtu ib_mtu_int_to_enum(int mtu) 356 + { 357 + if (mtu >= 4096) 358 + return IB_MTU_4096; 359 + else if (mtu >= 2048) 360 + return IB_MTU_2048; 361 + else if (mtu >= 1024) 362 + return IB_MTU_1024; 363 + else if (mtu >= 512) 364 + return IB_MTU_512; 365 + else 366 + return IB_MTU_256; 367 + } 368 + 369 enum ib_port_state { 370 IB_PORT_NOP = 0, 371 IB_PORT_DOWN = 1,
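
The helper added above centralizes the MTU ladder that the cxgb3,
cxgb4, i40iw and nes hunks earlier in this pull previously open-coded.
A hypothetical caller in a driver's query_port() path (names here are
illustrative) reduces to:

    props->max_mtu = IB_MTU_4096;
    props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

For typical Ethernet MTUs the helper rounds down to the nearest IB
enum value: 1500 maps to IB_MTU_1024, 9000 (jumbo frames) and 4096 map
to IB_MTU_4096, and anything below 512 falls back to IB_MTU_256.
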
+1
include/uapi/rdma/Kbuild
··· 16 header-y += ocrdma-abi.h 17 header-y += hns-abi.h 18 header-y += vmw_pvrdma-abi.h
··· 16 header-y += ocrdma-abi.h 17 header-y += hns-abi.h 18 header-y += vmw_pvrdma-abi.h 19 + header-y += qedr-abi.h
+1 -1
include/uapi/rdma/cxgb3-abi.h
··· 30 * SOFTWARE. 31 */ 32 #ifndef CXGB3_ABI_USER_H 33 - #define CXBG3_ABI_USER_H 34 35 #include <linux/types.h> 36
··· 30 * SOFTWARE. 31 */ 32 #ifndef CXGB3_ABI_USER_H 33 + #define CXGB3_ABI_USER_H 34 35 #include <linux/types.h> 36