Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Correct debugging output when path record lookup fails
  RDMA/cxgb3: Stop the EP Timer on BAD CLOSE
  RDMA/cxgb3: cleanups
  RDMA/cma: Remove unused node_guid from cma_device structure
  IB/cm: Remove ca_guid from cm_device structure
  RDMA/cma: Request reversible paths only
  IB/core: Set hop limit in ib_init_ah_from_wc correctly
  IB/uverbs: Return correct error for invalid PD in register MR
  IPoIB: Remove unused local_rate tracking
  IPoIB/cm: Improve small message bandwidth
  IB/mthca: Make 2 functions static

+76 -102
+4 -6
drivers/infiniband/core/cm.c
···
 struct cm_device {
         struct list_head list;
         struct ib_device *device;
-        __be64 ca_guid;
         struct cm_port port[0];
 };

···
                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
-                               &cm_id_priv->av.port->cm_dev->ca_guid,
-                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
+                               &cm_id_priv->id.device->node_guid,
+                               sizeof cm_id_priv->id.device->node_guid,
                                NULL, 0);
                 break;
         case IB_CM_REQ_RCVD:
···

         req_msg->local_comm_id = cm_id_priv->id.local_id;
         req_msg->service_id = param->service_id;
-        req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
+        req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
         cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
         cm_req_set_resp_res(req_msg, param->responder_resources);
         cm_req_set_init_depth(req_msg, param->initiator_depth);
···
         cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
         cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
         cm_rep_set_srq(rep_msg, param->srq);
-        rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
+        rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;

         if (param->private_data && param->private_data_len)
                 memcpy(rep_msg->private_data, param->private_data,
···
                 return;

         cm_dev->device = device;
-        cm_dev->ca_guid = device->node_guid;

         set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
         for (i = 1; i <= device->phys_port_cnt; i++) {
+3 -3
drivers/infiniband/core/cma.c
···
 struct cma_device {
         struct list_head list;
         struct ib_device *device;
-        __be64 node_guid;
         struct completion comp;
         atomic_t refcount;
         struct list_head id_list;
···
         ib_addr_get_dgid(addr, &path_rec.dgid);
         path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
         path_rec.numb_path = 1;
+        path_rec.reversible = 1;

         id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
                                 id_priv->id.port_num, &path_rec,
                                 IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
-                                IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
+                                IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
+                                IB_SA_PATH_REC_REVERSIBLE,
                                 timeout_ms, GFP_KERNEL,
                                 cma_query_handler, work, &id_priv->query);
···
                 return;

         cma_dev->device = device;
-        cma_dev->node_guid = device->node_guid;

         init_completion(&cma_dev->comp);
         atomic_set(&cma_dev->refcount, 1);
+3 -1
drivers/infiniband/core/uverbs_cmd.c
···
         obj->umem.virt_base = cmd.hca_va;

         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
-        if (!pd)
+        if (!pd) {
+                ret = -EINVAL;
                 goto err_release;
+        }

         mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
         if (IS_ERR(mr)) {
+1 -1
drivers/infiniband/core/verbs.c
···
                 ah_attr->grh.sgid_index = (u8) gid_index;
                 flow_class = be32_to_cpu(grh->version_tclass_flow);
                 ah_attr->grh.flow_label = flow_class & 0xFFFFF;
-                ah_attr->grh.hop_limit = grh->hop_limit;
+                ah_attr->grh.hop_limit = 0xFF;
                 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
         }
         return 0;
-1
drivers/infiniband/hw/cxgb3/Makefile
···

 ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
 EXTRA_CFLAGS += -DDEBUG
-iw_cxgb3-y += cxio_dbg.o
 endif
+9 -22
drivers/infiniband/hw/cxgb3/cxio_hal.c
···
 static LIST_HEAD(rdev_list);
 static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;

-static inline struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
+static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
 {
         struct cxio_rdev *rdev;

···
         return NULL;
 }

-static inline struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev
-                                                             *tdev)
+static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
 {
         struct cxio_rdev *rdev;

···
         return 0;
 }

-static inline int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
+static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
 {
         struct rdma_cq_setup setup;
         setup.id = cqid;
···
         return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }

-int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
+static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
 {
         u64 sge_cmd;
         struct t3_modify_qp_wr *wqe;
···
         }
 }

-static inline int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
+static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
 {
         if (CQE_OPCODE(*cqe) == T3_TERMINATE)
                 return 0;
···
         return err;
 }

-/* IN : stag key, pdid, pbl_size
- * Out: stag index, actaul pbl_size, and pbl_addr allocated.
- */
-int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid,
-                       enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr)
-{
-        *stag = T3_STAG_UNSET;
-        return (__cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
-                              perm, 0, 0ULL, 0, 0, NULL, pbl_size, pbl_addr));
-}
-
 int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
                            enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
                            u8 page_size, __be64 *pbl, u32 *pbl_size,
···
         cxio_hal_destroy_rhdl_resource();
 }

-static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
+static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
 {
         struct t3_swsq *sqp;
         __u32 ptr = wq->sq_rptr;
···
                         break;
 }

-static inline void create_read_req_cqe(struct t3_wq *wq,
-                                       struct t3_cqe *hw_cqe,
-                                       struct t3_cqe *read_cqe)
+static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
+                                struct t3_cqe *read_cqe)
 {
         read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
         read_cqe->len = wq->oldest_read->read_len;
···
 /*
  * Return a ptr to the next read wr in the SWSQ or NULL.
  */
-static inline void advance_oldest_read(struct t3_wq *wq)
+static void advance_oldest_read(struct t3_wq *wq)
 {

         u32 rptr = wq->oldest_read - wq->sq + 1;
-5
drivers/infiniband/hw/cxgb3/cxio_hal.h
···
 void cxio_rdev_close(struct cxio_rdev *rdev);
 int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
                    enum t3_cq_opcode op, u32 credit);
-int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev, u32 qpid);
 int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
···
 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
                     struct cxio_ucontext *uctx);
 int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
-int cxio_allocate_stag(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
-                       enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr);
 int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
                            enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
                            u8 page_size, __be64 *pbl, u32 *pbl_size,
···
 int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
 void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
 void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-u32 cxio_hal_get_rhdl(void);
-void cxio_hal_put_rhdl(u32 rhdl);
 u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
 void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
 int __init cxio_hal_init(void);
+2 -12
drivers/infiniband/hw/cxgb3/cxio_resource.c
···
 /*
  * returns 0 if no resource available
  */
-static inline u32 cxio_hal_get_resource(struct kfifo *fifo)
+static u32 cxio_hal_get_resource(struct kfifo *fifo)
 {
         u32 entry;
         if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32)))
···
                 return 0;  /* fifo emptry */
 }

-static inline void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
+static void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
 {
         BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0);
-}
-
-u32 cxio_hal_get_rhdl(void)
-{
-        return cxio_hal_get_resource(rhdl_fifo);
-}
-
-void cxio_hal_put_rhdl(u32 rhdl)
-{
-        cxio_hal_put_resource(rhdl_fifo, rhdl);
 }

 u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
+3 -3
drivers/infiniband/hw/cxgb3/iwch_cm.c
···
         return state;
 }

-static inline void __state_set(struct iwch_ep_common *epc,
-                               enum iwch_ep_state new)
+static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
 {
         epc->state = new;
 }
···
 /*
  * Returns whether an ABORT_REQ_RSS message is a negative advice.
  */
-static inline int is_neg_adv_abort(unsigned int status)
+static int is_neg_adv_abort(unsigned int status)
 {
         return status == CPL_ERR_RTX_NEG_ADVICE ||
                status == CPL_ERR_PERSIST_NEG_ADVICE;
···

         printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
                __FUNCTION__, ep->hwtid);
+        stop_ep_timer(ep);
         attrs.next_state = IWCH_QP_STATE_ERROR;
         iwch_modify_qp(ep->com.qp->rhp,
                        ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
+1 -1
drivers/infiniband/hw/cxgb3/iwch_provider.c
···
         wake_up(&(to_iwch_qp(qp)->wait));
 }

-struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
+static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
 {
         PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
         return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
-1
drivers/infiniband/hw/cxgb3/iwch_provider.h
···

 void iwch_qp_add_ref(struct ib_qp *qp);
 void iwch_qp_rem_ref(struct ib_qp *qp);
-struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn);

 struct iwch_ucontext {
         struct ib_ucontext ibucontext;
+13 -16
drivers/infiniband/hw/cxgb3/iwch_qp.c
···

 #define NO_SUPPORT -1

-static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
-                                       u8 * flit_cnt)
+static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
+                                u8 * flit_cnt)
 {
         int i;
         u32 plen;
···
         return 0;
 }

-static inline int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
-                                        u8 *flit_cnt)
+static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
+                                 u8 *flit_cnt)
 {
         int i;
         u32 plen;
···
         return 0;
 }

-static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
-                                       u8 *flit_cnt)
+static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
+                                u8 *flit_cnt)
 {
         if (wr->num_sge > 1)
                 return -EINVAL;
···
 /*
  * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
  */
-static inline int iwch_sgl2pbl_map(struct iwch_dev *rhp,
-                                   struct ib_sge *sg_list, u32 num_sgle,
-                                   u32 * pbl_addr, u8 * page_size)
+static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
+                            u32 num_sgle, u32 * pbl_addr, u8 * page_size)
 {
         int i;
         struct iwch_mr *mhp;
···
         return 0;
 }

-static inline int iwch_build_rdma_recv(struct iwch_dev *rhp,
-                                       union t3_wr *wqe,
-                                       struct ib_recv_wr *wr)
+static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
+                                struct ib_recv_wr *wr)
 {
         int i, err = 0;
         u32 pbl_addr[4];
···
         return err;
 }

-static inline void build_term_codes(int t3err, u8 *layer_type, u8 *ecode,
-                                    int tagged)
+static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged)
 {
         switch (t3err) {
         case TPT_ERR_STAG:
···
         spin_lock_irqsave(&qhp->lock, *flag);
 }

-static inline void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
         if (t3b_device(qhp->rhp))
                 cxio_set_wq_in_error(&qhp->wq);
···
 /*
  * Return non zero if at least one RECV was pre-posted.
  */
-static inline int rqes_posted(struct iwch_qp *qhp)
+static int rqes_posted(struct iwch_qp *qhp)
 {
         return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
 }
+6 -4
drivers/infiniband/hw/mthca/mthca_mr.c
···
         return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
 }

-void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
-                               int start_index, u64 *buffer_list, int list_len)
+static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
+                                      struct mthca_mtt *mtt, int start_index,
+                                      u64 *buffer_list, int list_len)
 {
         u64 __iomem *mtts;
         int i;
···
                           mtts + i);
 }

-void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
-                               int start_index, u64 *buffer_list, int list_len)
+static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
+                                      struct mthca_mtt *mtt, int start_index,
+                                      u64 *buffer_list, int list_len)
 {
         __be64 *mtts;
         dma_addr_t dma_handle;
-1
drivers/infiniband/ulp/ipoib/ipoib.h
···

         union ib_gid local_gid;
         u16 local_lid;
-        u8 local_rate;

         unsigned int admin_mtu;
         unsigned int mcast_mtu;
+27 -19
drivers/infiniband/ulp/ipoib/ipoib_cm.c
···
 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                                struct ib_cm_event *event);

-static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv,
+static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
                                   u64 mapping[IPOIB_CM_RX_SG])
 {
         int i;

         ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

-        for (i = 0; i < IPOIB_CM_RX_SG - 1; ++i)
+        for (i = 0; i < frags; ++i)
                 ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 }

···
         ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
         if (unlikely(ret)) {
                 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
-                ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[id].mapping);
+                ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+                                      priv->cm.srq_ring[id].mapping);
                 dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                 priv->cm.srq_ring[id].skb = NULL;
         }
···
         return ret;
 }

-static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
-                                 u64 mapping[IPOIB_CM_RX_SG])
+static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
+                                             u64 mapping[IPOIB_CM_RX_SG])
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct sk_buff *skb;
···

         skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
         if (unlikely(!skb))
-                return -ENOMEM;
+                return NULL;

         /*
          * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
···
                                        DMA_FROM_DEVICE);
         if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                 dev_kfree_skb_any(skb);
-                return -EIO;
+                return NULL;
         }

-        for (i = 0; i < IPOIB_CM_RX_SG - 1; i++) {
+        for (i = 0; i < frags; i++) {
                 struct page *page = alloc_page(GFP_ATOMIC);

                 if (!page)
···
         }

         priv->cm.srq_ring[id].skb = skb;
-        return 0;
+        return skb;

 partial_error:

···
                 ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);

         dev_kfree_skb_any(skb);
-        return -ENOMEM;
+        return NULL;
 }

 static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
···
 }
 /* Adjust length of skb with fragments to match received data */
 static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
-                          unsigned int length)
+                          unsigned int length, struct sk_buff *toskb)
 {
         int i, num_frags;
         unsigned int size;
···

                 if (length == 0) {
                         /* don't need this page */
-                        __free_page(frag->page);
+                        skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
                         --skb_shinfo(skb)->nr_frags;
                 } else {
                         size = min(length, (unsigned) PAGE_SIZE);
···
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
-        struct sk_buff *skb;
+        struct sk_buff *skb, *newskb;
         struct ipoib_cm_rx *p;
         unsigned long flags;
         u64 mapping[IPOIB_CM_RX_SG];
+        int frags;

         ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n",
                        wr_id, wc->opcode, wc->status);
···
                 }
         }

-        if (unlikely(ipoib_cm_alloc_rx_skb(dev, wr_id, mapping))) {
+        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+                                              (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+
+        newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
+        if (unlikely(!newskb)) {
                 /*
                  * If we can't allocate a new RX buffer, dump
                  * this packet and reuse the old buffer.
···
                 goto repost;
         }

-        ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[wr_id].mapping);
-        memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, sizeof mapping);
+        ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
+        memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

         ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                        wc->byte_len, wc->slid);

-        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len);
+        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

         skb->protocol = ((struct ipoib_header *) skb->data)->proto;
         skb->mac.raw = skb->data;
···
         priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;

         for (i = 0; i < ipoib_recvq_size; ++i) {
-                if (ipoib_cm_alloc_rx_skb(dev, i, priv->cm.srq_ring[i].mapping)) {
+                if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
+                                           priv->cm.srq_ring[i].mapping)) {
                         ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                         ipoib_cm_dev_cleanup(dev);
                         return -ENOMEM;
···
                 return;
         for (i = 0; i < ipoib_recvq_size; ++i)
                 if (priv->cm.srq_ring[i].skb) {
-                        ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[i].mapping);
+                        ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+                                              priv->cm.srq_ring[i].mapping);
                         dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
                         priv->cm.srq_ring[i].skb = NULL;
                 }
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_main.c
···
         struct sk_buff *skb;
         unsigned long flags;

-        if (pathrec)
+        if (!status)
                 ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
                           be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
         else
+3 -5
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
···
 {
         struct ib_port_attr attr;

-        if (!ib_query_port(priv->ca, priv->port, &attr)) {
-                priv->local_lid = attr.lid;
-                priv->local_rate = attr.active_speed *
-                        ib_width_enum_to_int(attr.active_width);
-        } else
+        if (!ib_query_port(priv->ca, priv->port, &attr))
+                priv->local_lid = attr.lid;
+        else
                 ipoib_warn(priv, "ib_query_port failed\n");
 }
