Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Correct debugging output when path record lookup fails
  RDMA/cxgb3: Stop the EP Timer on BAD CLOSE
  RDMA/cxgb3: cleanups
  RDMA/cma: Remove unused node_guid from cma_device structure
  IB/cm: Remove ca_guid from cm_device structure
  RDMA/cma: Request reversible paths only
  IB/core: Set hop limit in ib_init_ah_from_wc correctly
  IB/uverbs: Return correct error for invalid PD in register MR
  IPoIB: Remove unused local_rate tracking
  IPoIB/cm: Improve small message bandwidth
  IB/mthca: Make 2 functions static

+76 -102
+4 -6
drivers/infiniband/core/cm.c
···
 struct cm_device {
         struct list_head list;
         struct ib_device *device;
-        __be64 ca_guid;
         struct cm_port port[0];
 };
···
                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
-                               &cm_id_priv->av.port->cm_dev->ca_guid,
-                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
+                               &cm_id_priv->id.device->node_guid,
+                               sizeof cm_id_priv->id.device->node_guid,
                                NULL, 0);
                 break;
         case IB_CM_REQ_RCVD:
···
 
         req_msg->local_comm_id = cm_id_priv->id.local_id;
         req_msg->service_id = param->service_id;
-        req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
+        req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
         cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
         cm_req_set_resp_res(req_msg, param->responder_resources);
         cm_req_set_init_depth(req_msg, param->initiator_depth);
···
         cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
         cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
         cm_rep_set_srq(rep_msg, param->srq);
-        rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
+        rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 
         if (param->private_data && param->private_data_len)
                 memcpy(rep_msg->private_data, param->private_data,
···
                 return;
 
         cm_dev->device = device;
-        cm_dev->ca_guid = device->node_guid;
 
         set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
         for (i = 1; i <= device->phys_port_cnt; i++) {
+3 -3
drivers/infiniband/core/cma.c
···
 struct cma_device {
         struct list_head list;
         struct ib_device *device;
-        __be64 node_guid;
         struct completion comp;
         atomic_t refcount;
         struct list_head id_list;
···
         ib_addr_get_dgid(addr, &path_rec.dgid);
         path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
         path_rec.numb_path = 1;
+        path_rec.reversible = 1;
 
         id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
                                 id_priv->id.port_num, &path_rec,
                                 IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
-                                IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
+                                IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
+                                IB_SA_PATH_REC_REVERSIBLE,
                                 timeout_ms, GFP_KERNEL,
                                 cma_query_handler, work, &id_priv->query);
 
···
                 return;
 
         cma_dev->device = device;
-        cma_dev->node_guid = device->node_guid;
 
         init_completion(&cma_dev->comp);
         atomic_set(&cma_dev->refcount, 1);
+3 -1
drivers/infiniband/core/uverbs_cmd.c
···
         obj->umem.virt_base = cmd.hca_va;
 
         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
-        if (!pd)
+        if (!pd) {
+                ret = -EINVAL;
                 goto err_release;
+        }
 
         mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
         if (IS_ERR(mr)) {
+1 -1
drivers/infiniband/core/verbs.c
···
                 ah_attr->grh.sgid_index = (u8) gid_index;
                 flow_class = be32_to_cpu(grh->version_tclass_flow);
                 ah_attr->grh.flow_label = flow_class & 0xFFFFF;
-                ah_attr->grh.hop_limit = grh->hop_limit;
+                ah_attr->grh.hop_limit = 0xFF;
                 ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
         }
         return 0;
-1
drivers/infiniband/hw/cxgb3/Makefile
···
 
 ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
 EXTRA_CFLAGS += -DDEBUG
-iw_cxgb3-y += cxio_dbg.o
 endif
+9 -22
drivers/infiniband/hw/cxgb3/cxio_hal.c
···
 static LIST_HEAD(rdev_list);
 static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
 
-static inline struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
+static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
 {
         struct cxio_rdev *rdev;
 
···
         return NULL;
 }
 
-static inline struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev
-                                                             *tdev)
+static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
 {
         struct cxio_rdev *rdev;
 
···
         return 0;
 }
 
-static inline int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
+static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
 {
         struct rdma_cq_setup setup;
         setup.id = cqid;
···
         return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
-int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
+static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
 {
         u64 sge_cmd;
         struct t3_modify_qp_wr *wqe;
···
         }
 }
 
-static inline int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
+static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
 {
         if (CQE_OPCODE(*cqe) == T3_TERMINATE)
                 return 0;
···
         return err;
 }
 
-/* IN : stag key, pdid, pbl_size
- * Out: stag index, actaul pbl_size, and pbl_addr allocated.
- */
-int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid,
-                       enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr)
-{
-        *stag = T3_STAG_UNSET;
-        return (__cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
-                              perm, 0, 0ULL, 0, 0, NULL, pbl_size, pbl_addr));
-}
-
 int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
                            enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
                            u8 page_size, __be64 *pbl, u32 *pbl_size,
···
         cxio_hal_destroy_rhdl_resource();
 }
 
-static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
+static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
 {
         struct t3_swsq *sqp;
         __u32 ptr = wq->sq_rptr;
···
                         break;
 }
 
-static inline void create_read_req_cqe(struct t3_wq *wq,
-                                       struct t3_cqe *hw_cqe,
-                                       struct t3_cqe *read_cqe)
+static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
+                                struct t3_cqe *read_cqe)
 {
         read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
         read_cqe->len = wq->oldest_read->read_len;
···
 /*
  * Return a ptr to the next read wr in the SWSQ or NULL.
  */
-static inline void advance_oldest_read(struct t3_wq *wq)
+static void advance_oldest_read(struct t3_wq *wq)
 {
 
         u32 rptr = wq->oldest_read - wq->sq + 1;
-5
drivers/infiniband/hw/cxgb3/cxio_hal.h
···
 void cxio_rdev_close(struct cxio_rdev *rdev);
 int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
                    enum t3_cq_opcode op, u32 credit);
-int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev, u32 qpid);
 int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
···
 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
                     struct cxio_ucontext *uctx);
 int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
-int cxio_allocate_stag(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
-                       enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr);
 int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
                            enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
                            u8 page_size, __be64 *pbl, u32 *pbl_size,
···
 int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
 void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
 void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-u32 cxio_hal_get_rhdl(void);
-void cxio_hal_put_rhdl(u32 rhdl);
 u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
 void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
 int __init cxio_hal_init(void);
+2 -12
drivers/infiniband/hw/cxgb3/cxio_resource.c
···
 /*
  * returns 0 if no resource available
  */
-static inline u32 cxio_hal_get_resource(struct kfifo *fifo)
+static u32 cxio_hal_get_resource(struct kfifo *fifo)
 {
         u32 entry;
         if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32)))
···
                 return 0;  /* fifo emptry */
 }
 
-static inline void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
+static void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
 {
         BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0);
-}
-
-u32 cxio_hal_get_rhdl(void)
-{
-        return cxio_hal_get_resource(rhdl_fifo);
-}
-
-void cxio_hal_put_rhdl(u32 rhdl)
-{
-        cxio_hal_put_resource(rhdl_fifo, rhdl);
 }
 
 u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
+3 -3
drivers/infiniband/hw/cxgb3/iwch_cm.c
···
         return state;
 }
 
-static inline void __state_set(struct iwch_ep_common *epc,
-                               enum iwch_ep_state new)
+static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
 {
         epc->state = new;
 }
···
 /*
  * Returns whether an ABORT_REQ_RSS message is a negative advice.
  */
-static inline int is_neg_adv_abort(unsigned int status)
+static int is_neg_adv_abort(unsigned int status)
 {
         return status == CPL_ERR_RTX_NEG_ADVICE ||
                status == CPL_ERR_PERSIST_NEG_ADVICE;
···
 
         printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
                __FUNCTION__, ep->hwtid);
+        stop_ep_timer(ep);
         attrs.next_state = IWCH_QP_STATE_ERROR;
         iwch_modify_qp(ep->com.qp->rhp,
                        ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
+1 -1
drivers/infiniband/hw/cxgb3/iwch_provider.c
···
         wake_up(&(to_iwch_qp(qp)->wait));
 }
 
-struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
+static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
 {
         PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
         return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
-1
drivers/infiniband/hw/cxgb3/iwch_provider.h
···
 
 void iwch_qp_add_ref(struct ib_qp *qp);
 void iwch_qp_rem_ref(struct ib_qp *qp);
-struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn);
 
 struct iwch_ucontext {
         struct ib_ucontext ibucontext;
+13 -16
drivers/infiniband/hw/cxgb3/iwch_qp.c
···
 
 #define NO_SUPPORT -1
 
-static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
-                                       u8 * flit_cnt)
+static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
+                                u8 * flit_cnt)
 {
         int i;
         u32 plen;
···
         return 0;
 }
 
-static inline int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
-                                        u8 *flit_cnt)
+static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
+                                 u8 *flit_cnt)
 {
         int i;
         u32 plen;
···
         return 0;
 }
 
-static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
-                                       u8 *flit_cnt)
+static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
+                                u8 *flit_cnt)
 {
         if (wr->num_sge > 1)
                 return -EINVAL;
···
 /*
  * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
  */
-static inline int iwch_sgl2pbl_map(struct iwch_dev *rhp,
-                                   struct ib_sge *sg_list, u32 num_sgle,
-                                   u32 * pbl_addr, u8 * page_size)
+static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
+                            u32 num_sgle, u32 * pbl_addr, u8 * page_size)
 {
         int i;
         struct iwch_mr *mhp;
···
         return 0;
 }
 
-static inline int iwch_build_rdma_recv(struct iwch_dev *rhp,
-                                       union t3_wr *wqe,
-                                       struct ib_recv_wr *wr)
+static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
+                                struct ib_recv_wr *wr)
 {
         int i, err = 0;
         u32 pbl_addr[4];
···
         return err;
 }
 
-static inline void build_term_codes(int t3err, u8 *layer_type, u8 *ecode,
-                                    int tagged)
+static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged)
 {
         switch (t3err) {
         case TPT_ERR_STAG:
···
         spin_lock_irqsave(&qhp->lock, *flag);
 }
 
-static inline void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
         if (t3b_device(qhp->rhp))
                 cxio_set_wq_in_error(&qhp->wq);
···
 /*
  * Return non zero if at least one RECV was pre-posted.
  */
-static inline int rqes_posted(struct iwch_qp *qhp)
+static int rqes_posted(struct iwch_qp *qhp)
 {
         return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
 }
+6 -4
drivers/infiniband/hw/mthca/mthca_mr.c
···
         return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
 }
 
-void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
-                               int start_index, u64 *buffer_list, int list_len)
+static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
+                                      struct mthca_mtt *mtt, int start_index,
+                                      u64 *buffer_list, int list_len)
 {
         u64 __iomem *mtts;
         int i;
···
                               mtts + i);
 }
 
-void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
-                               int start_index, u64 *buffer_list, int list_len)
+static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
+                                      struct mthca_mtt *mtt, int start_index,
+                                      u64 *buffer_list, int list_len)
 {
         __be64 *mtts;
         dma_addr_t dma_handle;
-1
drivers/infiniband/ulp/ipoib/ipoib.h
···
 
         union ib_gid local_gid;
         u16 local_lid;
-        u8 local_rate;
 
         unsigned int admin_mtu;
         unsigned int mcast_mtu;
+27 -19
drivers/infiniband/ulp/ipoib/ipoib_cm.c
···
 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                                struct ib_cm_event *event);
 
-static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv,
+static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
                                   u64 mapping[IPOIB_CM_RX_SG])
 {
         int i;
 
         ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
 
-        for (i = 0; i < IPOIB_CM_RX_SG - 1; ++i)
+        for (i = 0; i < frags; ++i)
                 ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
···
         ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
         if (unlikely(ret)) {
                 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
-                ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[id].mapping);
+                ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+                                      priv->cm.srq_ring[id].mapping);
                 dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
                 priv->cm.srq_ring[id].skb = NULL;
         }
···
         return ret;
 }
 
-static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
-                                 u64 mapping[IPOIB_CM_RX_SG])
+static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
+                                             u64 mapping[IPOIB_CM_RX_SG])
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct sk_buff *skb;
···
 
         skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
         if (unlikely(!skb))
-                return -ENOMEM;
+                return NULL;
 
         /*
          * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
···
                                        DMA_FROM_DEVICE);
         if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
                 dev_kfree_skb_any(skb);
-                return -EIO;
+                return NULL;
         }
 
-        for (i = 0; i < IPOIB_CM_RX_SG - 1; i++) {
+        for (i = 0; i < frags; i++) {
                 struct page *page = alloc_page(GFP_ATOMIC);
 
                 if (!page)
···
         }
 
         priv->cm.srq_ring[id].skb = skb;
-        return 0;
+        return skb;
 
 partial_error:
 
···
         ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 
         dev_kfree_skb_any(skb);
-        return -ENOMEM;
+        return NULL;
 }
 
 static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
···
 }
 /* Adjust length of skb with fragments to match received data */
 static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
-                          unsigned int length)
+                          unsigned int length, struct sk_buff *toskb)
 {
         int i, num_frags;
         unsigned int size;
···
 
                 if (length == 0) {
                         /* don't need this page */
-                        __free_page(frag->page);
+                        skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
                         --skb_shinfo(skb)->nr_frags;
                 } else {
                         size = min(length, (unsigned) PAGE_SIZE);
···
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
-        struct sk_buff *skb;
+        struct sk_buff *skb, *newskb;
         struct ipoib_cm_rx *p;
         unsigned long flags;
         u64 mapping[IPOIB_CM_RX_SG];
+        int frags;
 
         ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n",
                        wr_id, wc->opcode, wc->status);
···
                 }
         }
 
-        if (unlikely(ipoib_cm_alloc_rx_skb(dev, wr_id, mapping))) {
+        frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+                                              (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+
+        newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
+        if (unlikely(!newskb)) {
                 /*
                  * If we can't allocate a new RX buffer, dump
                  * this packet and reuse the old buffer.
···
                 goto repost;
         }
 
-        ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[wr_id].mapping);
-        memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, sizeof mapping);
+        ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
+        memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
 
         ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                        wc->byte_len, wc->slid);
 
-        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len);
+        skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
 
         skb->protocol = ((struct ipoib_header *) skb->data)->proto;
         skb->mac.raw = skb->data;
···
         priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;
 
         for (i = 0; i < ipoib_recvq_size; ++i) {
-                if (ipoib_cm_alloc_rx_skb(dev, i, priv->cm.srq_ring[i].mapping)) {
+                if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
+                                           priv->cm.srq_ring[i].mapping)) {
                         ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                         ipoib_cm_dev_cleanup(dev);
                         return -ENOMEM;
···
                 return;
         for (i = 0; i < ipoib_recvq_size; ++i)
                 if (priv->cm.srq_ring[i].skb) {
-                        ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[i].mapping);
+                        ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+                                              priv->cm.srq_ring[i].mapping);
                         dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
                         priv->cm.srq_ring[i].skb = NULL;
                 }
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_main.c
···
         struct sk_buff *skb;
         unsigned long flags;
 
-        if (pathrec)
+        if (!status)
                 ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
                           be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
         else
+3 -5
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
···
 {
         struct ib_port_attr attr;
 
-        if (!ib_query_port(priv->ca, priv->port, &attr)) {
-                priv->local_lid = attr.lid;
-                priv->local_rate = attr.active_speed *
-                        ib_width_enum_to_int(attr.active_width);
-        } else
+        if (!ib_query_port(priv->ca, priv->port, &attr))
+                priv->local_lid = attr.lid;
+        else
                 ipoib_warn(priv, "ib_query_port failed\n");
 }
 