Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IB/mad: Test ib_create_send_mad() return with IS_ERR(), not == NULL
IB/mlx4: Allow 4K messages for UD QPs
mlx4_core: Add ethernet fields to CQE struct
IB/ipath: Fix printk format warnings
RDMA/cxgb3: Fix deadlock initializing iw_cxgb3 device
RDMA/cxgb3: Fix up MW access rights
RDMA/cxgb3: Fix QP capabilities
RDMA/cma: Remove padding arrays by using struct sockaddr_storage
IB/ipath: Use unsigned long for irq flags
IPoIB/cm: Set correct SG list in ipoib_cm_init_rx_wr()

16 files changed, 108 insertions(+), 122 deletions(-)
+18 -19
drivers/infiniband/core/cma.c
···
 	} multicast;
 	struct list_head	list;
 	void			*context;
-	struct sockaddr		addr;
-	u8			pad[sizeof(struct sockaddr_in6) -
-				    sizeof(struct sockaddr)];
+	struct sockaddr_storage	addr;
 };

 struct cma_work {
···
 		cma_cancel_route(id_priv);
 		break;
 	case CMA_LISTEN:
-		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
-		    !id_priv->cma_dev)
+		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
+		    && !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
 		break;
 	default:
···
 	rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

 	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto destroy_id;
···
 	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
 			  ip_ver, port, src, dst);

-	ret = rdma_translate_ip(&id->route.addr.src_addr,
+	ret = rdma_translate_ip((struct sockaddr *) &id->route.addr.src_addr,
 				&id->route.addr.dev_addr);
 	if (ret)
 		goto err;
···
 	if (IS_ERR(id_priv->cm_id.ib))
 		return PTR_ERR(id_priv->cm_id.ib);

-	addr = &id_priv->id.route.addr.src_addr;
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
 	if (cma_any_addr(addr))
 		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
···

 	dev_id_priv->state = CMA_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
-	       ip_addr_size(&id_priv->id.route.addr.src_addr));
+	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));

 	cma_attach_to_dev(dev_id_priv, cma_dev);
 	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
···
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);
+	path_rec.service_id = cma_get_service_id(id_priv->id.ps,
+						 (struct sockaddr *) &addr->dst_addr);

 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
 		    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

-	if (addr->src_addr.sa_family == AF_INET) {
+	if (addr->src_addr.ss_family == AF_INET) {
 		path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
 		comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
 	} else {
···
 	ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
 	ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);

-	if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
+	if (cma_zero_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) {
 		src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
 		dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
 		src_in->sin_family = dst_in->sin_family;
···
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
 	else
-		ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
+		ret = rdma_resolve_ip(&addr_client, (struct sockaddr *) &id->route.addr.src_addr,
 				      dst_addr, &id->route.addr.dev_addr,
 				      timeout_ms, addr_handler, id_priv);
 	if (ret)
···
 	 * We don't support binding to any address if anyone is bound to
 	 * a specific address on the same port.
 	 */
-	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
+	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		return -EADDRNOTAVAIL;

 	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
+		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
 			return -EADDRNOTAVAIL;

 		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
···
 	}

 	mutex_lock(&lock);
-	if (cma_any_port(&id_priv->id.route.addr.src_addr))
+	if (cma_any_port((struct sockaddr *) &id_priv->id.route.addr.src_addr))
 		ret = cma_alloc_any_port(ps, id_priv);
 	else
 		ret = cma_use_port(ps, id_priv);
···

 	req.path = route->path_rec;
 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;

···
 		req.alternate_path = &route->path_rec[1];

 	req.service_id = cma_get_service_id(id_priv->id.ps,
-					    &route->addr.dst_addr);
+					    (struct sockaddr *) &route->addr.dst_addr);
 	req.qp_num = id_priv->qp_num;
 	req.qp_type = IB_QPT_RC;
 	req.starting_psn = id_priv->seq_num;
···
 	if (ret)
 		return ret;

-	cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
+	cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
 	if (id_priv->id.ps == RDMA_PS_UDP)
 		rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
 	ib_addr_get_sgid(dev_addr, &rec.port_gid);
+1 -1
drivers/infiniband/core/mad_rmpp.c
···
 	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
 				 recv_wc->wc->pkey_index, 1, hdr_len,
 				 0, GFP_KERNEL);
-	if (!msg)
+	if (IS_ERR(msg))
 		return;

 	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
+6 -8
drivers/infiniband/core/ucma.c
···

 	u64			uid;
 	struct list_head	list;
-	struct sockaddr		addr;
-	u8			pad[sizeof(struct sockaddr_in6) -
-				    sizeof(struct sockaddr)];
+	struct sockaddr_storage	addr;
 };

 struct ucma_event {
···
 		return PTR_ERR(ctx);

 	memset(&resp, 0, sizeof resp);
-	addr = &ctx->cm_id->route.addr.src_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
 	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
-	addr = &ctx->cm_id->route.addr.dst_addr;
+	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
 	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
 				     sizeof(struct sockaddr_in) :
 				     sizeof(struct sockaddr_in6));
···

 	mc->uid = cmd.uid;
 	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
-	ret = rdma_join_multicast(ctx->cm_id, &mc->addr, mc);
+	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
 	if (ret)
 		goto err2;

···
 	return 0;

err3:
-	rdma_leave_multicast(ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
 	ucma_cleanup_mc_events(mc);
err2:
 	mutex_lock(&mut);
···
 		goto out;
 	}

-	rdma_leave_multicast(mc->ctx->cm_id, &mc->addr);
+	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
 	mutex_lock(&mc->ctx->file->mut);
 	ucma_cleanup_mc_events(mc);
 	list_del(&mc->list);
+3 -3
drivers/infiniband/hw/cxgb3/cxio_hal.c
···
 		      V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
 	BUG_ON(page_size >= 28);
 	tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
-			F_TPT_MW_BIND_ENABLE |
-			V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
-			V_TPT_PAGE_SIZE(page_size));
+			((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
+			V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
+			V_TPT_PAGE_SIZE(page_size));
 	tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
 			    cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr)>>3));
 	tpt.len = cpu_to_be32(len);
+3 -25
drivers/infiniband/hw/cxgb3/iwch_provider.c
···
 	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
 }

-static int fw_supports_fastreg(struct iwch_dev *iwch_dev)
-{
-	struct ethtool_drvinfo info;
-	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
-	char *cp, *next;
-	unsigned fw_maj, fw_min;
-
-	rtnl_lock();
-	lldev->ethtool_ops->get_drvinfo(lldev, &info);
-	rtnl_unlock();
-
-	next = info.fw_version+1;
-	cp = strsep(&next, ".");
-	sscanf(cp, "%i", &fw_maj);
-	cp = strsep(&next, ".");
-	sscanf(cp, "%i", &fw_min);
-
-	PDBG("%s maj %u min %u\n", __func__, fw_maj, fw_min);
-
-	return fw_maj > 6 || (fw_maj == 6 && fw_min > 0);
-}
-
 static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
···
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
 	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
 	dev->ibdev.owner = THIS_MODULE;
-	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
+	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+				IB_DEVICE_MEM_WINDOW |
+				IB_DEVICE_MEM_MGT_EXTENSIONS;

 	/* cxgb3 supports STag 0. */
 	dev->ibdev.local_dma_lkey = 0;
-	if (fw_supports_fastreg(dev))
-		dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

 	dev->ibdev.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+7
drivers/infiniband/hw/cxgb3/iwch_provider.h
···
 	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
 	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0) |
 	       (acc & IB_ACCESS_LOCAL_WRITE ? TPT_LOCAL_WRITE : 0) |
+	       (acc & IB_ACCESS_MW_BIND ? TPT_MW_BIND : 0) |
 	       TPT_LOCAL_READ;
+}
+
+static inline u32 iwch_ib_to_tpt_bind_access(int acc)
+{
+	return (acc & IB_ACCESS_REMOTE_WRITE ? TPT_REMOTE_WRITE : 0) |
+	       (acc & IB_ACCESS_REMOTE_READ ? TPT_REMOTE_READ : 0);
 }

 enum iwch_mmid_state {
+8 -17
drivers/infiniband/hw/cxgb3/iwch_qp.c
···
 	wqe->bind.type = TPT_VATO;

 	/* TBD: check perms */
-	wqe->bind.perms = iwch_ib_to_tpt_access(mw_bind->mw_access_flags);
+	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
 	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
 	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
 	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
···
 			   (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
 			   (qhp->attr.mpa_attr.crc_enabled << 2);

-	/*
-	 * XXX - The IWCM doesn't quite handle getting these
-	 * attrs set before going into RTS. For now, just turn
-	 * them on always...
-	 */
-#if 0
-	init_attr.qpcaps = qhp->attr.enableRdmaRead |
-		(qhp->attr.enableRdmaWrite << 1) |
-		(qhp->attr.enableBind << 2) |
-		(qhp->attr.enable_stag0_fastreg << 3) |
-		(qhp->attr.enable_stag0_fastreg << 4);
-#else
-	init_attr.qpcaps = 0x1f;
-#endif
+	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
+			   uP_RI_QP_RDMA_WRITE_ENABLE |
+			   uP_RI_QP_BIND_ENABLE;
+	if (!qhp->ibqp.uobject)
+		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
+				    uP_RI_QP_FAST_REGISTER_ENABLE;
+
 	init_attr.tcp_emss = qhp->ep->emss;
 	init_attr.ord = qhp->attr.max_ord;
 	init_attr.ird = qhp->attr.max_ird;
···
 	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
 	init_attr.rqe_count = iwch_rqes_posted(qhp);
 	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
-	if (!qhp->ibqp.uobject)
-		init_attr.flags |= PRIV_QP;
 	if (peer2peer) {
 		init_attr.rtr_type = RTR_READ;
 		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
+3 -2
drivers/infiniband/hw/ipath/ipath_driver.c
···
 		 */
 		ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
 			   " %x, len %x hdrq+%x rhf: %Lx\n",
-			   etail, tlen, l,
+			   etail, tlen, l, (unsigned long long)
 			   le64_to_cpu(*(__le64 *) rhf_addr));
 		if (ipath_debug & __IPATH_ERRPKTDBG) {
 			u32 j, *d, dw = rsize-2;
···
 			  0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
 		if (oldval != dd->ipath_pioavailshadow[i])
 			ipath_dbg("shadow[%d] was %Lx, now %lx\n",
-				  i, oldval, dd->ipath_pioavailshadow[i]);
+				  i, (unsigned long long) oldval,
+				  dd->ipath_pioavailshadow[i]);
 	}
 	spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
 }
+4 -3
drivers/infiniband/hw/ipath/ipath_iba7220.c
···
 	ipath_cdbg(VERBOSE, "done: xgxs=%llx from %llx\n",
 		   (unsigned long long)
 		   ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig),
-		   prev_val);
+		   (unsigned long long) prev_val);

 	guid = be64_to_cpu(dd->ipath_guid);

···
 		ipath_dbg("No GUID for heartbeat, faking %llx\n",
 			  (unsigned long long)guid);
 	} else
-		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n", guid);
+		ipath_cdbg(VERBOSE, "Wrote %llX to HRTBT_GUID\n",
+			   (unsigned long long) guid);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_hrtbt_guid, guid);
 	return ret;
 }
···
 	if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
 		ipath_dbg("Did not get to DDR INIT (%x) after %Lu msecs\n",
 			  ipath_ib_state(dd, dd->ipath_lastibcstat),
-			  jiffies_to_msecs(jiffies)-startms);
+			  (unsigned long long) jiffies_to_msecs(jiffies)-startms);
 		dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG;
 		if (dd->ipath_autoneg_tries == IPATH_AUTONEG_TRIES) {
 			dd->ipath_flags |= IPATH_IB_AUTONEG_FAILED;
+8 -4
drivers/infiniband/hw/ipath/ipath_intr.c
···
 			dd->ipath_cregs->cr_iblinkerrrecovcnt);
 		if (linkrecov != dd->ipath_lastlinkrecov) {
 			ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
-				  ibcs, ib_linkstate(dd, ibcs),
+				  (unsigned long long) ibcs,
+				  ib_linkstate(dd, ibcs),
 				  ipath_ibcstatus_str[ltstate],
-				  linkrecov);
+				  (unsigned long long) linkrecov);
 			/* and no more until active again */
 			dd->ipath_lastlinkrecov = 0;
 			ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
···
 	if (unlikely(istat & ~dd->ipath_i_bitsextant))
 		ipath_dev_err(dd,
 			      "interrupt with unknown interrupts %Lx set\n",
+			      (unsigned long long)
 			      istat & ~dd->ipath_i_bitsextant);
 	else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
-		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n", istat);
+		ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
+			   (unsigned long long) istat);

 	if (istat & INFINIPATH_I_ERROR) {
 		ipath_stats.sps_errints++;
···
 				     dd->ipath_kregs->kr_errorstatus);
 		if (!estat)
 			dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
-				 "but no error bits set!\n", istat);
+				 "but no error bits set!\n",
+				 (unsigned long long) istat);
 		else if (estat == -1LL)
 			/*
 			 * should we try clearing all, or hope next read
+3 -3
drivers/infiniband/hw/ipath/ipath_verbs.c
···
 	struct ipath_verbs_txreq *tx = cookie;
 	struct ipath_qp *qp = tx->qp;
 	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-	unsigned int flags;
+	unsigned long flags;
 	enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
 		IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;

···

 static void decrement_dma_busy(struct ipath_qp *qp)
 {
-	unsigned int flags;
+	unsigned long flags;

 	if (atomic_dec_and_test(&qp->s_dma_busy)) {
 		spin_lock_irqsave(&qp->s_lock, flags);
···
 	unsigned flush_wc;
 	u32 control;
 	int ret;
-	unsigned int flags;
+	unsigned long flags;

 	piobuf = ipath_getpiobuf(dd, plen, NULL);
 	if (unlikely(piobuf == NULL)) {
+16 -17
drivers/infiniband/hw/mlx4/cq.c
···
 		wc->vendor_err = cqe->vendor_err_syndrome;
 }

-static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
+static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
 {
-	return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-				      MLX4_CQE_IPOIB_STATUS_IPV4F |
-				      MLX4_CQE_IPOIB_STATUS_IPV4OPT |
-				      MLX4_CQE_IPOIB_STATUS_IPV6 |
-				      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
-		cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
-			    MLX4_CQE_IPOIB_STATUS_IPOK)) &&
-		(status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
-				      MLX4_CQE_IPOIB_STATUS_TCP)) &&
+	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+				      MLX4_CQE_STATUS_IPV4F |
+				      MLX4_CQE_STATUS_IPV4OPT |
+				      MLX4_CQE_STATUS_IPV6 |
+				      MLX4_CQE_STATUS_IPOK)) ==
+		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+			    MLX4_CQE_STATUS_IPOK)) &&
+		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
+				      MLX4_CQE_STATUS_TCP)) &&
 		checksum == cpu_to_be16(0xffff);
 }

···
 	}

 	if (!*cur_qp ||
-	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
+	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
 		/*
 		 * We do not have to take the QP table lock here,
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
 		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
-				       be32_to_cpu(cqe->my_qpn));
+				       be32_to_cpu(cqe->vlan_my_qpn));
 		if (unlikely(!mqp)) {
 			printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
-			       cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
+			       cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
 			return -EINVAL;
 		}

···
 		}

 		wc->slid = be16_to_cpu(cqe->rlid);
-		wc->sl = cqe->sl >> 4;
+		wc->sl = be16_to_cpu(cqe->sl_vid >> 12);
 		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
 		wc->src_qp = g_mlpath_rqpn & 0xffffff;
 		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
 		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
 		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
-		wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
-						    cqe->checksum);
+		wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
 	}

 	return 0;
···
 	 */
 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-		if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
+		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
 			++nfreed;
+1 -1
drivers/infiniband/hw/mlx4/qp.c
···
 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
 					      ilog2(dev->dev->caps.max_gso_sz);
 		else
-			context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
 	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			printk(KERN_ERR "path MTU (%u) is invalid\n",
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_cm.c
···
 		sge[i].length = PAGE_SIZE;

 	wr->next = NULL;
-	wr->sg_list = priv->cm.rx_sge;
+	wr->sg_list = sge;
 	wr->num_sge = priv->cm.num_frags;
 }

+24 -12
include/linux/mlx4/cq.h
···
 #include <linux/mlx4/doorbell.h>

 struct mlx4_cqe {
-	__be32			my_qpn;
+	__be32			vlan_my_qpn;
 	__be32			immed_rss_invalid;
 	__be32			g_mlpath_rqpn;
-	u8			sl;
-	u8			reserved1;
+	__be16			sl_vid;
 	__be16			rlid;
-	__be32			ipoib_status;
+	__be16			status;
+	u8			ipv6_ext_mask;
+	u8			badfcs_enc;
 	__be32			byte_cnt;
 	__be16			wqe_index;
 	__be16			checksum;
-	u8			reserved2[3];
+	u8			reserved[3];
 	u8			owner_sr_opcode;
 };

···
 	u8			syndrome;
 	u8			reserved2[3];
 	u8			owner_sr_opcode;
+};
+
+enum {
+	MLX4_CQE_VLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_QPN_MASK		= 0xffffff,
 };

 enum {
···
 };

 enum {
-	MLX4_CQE_IPOIB_STATUS_IPV4	= 1 << 22,
-	MLX4_CQE_IPOIB_STATUS_IPV4F	= 1 << 23,
-	MLX4_CQE_IPOIB_STATUS_IPV6	= 1 << 24,
-	MLX4_CQE_IPOIB_STATUS_IPV4OPT	= 1 << 25,
-	MLX4_CQE_IPOIB_STATUS_TCP	= 1 << 26,
-	MLX4_CQE_IPOIB_STATUS_UDP	= 1 << 27,
-	MLX4_CQE_IPOIB_STATUS_IPOK	= 1 << 28,
+	MLX4_CQE_STATUS_IPV4		= 1 << 6,
+	MLX4_CQE_STATUS_IPV4F		= 1 << 7,
+	MLX4_CQE_STATUS_IPV6		= 1 << 8,
+	MLX4_CQE_STATUS_IPV4OPT		= 1 << 9,
+	MLX4_CQE_STATUS_TCP		= 1 << 10,
+	MLX4_CQE_STATUS_UDP		= 1 << 11,
+	MLX4_CQE_STATUS_IPOK		= 1 << 12,
+};
+
+enum {
+	MLX4_CQE_LLC			= 1,
+	MLX4_CQE_SNAP			= 1 << 1,
+	MLX4_CQE_BAD_FCS		= 1 << 4,
 };

 static inline void mlx4_cq_arm(struct mlx4_cq *cq, u32 cmd,
+2 -6
include/rdma/rdma_cm.h
···
 };

 struct rdma_addr {
-	struct sockaddr src_addr;
-	u8 src_pad[sizeof(struct sockaddr_in6) -
-		   sizeof(struct sockaddr)];
-	struct sockaddr dst_addr;
-	u8 dst_pad[sizeof(struct sockaddr_in6) -
-		   sizeof(struct sockaddr)];
+	struct sockaddr_storage src_addr;
+	struct sockaddr_storage dst_addr;
 	struct rdma_dev_addr dev_addr;
 };
