Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branches 'core', 'cxgb4', 'ip-roce', 'iser', 'misc', 'mlx4', 'nes', 'ocrdma', 'qib', 'sgwrapper', 'srp' and 'usnic' into for-next

+2972 -882
-17
drivers/infiniband/core/cm.c
···
 				grh, &av->ah_attr);
 }
 
-int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
-{
-	struct cm_id_private *cm_id_priv;
-
-	cm_id_priv = container_of(id, struct cm_id_private, id);
-
-	if (smac != NULL)
-		memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
-
-	if (alt_smac != NULL)
-		memcpy(cm_id_priv->alt_av.smac, alt_smac,
-		       sizeof(cm_id_priv->alt_av.smac));
-
-	return 0;
-}
-EXPORT_SYMBOL(ib_update_cm_av);
-
 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 {
 	struct cm_device *cm_dev;
-26
drivers/infiniband/core/cma.c
···
 	struct rdma_id_private *listen_id, *conn_id;
 	struct rdma_cm_event event;
 	int offset, ret;
-	u8 smac[ETH_ALEN];
-	u8 alt_smac[ETH_ALEN];
-	u8 *psmac = smac;
-	u8 *palt_smac = alt_smac;
-	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
-			RDMA_TRANSPORT_IB) &&
-		       (rdma_port_get_link_layer(cm_id->device,
-			ib_event->param.req_rcvd.port) ==
-			IB_LINK_LAYER_ETHERNET));
 
 	listen_id = cm_id->context;
 	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
···
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (ret)
 		goto err3;
-
-	if (is_iboe) {
-		if (ib_event->param.req_rcvd.primary_path != NULL)
-			rdma_addr_find_smac_by_sgid(
-				&ib_event->param.req_rcvd.primary_path->sgid,
-				psmac, NULL);
-		else
-			psmac = NULL;
-		if (ib_event->param.req_rcvd.alternate_path != NULL)
-			rdma_addr_find_smac_by_sgid(
-				&ib_event->param.req_rcvd.alternate_path->sgid,
-				palt_smac, NULL);
-		else
-			palt_smac = NULL;
-	}
 	/*
 	 * Acquire mutex to prevent user executing rdma_destroy_id()
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (is_iboe)
-		ib_update_cm_av(cm_id, psmac, palt_smac);
 	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
 	    (conn_id->id.qp_type != IB_QPT_UD))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
+14
drivers/infiniband/core/mad.c
···
 				 mad_send_wr->send_buf.mad,
 				 sge[0].length,
 				 DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
+		return -ENOMEM;
+
 	mad_send_wr->header_mapping = sge[0].addr;
 
 	sge[1].addr = ib_dma_map_single(mad_agent->device,
 					ib_get_payload(mad_send_wr),
 					sge[1].length,
 					DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->header_mapping,
+				    sge[0].length, DMA_TO_DEVICE);
+		return -ENOMEM;
+	}
 	mad_send_wr->payload_mapping = sge[1].addr;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
···
 						 sizeof *mad_priv -
 						   sizeof mad_priv->header,
 						 DMA_FROM_DEVICE);
+		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
+						  sg_list.addr))) {
+			ret = -ENOMEM;
+			break;
+		}
 		mad_priv->header.mapping = sg_list.addr;
 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 		mad_priv->header.mad_list.mad_queue = recv_queue;
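The pattern these mad.c hunks introduce (map, test with ib_dma_mapping_error(), and unwind any earlier mapping before failing) applies to any multi-segment post. A minimal sketch of that pattern using the same ib_verbs DMA helpers the hunks call; the helper function and its parameters are illustrative, not part of the patch:

/* Hypothetical helper: map two buffers for sending, checking each
 * mapping and unwinding the first if the second fails, exactly as the
 * mad.c error paths above do.
 */
static int map_two_segments(struct ib_device *dev,
			    void *hdr, size_t hdr_len,
			    void *payload, size_t pay_len,
			    u64 *hdr_map, u64 *pay_map)
{
	*hdr_map = ib_dma_map_single(dev, hdr, hdr_len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, *hdr_map)))
		return -ENOMEM;

	*pay_map = ib_dma_map_single(dev, payload, pay_len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(dev, *pay_map))) {
		/* undo the first mapping before reporting failure */
		ib_dma_unmap_single(dev, *hdr_map, hdr_len, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	return 0;
}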
+72 -38
drivers/infiniband/hw/cxgb4/cm.c
···
 module_param(c4iw_debug, int, 0644);
 MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
 
-static int peer2peer;
+static int peer2peer = 1;
 module_param(peer2peer, int, 0644);
-MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
+MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");
 
 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
 module_param(p2p_type, int, 0644);
···
 	n = dst_neigh_lookup(&rt->dst, &peer_ip);
 	if (!n)
 		return NULL;
-	if (!our_interface(dev, n->dev)) {
+	if (!our_interface(dev, n->dev) &&
+	    !(n->dev->flags & IFF_LOOPBACK)) {
 		dst_release(&rt->dst);
 		return NULL;
 	}
···
 	ep->mpa_skb = skb;
 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 	start_ep_timer(ep);
-	state_set(&ep->com, MPA_REQ_SENT);
+	__state_set(&ep->com, MPA_REQ_SENT);
 	ep->mpa_attr.initiator = 1;
+	ep->snd_seq += mpalen;
 	return;
 }
···
 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
+	ep->snd_seq += mpalen;
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
···
 	skb_get(skb);
 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 	ep->mpa_skb = skb;
-	state_set(&ep->com, MPA_REP_SENT);
+	__state_set(&ep->com, MPA_REP_SENT);
+	ep->snd_seq += mpalen;
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
···
 	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
 	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
 
+	mutex_lock(&ep->com.mutex);
 	dst_confirm(ep->dst);
 
 	/* setup the hwtid for this connection */
···
 		send_mpa_req(ep, skb, 1);
 	else
 		send_mpa_req(ep, skb, mpa_rev);
-
+	mutex_unlock(&ep->com.mutex);
 	return 0;
 }
 
-static void close_complete_upcall(struct c4iw_ep *ep)
+static void close_complete_upcall(struct c4iw_ep *ep, int status)
 {
 	struct iw_cm_event event;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
 	event.event = IW_CM_EVENT_CLOSE;
+	event.status = status;
 	if (ep->com.cm_id) {
 		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
 		     ep, ep->com.cm_id, ep->hwtid);
···
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	close_complete_upcall(ep);
 	state_set(&ep->com, ABORTING);
 	set_bit(ABORT_CONN, &ep->com.history);
 	return send_abort(ep, skb, gfp);
···
 	}
 }
 
-static void connect_request_upcall(struct c4iw_ep *ep)
+static int connect_request_upcall(struct c4iw_ep *ep)
 {
 	struct iw_cm_event event;
+	int ret;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	memset(&event, 0, sizeof(event));
···
 		event.private_data_len = ep->plen;
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
-	if (state_read(&ep->parent_ep->com) != DEAD) {
-		c4iw_get_ep(&ep->com);
-		ep->parent_ep->com.cm_id->event_handler(
-						ep->parent_ep->com.cm_id,
-						&event);
-	}
+	c4iw_get_ep(&ep->com);
+	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
+						      &event);
+	if (ret)
+		c4iw_put_ep(&ep->com);
 	set_bit(CONNREQ_UPCALL, &ep->com.history);
 	c4iw_put_ep(&ep->parent_ep->com);
-	ep->parent_ep = NULL;
+	return ret;
 }
 
 static void established_upcall(struct c4iw_ep *ep)
···
 	 * the connection.
 	 */
 	stop_ep_timer(ep);
-	if (state_read(&ep->com) != MPA_REQ_SENT)
+	if (ep->com.state != MPA_REQ_SENT)
 		return;
 
 	/*
···
 	 * start reply message including private data. And
 	 * the MPA header is valid.
 	 */
-	state_set(&ep->com, FPDU_MODE);
+	__state_set(&ep->com, FPDU_MODE);
 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
···
 	}
 	goto out;
 err:
-	state_set(&ep->com, ABORTING);
+	__state_set(&ep->com, ABORTING);
 	send_abort(ep, skb, GFP_KERNEL);
 out:
 	connect_reply_upcall(ep, err);
···
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
-	if (state_read(&ep->com) != MPA_REQ_WAIT)
+	if (ep->com.state != MPA_REQ_WAIT)
 		return;
 
 	/*
···
 		return;
 
 	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
-	stop_ep_timer(ep);
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/*
···
 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
 	     ep->mpa_attr.p2p_type);
 
-	state_set(&ep->com, MPA_REQ_RCVD);
+	__state_set(&ep->com, MPA_REQ_RCVD);
+	stop_ep_timer(ep);
 
 	/* drive upcall */
-	connect_request_upcall(ep);
+	mutex_lock(&ep->parent_ep->com.mutex);
+	if (ep->parent_ep->com.state != DEAD) {
+		if (connect_request_upcall(ep))
+			abort_connection(ep, skb, GFP_KERNEL);
+	} else {
+		abort_connection(ep, skb, GFP_KERNEL);
+	}
+	mutex_unlock(&ep->parent_ep->com.mutex);
 	return;
 }
···
 	__u8 status = hdr->status;
 
 	ep = lookup_tid(t, tid);
+	if (!ep)
+		return 0;
 	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
+	mutex_lock(&ep->com.mutex);
 
 	/* update RX credits */
 	update_rx_credits(ep, dlen);
 
-	switch (state_read(&ep->com)) {
+	switch (ep->com.state) {
 	case MPA_REQ_SENT:
 		ep->rcv_seq += dlen;
 		process_mpa_reply(ep, skb);
···
 		pr_err("%s Unexpected streaming data."
 		       " qpid %u ep %p state %d tid %u status %d\n",
 		       __func__, ep->com.qp->wq.sq.qid, ep,
-		       state_read(&ep->com), ep->hwtid, status);
+		       ep->com.state, ep->hwtid, status);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
···
 	default:
 		break;
 	}
+	mutex_unlock(&ep->com.mutex);
 	return 0;
 }
···
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	}
-	close_complete_upcall(ep);
+	close_complete_upcall(ep, 0);
 	__state_set(&ep->com, DEAD);
 	release = 1;
 	disconnect = 0;
···
 				       C4IW_QP_ATTR_NEXT_STATE,
 				       &attrs, 1);
 		}
-		close_complete_upcall(ep);
+		close_complete_upcall(ep, 0);
 		__state_set(&ep->com, DEAD);
 		release = 1;
 		break;
···
 
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
-	int err;
+	int err = 0;
+	int disconnect = 0;
 	struct c4iw_ep *ep = to_ep(cm_id);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
-	if (state_read(&ep->com) == DEAD) {
+	mutex_lock(&ep->com.mutex);
+	if (ep->com.state == DEAD) {
+		mutex_unlock(&ep->com.mutex);
 		c4iw_put_ep(&ep->com);
 		return -ECONNRESET;
 	}
 	set_bit(ULP_REJECT, &ep->com.history);
-	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+	BUG_ON(ep->com.state != MPA_REQ_RCVD);
 	if (mpa_rev == 0)
 		abort_connection(ep, NULL, GFP_KERNEL);
 	else {
 		err = send_mpa_reject(ep, pdata, pdata_len);
-		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+		disconnect = 1;
 	}
+	mutex_unlock(&ep->com.mutex);
+	if (disconnect)
+		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	c4iw_put_ep(&ep->com);
 	return 0;
 }
···
 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	if (state_read(&ep->com) == DEAD) {
+
+	mutex_lock(&ep->com.mutex);
+	if (ep->com.state == DEAD) {
 		err = -ECONNRESET;
 		goto err;
 	}
 
-	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
+	BUG_ON(ep->com.state != MPA_REQ_RCVD);
 	BUG_ON(!qp);
 
 	set_bit(ULP_ACCEPT, &ep->com.history);
···
 	if (err)
 		goto err1;
 
-	state_set(&ep->com, FPDU_MODE);
+	__state_set(&ep->com, FPDU_MODE);
 	established_upcall(ep);
+	mutex_unlock(&ep->com.mutex);
 	c4iw_put_ep(&ep->com);
 	return 0;
 err1:
 	ep->com.cm_id = NULL;
 	cm_id->rem_ref(cm_id);
 err:
+	mutex_unlock(&ep->com.mutex);
 	c4iw_put_ep(&ep->com);
 	return err;
 }
···
 	rdev = &ep->com.dev->rdev;
 	if (c4iw_fatal_error(rdev)) {
 		fatal = 1;
-		close_complete_upcall(ep);
+		close_complete_upcall(ep, -EIO);
 		ep->com.state = DEAD;
 	}
 	switch (ep->com.state) {
···
 	if (close) {
 		if (abrupt) {
 			set_bit(EP_DISC_ABORT, &ep->com.history);
-			close_complete_upcall(ep);
+			close_complete_upcall(ep, -ECONNRESET);
 			ret = send_abort(ep, NULL, gfp);
 		} else {
 			set_bit(EP_DISC_CLOSE, &ep->com.history);
···
 	struct sk_buff *req_skb;
 	struct fw_ofld_connection_wr *req;
 	struct cpl_pass_accept_req *cpl = cplhdr(skb);
+	int ret;
 
 	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
···
 	req->cookie = (unsigned long)skb;
 
 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
-	cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+	if (ret < 0) {
+		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
+		       ret);
+		kfree_skb(skb);
+		kfree_skb(req_skb);
+	}
 }
 
 /*
···
 		pi = (struct port_info *)netdev_priv(pdev);
 		tx_chan = cxgb4_port_chan(pdev);
 	}
+	neigh_release(neigh);
 	if (!e) {
 		pr_err("%s - failed to allocate l2t entry!\n",
 		       __func__);
 		goto free_dst;
 	}
 
-	neigh_release(neigh);
 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
 	window = (__force u16) htons((__force u16)tcph->window);
···
 				       &attrs, 1);
 		}
 		__state_set(&ep->com, ABORTING);
+		close_complete_upcall(ep, -ETIMEDOUT);
 		break;
 	default:
 		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
+24 -7
drivers/infiniband/hw/cxgb4/cq.c
···
 
 	if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
 
-		/*
-		 * drop peer2peer RTR reads.
+		/* If we have reached here because of async
+		 * event or other error, and have egress error
+		 * then drop
+		 */
+		if (CQE_TYPE(hw_cqe) == 1)
+			goto next_cqe;
+
+		/* drop peer2peer RTR reads.
 		 */
 		if (CQE_WRID_STAG(hw_cqe) == 1)
 			goto next_cqe;
···
 	 */
 	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {
 
-		/*
-		 * If this is an unsolicited read response, then the read
+		/* If we have reached here because of async
+		 * event or other error, and have egress error
+		 * then drop
+		 */
+		if (CQE_TYPE(hw_cqe) == 1) {
+			if (CQE_STATUS(hw_cqe))
+				t4_set_wq_in_error(wq);
+			ret = -EAGAIN;
+			goto skip_cqe;
+		}
+
+		/* If this is an unsolicited read response, then the read
 		 * was generated by the kernel driver as part of peer-2-peer
 		 * connection setup. So ignore the completion.
 		 */
···
 	 */
 	if (SQ_TYPE(hw_cqe)) {
 		int idx = CQE_WRID_SQ_IDX(hw_cqe);
-		BUG_ON(idx > wq->sq.size);
+		BUG_ON(idx >= wq->sq.size);
 
 		/*
 		 * Account for any unsignaled completions completed by
···
 			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
 		else
 			wq->sq.in_use -= idx - wq->sq.cidx;
-		BUG_ON(wq->sq.in_use < 0 && wq->sq.in_use < wq->sq.size);
+		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
 
 		wq->sq.cidx = (uint16_t)idx;
 		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
···
 	/*
 	 * Make actual HW queue 2x to avoid cdix_inc overflows.
 	 */
-	hwentries = entries * 2;
+	hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
 
 	/*
 	 * Make HW queue at least 64 entries so GTS updates aren't too
···
 		if (!mm2)
 			goto err4;
 
+		memset(&uresp, 0, sizeof(uresp));
 		uresp.qid_mask = rhp->rdev.cqmask;
 		uresp.cqid = chp->cq.cqid;
 		uresp.size = chp->cq.size;
+4 -2
drivers/infiniband/hw/cxgb4/device.c
···
 	}
 
 	opcode = *(u8 *)rsp;
-	if (c4iw_handlers[opcode])
+	if (c4iw_handlers[opcode]) {
 		c4iw_handlers[opcode](dev, skb);
-	else
+	} else {
 		pr_info("%s no handler opcode 0x%x...\n", __func__,
 			opcode);
+		kfree_skb(skb);
+	}
 
 	return 0;
 nomem:
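The device.c hunk closes an sk_buff leak: when no handler claims an opcode, the skb was logged but never freed. The ownership rule is that a received buffer must end up with exactly one consumer or be freed by the dispatcher. A hedged sketch of that rule; the handler table and context pointer here are illustrative, not from the patch:

typedef int (*pkt_handler_t)(void *ctx, struct sk_buff *skb);

/* Illustrative dispatcher: a matched handler takes ownership of the
 * skb; on the unmatched path the dispatcher must kfree_skb() it
 * itself, which is exactly what the fix above adds.
 */
static int dispatch_pkt(pkt_handler_t *handlers, void *ctx,
			u8 opcode, struct sk_buff *skb)
{
	if (handlers[opcode])
		return handlers[opcode](ctx, skb);	/* ownership passes */

	pr_info("no handler for opcode 0x%x, dropping\n", opcode);
	kfree_skb(skb);					/* unmatched: free here or leak */
	return 0;
}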
+2
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
···
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 	dma_addr_t dma_addr;
 	struct c4iw_dev *dev;
+	int pll_len;
 };
 
 static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
···
 	atomic_t refcnt;
 	wait_queue_head_t wait;
 	struct timer_list timer;
+	int sq_sig_all;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
+12 -4
drivers/infiniband/hw/cxgb4/mem.c
···
 
 #include "iw_cxgb4.h"
 
-int use_dsgl = 1;
+int use_dsgl = 0;
 module_param(use_dsgl, int, 0644);
-MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
+MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=0)");
 
 #define T4_ULPTX_MIN_IO 32
 #define C4IW_MAX_INLINE_SIZE 96
···
 	dma_unmap_addr_set(c4pl, mapping, dma_addr);
 	c4pl->dma_addr = dma_addr;
 	c4pl->dev = dev;
-	c4pl->ibpl.max_page_list_len = pll_len;
+	c4pl->pll_len = pll_len;
+
+	PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
+	     __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
+	     &c4pl->dma_addr);
 
 	return &c4pl->ibpl;
 }
···
 {
 	struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
 
+	PDBG("%s c4pl %p pll_len %u page_list %p dma_addr %pad\n",
+	     __func__, c4pl, c4pl->pll_len, c4pl->ibpl.page_list,
+	     &c4pl->dma_addr);
+
 	dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
-			  c4pl->ibpl.max_page_list_len,
+			  c4pl->pll_len,
 			  c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
 	kfree(c4pl);
 }
+5 -3
drivers/infiniband/hw/cxgb4/qp.c
···
 		fw_flags = 0;
 		if (wr->send_flags & IB_SEND_SOLICITED)
 			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
-		if (wr->send_flags & IB_SEND_SIGNALED)
+		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
 			fw_flags |= FW_RI_COMPLETION_FLAG;
 		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
 		switch (wr->opcode) {
···
 		}
 		swsqe->idx = qhp->wq.sq.pidx;
 		swsqe->complete = 0;
-		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
+				  qhp->sq_sig_all;
 		swsqe->flushed = 0;
 		swsqe->wr_id = wr->wr_id;
···
 	struct c4iw_cq *schp;
 	struct c4iw_cq *rchp;
 	struct c4iw_create_qp_resp uresp;
-	int sqsize, rqsize;
+	unsigned int sqsize, rqsize;
 	struct c4iw_ucontext *ucontext;
 	int ret;
 	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
···
 	qhp->attr.enable_bind = 1;
 	qhp->attr.max_ord = 1;
 	qhp->attr.max_ird = 1;
+	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 	spin_lock_init(&qhp->lock);
 	mutex_init(&qhp->mutex);
 	init_waitqueue_head(&qhp->wait);
+1
drivers/infiniband/hw/ehca/ehca_cq.c
···
 			(my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
 		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
 			ehca_err(device, "Copy to udata failed.");
+			cq = ERR_PTR(-EFAULT);
 			goto create_cq_exit4;
 		}
 	}
-12
drivers/infiniband/hw/ehca/ehca_mrmw.c
···
 	/* This is only a stub; nothing to be done here */
 }
 
-static u64 ehca_dma_address(struct ib_device *dev, struct scatterlist *sg)
-{
-	return sg->dma_address;
-}
-
-static unsigned int ehca_dma_len(struct ib_device *dev, struct scatterlist *sg)
-{
-	return sg->length;
-}
-
 static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
 					 size_t size,
 					 enum dma_data_direction dir)
···
 	.unmap_page = ehca_dma_unmap_page,
 	.map_sg = ehca_dma_map_sg,
 	.unmap_sg = ehca_dma_unmap_sg,
-	.dma_address = ehca_dma_address,
-	.dma_len = ehca_dma_len,
 	.sync_single_for_cpu = ehca_dma_sync_single_for_cpu,
 	.sync_single_for_device = ehca_dma_sync_single_for_device,
 	.alloc_coherent = ehca_dma_alloc_coherent,
+26 -42
drivers/infiniband/hw/ipath/ipath_diag.c
···
 			   size_t count, loff_t *off)
 {
 	u32 __iomem *piobuf;
-	u32 plen, clen, pbufn;
+	u32 plen, pbufn, maxlen_reserve;
 	struct ipath_diag_pkt odp;
 	struct ipath_diag_xpkt dp;
 	u32 *tmpbuf = NULL;
···
 	u64 val;
 	u32 l_state, lt_state; /* LinkState, LinkTrainingState */
 
-	if (count < sizeof(odp)) {
-		ret = -EINVAL;
-		goto bail;
-	}
 
 	if (count == sizeof(dp)) {
 		if (copy_from_user(&dp, data, sizeof(dp))) {
 			ret = -EFAULT;
 			goto bail;
 		}
-	} else if (copy_from_user(&odp, data, sizeof(odp))) {
-		ret = -EFAULT;
+	} else if (count == sizeof(odp)) {
+		if (copy_from_user(&odp, data, sizeof(odp))) {
+			ret = -EFAULT;
+			goto bail;
+		}
+	} else {
+		ret = -EINVAL;
 		goto bail;
-	}
-
-	/*
-	 * Due to padding/alignment issues (lessened with new struct)
-	 * the old and new structs are the same length. We need to
-	 * disambiguate them, which we can do because odp.len has never
-	 * been less than the total of LRH+BTH+DETH so far, while
-	 * dp.unit (same offset) unit is unlikely to get that high.
-	 * Similarly, dp.data, the pointer to user at the same offset
-	 * as odp.unit, is almost certainly at least one (512byte)page
-	 * "above" NULL. The if-block below can be omitted if compatibility
-	 * between a new driver and older diagnostic code is unimportant.
-	 * compatibility the other direction (new diags, old driver) is
-	 * handled in the diagnostic code, with a warning.
-	 */
-	if (dp.unit >= 20 && dp.data < 512) {
-		/* very probable version mismatch. Fix it up */
-		memcpy(&odp, &dp, sizeof(odp));
-		/* We got a legacy dp, copy elements to dp */
-		dp.unit = odp.unit;
-		dp.data = odp.data;
-		dp.len = odp.len;
-		dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
 	}
 
 	/* send count must be an exact number of dwords */
···
 		goto bail;
 	}
 
-	clen = dp.len >> 2;
+	plen = dp.len >> 2;
 
 	dd = ipath_lookup(dp.unit);
 	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
···
 		goto bail;
 	}
 
-	/* need total length before first word written */
-	/* +1 word is for the qword padding */
+	/*
+	 * need total length before first word written, plus 2 Dwords. One Dword
+	 * is for padding so we get the full user data when not aligned on
+	 * a word boundary. The other Dword is to make sure we have room for the
+	 * ICRC which gets tacked on later.
+	 */
+	maxlen_reserve = 2 * sizeof(u32);
+	if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
+		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
+			  dp.len, dd->ipath_ibmaxlen);
+		ret = -EINVAL;
+		goto bail;
+	}
+
 	plen = sizeof(u32) + dp.len;
 
-	if ((plen + 4) > dd->ipath_ibmaxlen) {
-		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
-			  plen - 4, dd->ipath_ibmaxlen);
-		ret = -EINVAL;
-		goto bail;	/* before writing pbc */
-	}
 	tmpbuf = vmalloc(plen);
 	if (!tmpbuf) {
 		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
···
 	 */
 	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
 		ipath_flush_wc();
-		__iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
+		__iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
 		ipath_flush_wc();
-		__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
+		__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
 	} else
-		__iowrite32_copy(piobuf + 2, tmpbuf, clen);
+		__iowrite32_copy(piobuf + 2, tmpbuf, plen);
 
 	ipath_flush_wc();
+15 -28
drivers/infiniband/hw/ipath/ipath_dma.c
···
 			ret = 0;
 			break;
 		}
+		sg->dma_address = addr + sg->offset;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+		sg->dma_length = sg->length;
+#endif
 	}
 	return ret;
 }
···
 			    enum dma_data_direction direction)
 {
 	BUG_ON(!valid_dma_direction(direction));
-}
-
-static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
-{
-	u64 addr = (u64) page_address(sg_page(sg));
-
-	if (addr)
-		addr += sg->offset;
-	return addr;
-}
-
-static unsigned int ipath_sg_dma_len(struct ib_device *dev,
-				     struct scatterlist *sg)
-{
-	return sg->length;
 }
 
 static void ipath_sync_single_for_cpu(struct ib_device *dev,
···
 }
 
 struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
-	ipath_mapping_error,
-	ipath_dma_map_single,
-	ipath_dma_unmap_single,
-	ipath_dma_map_page,
-	ipath_dma_unmap_page,
-	ipath_map_sg,
-	ipath_unmap_sg,
-	ipath_sg_dma_address,
-	ipath_sg_dma_len,
-	ipath_sync_single_for_cpu,
-	ipath_sync_single_for_device,
-	ipath_dma_alloc_coherent,
-	ipath_dma_free_coherent
+	.mapping_error = ipath_mapping_error,
+	.map_single = ipath_dma_map_single,
+	.unmap_single = ipath_dma_unmap_single,
+	.map_page = ipath_dma_map_page,
+	.unmap_page = ipath_dma_unmap_page,
+	.map_sg = ipath_map_sg,
+	.unmap_sg = ipath_unmap_sg,
+	.sync_single_for_cpu = ipath_sync_single_for_cpu,
+	.sync_single_for_device = ipath_sync_single_for_device,
+	.alloc_coherent = ipath_dma_alloc_coherent,
+	.free_coherent = ipath_dma_free_coherent
 };
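The last ipath_dma.c hunk converts a positional struct initializer to C99 designated initializers, which is what lets the two dma_address/dma_len slots be dropped without silently shifting every later function pointer into the wrong member. A small self-contained C sketch of the difference; the struct and functions are invented for illustration:

#include <stdio.h>

struct ops {
	int (*open)(void);
	int (*close)(void);
};

static int my_open(void)  { return 1; }
static int my_close(void) { return 2; }

/* Positional: values bind to members by order, so inserting or
 * removing a member reassigns every pointer after it. */
static struct ops positional = { my_open, my_close };

/* Designated: values bind to members by name and survive layout
 * changes, which is why the patch rewrites ipath_dma_mapping_ops. */
static struct ops designated = {
	.open  = my_open,
	.close = my_close,
};

int main(void)
{
	printf("%d %d\n", positional.open(), designated.close());
	return 0;
}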
+6 -5
drivers/infiniband/hw/mlx4/main.c
···
 
 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
-	char name[32];
+	char name[80];
 	int eq_per_port = 0;
 	int added_eqs = 0;
 	int total_eqs = 0;
···
 	eq = 0;
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
 		for (j = 0; j < eq_per_port; j++) {
-			sprintf(name, "mlx4-ib-%d-%d@%s",
-				i, j, dev->pdev->bus->name);
+			snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
+				 i, j, dev->pdev->bus->name);
 			/* Set IRQ for specific name (per ring) */
 			if (mlx4_assign_eq(dev, name, NULL,
 					   &ibdev->eq_table[eq])) {
···
 			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
 			if (err)
 				ibdev->counters[i] = -1;
-		} else
-			ibdev->counters[i] = -1;
+		} else {
+			ibdev->counters[i] = -1;
+		}
 	}
 
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+1 -1
drivers/infiniband/hw/mlx4/qp.c
···
 			return err;
 	}
 
-	if (ah->av.eth.vlan != 0xffff) {
+	if (ah->av.eth.vlan != cpu_to_be16(0xffff)) {
 		vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
 		is_vlan = 1;
 	}
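The one-line mlx4 change fixes an endianness-annotated comparison: ah->av.eth.vlan is a big-endian (__be16) field, so the constant must be converted with cpu_to_be16() before comparing. For the all-ones no-VLAN sentinel the two byte orders happen to coincide, but the annotated form is what sparse expects and the conversion matters for any other constant. A user-space sketch of the general trap, using htons() as the cpu_to_be16() equivalent; the VLAN value is made up:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vlan_be = htons(0x0abc);	/* field kept in big-endian order */

	/* Wrong: big-endian field vs. host-order constant; true only
	 * on big-endian hosts. */
	if (vlan_be == 0x0abc)
		printf("matched only on big-endian hosts\n");

	/* Right: convert the constant into the field's byte order,
	 * mirroring cpu_to_be16(0xffff) in the patch. */
	if (vlan_be == htons(0x0abc))
		printf("matched portably\n");

	return 0;
}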
+1
drivers/infiniband/hw/mthca/mthca_provider.c
···
 
 	if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
 		mthca_free_cq(to_mdev(ibdev), cq);
+		err = -EFAULT;
 		goto err_free;
 	}
+90 -29
drivers/infiniband/hw/nes/nes_cm.c
···
 static void build_rdma0_msg(struct nes_cm_node *, struct nes_qp **);
 
 static void print_core(struct nes_cm_core *core);
+static void record_ird_ord(struct nes_cm_node *, u16, u16);
 
 /* External CM API Interface */
 /* instance of function pointers for client API */
···
 		}
 	}
 
-
 	if (priv_data_len + mpa_hdr_len != len) {
 		nes_debug(NES_DBG_CM, "The received ietf buffer was not right"
 			" complete (%x + %x != %x)\n",
···
 		/* send reset */
 		return -EINVAL;
 	}
+	if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD)
+		cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
 
-	if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
+	if (cm_node->mpav2_ird_ord != IETF_NO_IRD_ORD) {
 		/* responder */
-		if (cm_node->ord_size > ird_size)
-			cm_node->ord_size = ird_size;
-	} else {
-		/* initiator */
-		if (cm_node->ord_size > ird_size)
-			cm_node->ord_size = ird_size;
+		if (cm_node->state != NES_CM_STATE_MPAREQ_SENT) {
+			/* we are still negotiating */
+			if (ord_size > NES_MAX_IRD) {
+				cm_node->ird_size = NES_MAX_IRD;
+			} else {
+				cm_node->ird_size = ord_size;
+				if (ord_size == 0 &&
+				    (rtr_ctrl_ord & IETF_RDMA0_READ)) {
+					cm_node->ird_size = 1;
+					nes_debug(NES_DBG_CM,
+						  "%s: Remote peer doesn't support RDMA0_READ (ord=%u)\n",
+						  __func__, ord_size);
+				}
+			}
+			if (ird_size > NES_MAX_ORD)
+				cm_node->ord_size = NES_MAX_ORD;
+			else
+				cm_node->ord_size = ird_size;
+		} else { /* initiator */
+			if (ord_size > NES_MAX_IRD) {
+				nes_debug(NES_DBG_CM,
+					  "%s: Unable to support the requested (ord =%u)\n",
+					  __func__, ord_size);
+				return -EINVAL;
+			}
+			cm_node->ird_size = ord_size;
 
-		if (cm_node->ird_size < ord_size) {
-			/* no resources available */
-			/* send terminate message */
-			return -EINVAL;
+			if (ird_size > NES_MAX_ORD) {
+				cm_node->ord_size = NES_MAX_ORD;
+			} else {
+				if (ird_size == 0 &&
+				    (rtr_ctrl_ord & IETF_RDMA0_READ)) {
+					nes_debug(NES_DBG_CM,
+						  "%s: Remote peer doesn't support RDMA0_READ (ird=%u)\n",
+						  __func__, ird_size);
+					return -EINVAL;
+				} else {
+					cm_node->ord_size = ird_size;
+				}
+			}
 		}
 	}
 
 	if (rtr_ctrl_ord & IETF_RDMA0_READ) {
 		cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+
 	} else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
 		cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
 	} else {	/* Not supported RDMA0 operation */
···
 	nes_debug(NES_DBG_CM, "-------------- end core ---------------\n");
 }
 
+static void record_ird_ord(struct nes_cm_node *cm_node,
+			   u16 conn_ird, u16 conn_ord)
+{
+	if (conn_ird > NES_MAX_IRD)
+		conn_ird = NES_MAX_IRD;
+
+	if (conn_ord > NES_MAX_ORD)
+		conn_ord = NES_MAX_ORD;
+
+	cm_node->ird_size = conn_ird;
+	cm_node->ord_size = conn_ord;
+}
+
 /**
  * cm_build_mpa_frame - build a MPA V1 frame or MPA V2 frame
  */
···
 	mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
 
 	/* initialize RTR msg */
-	ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
-			IETF_NO_IRD_ORD : cm_node->ird_size;
-	ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
-			IETF_NO_IRD_ORD : cm_node->ord_size;
-
+	if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+		ctrl_ird = IETF_NO_IRD_ORD;
+		ctrl_ord = IETF_NO_IRD_ORD;
+	} else {
+		ctrl_ird = cm_node->ird_size & IETF_NO_IRD_ORD;
+		ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
+	}
 	ctrl_ird |= IETF_PEER_TO_PEER;
 	ctrl_ird |= IETF_FLPDU_ZERO_LEN;
···
 	struct nes_qp *nesqp = *nesqp_addr;
 	struct nes_hw_qp_wqe *wqe = &nesqp->hwqp.sq_vbase[0];
 
-	u64temp = (unsigned long)nesqp;
+	u64temp = (unsigned long)nesqp->nesuqp_addr;
 	u64temp |= NES_SW_CONTEXT_ALIGN >> 1;
 	set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX, u64temp);
···
 
 	cm_node->mpa_frame_rev = mpa_version;
 	cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
-	cm_node->ird_size = IETF_NO_IRD_ORD;
-	cm_node->ord_size = IETF_NO_IRD_ORD;
+	cm_node->mpav2_ird_ord = 0;
+	cm_node->ird_size = 0;
+	cm_node->ord_size = 0;
 
 	nes_debug(NES_DBG_CM, "Make node addresses : loc = %pI4:%x, rem = %pI4:%x\n",
 		  &cm_node->loc_addr, cm_node->loc_port,
···
 		rem_ref_cm_node(cm_node->cm_core, cm_node);
 		return -ECONNRESET;
 	}
-
 	/* associate the node with the QP */
 	nesqp->cm_node = (void *)cm_node;
 	cm_node->nesqp = nesqp;
+
 
 	nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
 		  nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
···
 
 	if (cm_node->mpa_frame_rev == IETF_MPA_V1)
 		mpa_frame_offset = 4;
+
+	if (cm_node->mpa_frame_rev == IETF_MPA_V1 ||
+	    cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+		record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
+	}
 
 	memcpy(mpa_v2_frame->priv_data, conn_param->private_data,
 	       conn_param->private_data_len);
···
 	}
 	nesqp->skip_lsmm = 1;
 
-
 	/* Cache the cm_id in the qp */
 	nesqp->cm_id = cm_id;
 	cm_node->cm_id = cm_id;
···
 	nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(
 		((u32)1 << NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT));
 	nesqp->nesqp_context->ird_ord_sizes |=
-		cpu_to_le32((u32)conn_param->ord);
+		cpu_to_le32((u32)cm_node->ord_size);
 
 	memset(&nes_quad, 0, sizeof(nes_quad));
 	nes_quad.DstIpAdrIndex =
···
 	cm_event.remote_addr = cm_id->remote_addr;
 	cm_event.private_data = NULL;
 	cm_event.private_data_len = 0;
+	cm_event.ird = cm_node->ird_size;
+	cm_event.ord = cm_node->ord_size;
+
 	ret = cm_id->event_handler(cm_id, &cm_event);
 	attr.qp_state = IB_QPS_RTS;
 	nes_modify_qp(&nesqp->ibqp, &attr, IB_QP_STATE, NULL);
···
 
 	/* cache the cm_id in the qp */
 	nesqp->cm_id = cm_id;
-
 	cm_id->provider_data = nesqp;
-
 	nesqp->private_data_len = conn_param->private_data_len;
-	nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32((u32)conn_param->ord);
-	/* space for rdma0 read msg */
-	if (conn_param->ord == 0)
-		nesqp->nesqp_context->ird_ord_sizes |= cpu_to_le32(1);
 
 	nes_debug(NES_DBG_CM, "requested ord = 0x%08X.\n", (u32)conn_param->ord);
 	nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
···
 		cm_id->rem_ref(cm_id);
 		return -ENOMEM;
 	}
+
+	record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
+	if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
+	    cm_node->ord_size == 0)
+		cm_node->ord_size = 1;
 
 	cm_node->apbvt_set = apbvt_set;
 	nesqp->cm_node = cm_node;
···
 	nesqp->nesqp_context->ird_ord_sizes |=
 		cpu_to_le32((u32)1 <<
 			    NES_QPCONTEXT_ORDIRD_IWARP_MODE_SHIFT);
+	nesqp->nesqp_context->ird_ord_sizes |=
+		cpu_to_le32((u32)cm_node->ord_size);
 
 	/* Adjust tail for not having a LSMM */
 	/*nesqp->hwqp.sq_tail = 1;*/
···
 	cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
 	cm_event.private_data = cm_node->mpa_frame_buf;
 	cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
+	if (cm_node->mpa_frame_rev == IETF_MPA_V1) {
+		cm_event.ird = NES_MAX_IRD;
+		cm_event.ord = NES_MAX_ORD;
+	} else {
 	cm_event.ird = cm_node->ird_size;
 	cm_event.ord = cm_node->ord_size;
+	}
 
 	ret = cm_id->event_handler(cm_id, &cm_event);
 	if (ret)
+3
drivers/infiniband/hw/nes/nes_cm.h
···
 #define IETF_RDMA0_WRITE        0x8000
 #define IETF_RDMA0_READ         0x4000
 #define IETF_NO_IRD_ORD         0x3FFF
+#define NES_MAX_IRD             0x40
+#define NES_MAX_ORD             0x7F
 
 enum ietf_mpa_flags {
 	IETF_MPA_FLAGS_MARKERS = 0x80,	/* receive Markers */
···
 	enum mpa_frame_version mpa_frame_rev;
 	u16 ird_size;
 	u16 ord_size;
+	u16 mpav2_ird_ord;
 
 	u16 mpa_frame_size;
 	struct iw_cm_id *cm_id;
+3 -2
drivers/infiniband/hw/nes/nes_user.h
···
 
 #include <linux/types.h>
 
-#define NES_ABI_USERSPACE_VER 1
-#define NES_ABI_KERNEL_VER    1
+#define NES_ABI_USERSPACE_VER 2
+#define NES_ABI_KERNEL_VER    2
 
 /*
  * Make sure that all structs defined in this file remain laid out so
···
 
 struct nes_create_qp_req {
 	__u64 user_wqe_buffers;
+	__u64 user_qp_buffer;
 };
 
 enum iwnes_memreg_type {
+4 -4
drivers/infiniband/hw/nes/nes_verbs.c
···
 			nes_free_resource(nesadapter, nesadapter->allocated_qps, qp_num);
 			kfree(nesqp->allocated_buffer);
 			nes_debug(NES_DBG_QP, "ib_copy_from_udata() Failed \n");
-			return NULL;
+			return ERR_PTR(-EFAULT);
 		}
 		if (req.user_wqe_buffers) {
 			virt_wqs = 1;
 		}
+		if (req.user_qp_buffer)
+			nesqp->nesuqp_addr = req.user_qp_buffer;
 		if ((ibpd->uobject) && (ibpd->uobject->context)) {
 			nesqp->user_mode = 1;
 			nes_ucontext = to_nesucontext(ibpd->uobject->context);
···
 			" original_last_aeq = 0x%04X. last_aeq = 0x%04X.\n",
 			nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
 			original_last_aeq, nesqp->last_aeq);
-	if ((!ret) ||
-	    ((original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) &&
-	     (ret))) {
+	if (!ret || original_last_aeq != NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
 		if (dont_wait) {
 			if (nesqp->cm_id && nesqp->hw_tcp_state != 0) {
 				nes_debug(NES_DBG_MOD_QP, "QP%u Queuing fake disconnect for QP refcount (%d),"
+1
drivers/infiniband/hw/nes/nes_verbs.h
···
 	u8 pau_busy;
 	u8 pau_pending;
 	u8 pau_state;
+	__u64 nesuqp_addr;
 };
 #endif		/* NES_VERBS_H */
+1 -1
drivers/infiniband/hw/ocrdma/Makefile
···
 
 obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
 
-ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o
+ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o
+96 -14
drivers/infiniband/hw/ocrdma/ocrdma.h
···
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
 
 #include <be_roce.h>
 #include "ocrdma_sli.h"
 
-#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
+#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u"
+
+#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
 
+#define OC_NAME_SH	OCRDMA_NODE_DESC "(Skyhawk)"
+#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"
+
+#define OC_SKH_DEVICE_PF 0x720
+#define OC_SKH_DEVICE_VF 0x728
 #define OCRDMA_MAX_AH 512
 
 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
 
 struct ocrdma_dev_attr {
 	u8 fw_ver[32];
···
 	int max_mr;
 	u64 max_mr_size;
 	u32 max_num_mr_pbl;
+	int max_mw;
 	int max_fmr;
 	int max_map_per_fmr;
 	int max_pages_per_frmr;
···
 	u8 local_ca_ack_delay;
 	u8 ird;
 	u8 num_ird_pages;
+};
+
+struct ocrdma_dma_mem {
+	void *va;
+	dma_addr_t pa;
+	u32 size;
 };
 
 struct ocrdma_pbl {
···
 	struct ocrdma_hw_mr hwmr;
 };
 
+struct ocrdma_stats {
+	u8 type;
+	struct ocrdma_dev *dev;
+};
+
+struct stats_mem {
+	struct ocrdma_mqe mqe;
+	void *va;
+	dma_addr_t pa;
+	u32 size;
+	char *debugfs_mem;
+};
+
+struct phy_info {
+	u16 auto_speeds_supported;
+	u16 fixed_speeds_supported;
+	u16 phy_type;
+	u16 interface_type;
+};
+
 struct ocrdma_dev {
 	struct ib_device ibdev;
 	struct ocrdma_dev_attr attr;
···
 	struct mqe_ctx mqe_ctx;
 
 	struct be_dev_info nic_info;
+	struct phy_info phy;
+	char model_number[32];
+	u32 hba_port_num;
 
 	struct list_head entry;
 	struct rcu_head rcu;
 	int id;
-	struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
+	u64 stag_arr[OCRDMA_MAX_STAG];
 	u16 pvid;
+	u32 asic_id;
+
+	ulong last_stats_time;
+	struct mutex stats_lock; /* provide synch for debugfs operations */
+	struct stats_mem stats_mem;
+	struct ocrdma_stats rsrc_stats;
+	struct ocrdma_stats rx_stats;
+	struct ocrdma_stats wqe_stats;
+	struct ocrdma_stats tx_stats;
+	struct ocrdma_stats db_err_stats;
+	struct ocrdma_stats tx_qp_err_stats;
+	struct ocrdma_stats rx_qp_err_stats;
+	struct ocrdma_stats tx_dbg_stats;
+	struct ocrdma_stats rx_dbg_stats;
+	struct dentry *dir;
 };
 
 struct ocrdma_cq {
···
 	 */
 	u32 max_hw_cqe;
 	bool phase_change;
-	bool armed, solicited;
-	bool arm_needed;
+	bool deferred_arm, deferred_sol;
+	bool first_arm;
 
 	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
 						   * to cq polling
···
 	struct ocrdma_ucontext *ucontext;
 	dma_addr_t pa;
 	u32 len;
+	u32 cqe_cnt;
 
 	/* head of all qp's sq and rq for which cqes need to be flushed
 	 * by the software.
···
 
 struct ocrdma_pd {
 	struct ib_pd ibpd;
-	struct ocrdma_dev *dev;
 	struct ocrdma_ucontext *uctx;
 	u32 id;
 	int num_dpp_qp;
···
 	bool dpp_enabled;
 	u8 *ird_q_va;
 	bool signaled;
-	u16 db_cache;
 };
-
 
 struct ocrdma_ucontext {
 	struct ib_ucontext ibucontext;
···
 	return container_of(ibsrq, struct ocrdma_srq, ibsrq);
 }
 
-
-static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
-{
-	return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
-		 qp->id < 128) ? 24 : 16);
-}
-
 static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
 {
 	int cqe_valid;
···
 	else
 		memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
 	return 0;
+}
+
+static inline char *hca_name(struct ocrdma_dev *dev)
+{
+	switch (dev->nic_info.pdev->device) {
+	case OC_SKH_DEVICE_PF:
+	case OC_SKH_DEVICE_VF:
+		return OC_NAME_SH;
+	default:
+		return OC_NAME_UNKNOWN;
+	}
+}
+
+static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
+					    int eqid)
+{
+	int indx;
+
+	for (indx = 0; indx < dev->eq_cnt; indx++) {
+		if (dev->eq_tbl[indx].q.id == eqid)
+			return indx;
+	}
+
+	return -EINVAL;
+}
+
+static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
+{
+	if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
+		pci_read_config_dword(
+			dev->nic_info.pdev,
+			OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id);
+	}
+
+	return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
+				OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
 }
 
 #endif
+3 -4
drivers/infiniband/hw/ocrdma/ocrdma_abi.h
··· 28 28 #ifndef __OCRDMA_ABI_H__ 29 29 #define __OCRDMA_ABI_H__ 30 30 31 - #define OCRDMA_ABI_VERSION 1 31 + #define OCRDMA_ABI_VERSION 2 32 + #define OCRDMA_BE_ROCE_ABI_VERSION 1 32 33 /* user kernel communication data structures. */ 33 34 34 35 struct ocrdma_alloc_ucontext_resp { ··· 108 107 u32 db_sq_offset; 109 108 u32 db_rq_offset; 110 109 u32 db_shift; 111 - u64 rsvd1; 112 - u64 rsvd2; 113 - u64 rsvd3; 110 + u64 rsvd[11]; 114 111 } __packed; 115 112 116 113 struct ocrdma_create_srq_uresp {
+1 -1
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
···
 	if (!(attr->ah_flags & IB_AH_GRH))
 		return ERR_PTR(-EINVAL);
 
-	ah = kzalloc(sizeof *ah, GFP_ATOMIC);
+	ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
 	if (!ah)
 		return ERR_PTR(-ENOMEM);
+240 -59
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 32 32 33 33 #include <rdma/ib_verbs.h> 34 34 #include <rdma/ib_user_verbs.h> 35 - #include <rdma/ib_addr.h> 36 35 37 36 #include "ocrdma.h" 38 37 #include "ocrdma_hw.h" ··· 242 243 return err_num; 243 244 } 244 245 246 + char *port_speed_string(struct ocrdma_dev *dev) 247 + { 248 + char *str = ""; 249 + u16 speeds_supported; 250 + 251 + speeds_supported = dev->phy.fixed_speeds_supported | 252 + dev->phy.auto_speeds_supported; 253 + if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS) 254 + str = "40Gbps "; 255 + else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS) 256 + str = "10Gbps "; 257 + else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS) 258 + str = "1Gbps "; 259 + 260 + return str; 261 + } 262 + 245 263 static int ocrdma_get_mbx_cqe_errno(u16 cqe_status) 246 264 { 247 265 int err_num = -EINVAL; ··· 348 332 return mqe; 349 333 } 350 334 335 + static void *ocrdma_alloc_mqe(void) 336 + { 337 + return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL); 338 + } 339 + 351 340 static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q) 352 341 { 353 342 dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma); ··· 385 364 } 386 365 } 387 366 388 - static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q, 389 - int queue_type) 367 + static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, 368 + struct ocrdma_queue_info *q, int queue_type) 390 369 { 391 370 u8 opcode = 0; 392 371 int status; ··· 465 444 return status; 466 445 } 467 446 468 - static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) 447 + int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq) 469 448 { 470 449 int irq; 471 450 ··· 595 574 if (status) 596 575 goto alloc_err; 597 576 577 + dev->eq_tbl[0].cq_cnt++; 598 578 status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q); 599 579 if (status) 600 580 goto mbx_cq_free; ··· 661 639 { 662 640 struct ocrdma_qp *qp = NULL; 663 641 struct ocrdma_cq *cq = NULL; 664 - struct ib_event ib_evt; 642 + struct ib_event ib_evt = { 0 }; 665 643 int cq_event = 0; 666 644 int qp_event = 1; 667 645 int srq_event = 0; ··· 686 664 case OCRDMA_CQ_OVERRUN_ERROR: 687 665 ib_evt.element.cq = &cq->ibcq; 688 666 ib_evt.event = IB_EVENT_CQ_ERR; 667 + cq_event = 1; 668 + qp_event = 0; 689 669 break; 690 670 case OCRDMA_CQ_QPCAT_ERROR: 691 671 ib_evt.element.qp = &qp->ibqp; ··· 749 725 qp->srq->ibsrq. 
750 726 srq_context); 751 727 } else if (dev_event) { 728 + pr_err("%s: Fatal event received\n", dev->ibdev.name); 752 729 ib_dispatch_event(&ib_evt); 753 730 } 754 731 ··· 776 751 break; 777 752 } 778 753 } 779 - 780 754 781 755 static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe) 782 756 { ··· 823 799 ocrdma_process_acqe(dev, cqe); 824 800 else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK) 825 801 ocrdma_process_mcqe(dev, cqe); 826 - else 827 - pr_err("%s() cqe->compl is not set.\n", __func__); 828 802 memset(cqe, 0, sizeof(struct ocrdma_mcqe)); 829 803 ocrdma_mcq_inc_tail(dev); 830 804 } ··· 880 858 BUG(); 881 859 882 860 cq = dev->cq_tbl[cq_idx]; 883 - if (cq == NULL) { 884 - pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx); 861 + if (cq == NULL) 885 862 return; 886 - } 887 - spin_lock_irqsave(&cq->cq_lock, flags); 888 - cq->armed = false; 889 - cq->solicited = false; 890 - spin_unlock_irqrestore(&cq->cq_lock, flags); 891 - 892 - ocrdma_ring_cq_db(dev, cq->id, false, false, 0); 893 863 894 864 if (cq->ibcq.comp_handler) { 895 865 spin_lock_irqsave(&cq->comp_handler_lock, flags); ··· 906 892 struct ocrdma_dev *dev = eq->dev; 907 893 struct ocrdma_eqe eqe; 908 894 struct ocrdma_eqe *ptr; 909 - u16 eqe_popped = 0; 910 895 u16 cq_id; 911 - while (1) { 896 + int budget = eq->cq_cnt; 897 + 898 + do { 912 899 ptr = ocrdma_get_eqe(eq); 913 900 eqe = *ptr; 914 901 ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); 915 902 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) 916 903 break; 917 - eqe_popped += 1; 904 + 918 905 ptr->id_valid = 0; 906 + /* ring eq doorbell as soon as its consumed. */ 907 + ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1); 919 908 /* check whether its CQE or not. */ 920 909 if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) { 921 910 cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT; 922 911 ocrdma_cq_handler(dev, cq_id); 923 912 } 924 913 ocrdma_eq_inc_tail(eq); 925 - } 926 - ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped); 927 - /* Ring EQ doorbell with num_popped to 0 to enable interrupts again. */ 928 - if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) 929 - ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); 914 + 915 + /* There can be a stale EQE after the last bound CQ is 916 + * destroyed. EQE valid and budget == 0 implies this. 
917 + */ 918 + if (budget) 919 + budget--; 920 + 921 + } while (budget); 922 + 923 + ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); 930 924 return IRQ_HANDLED; 931 925 } 932 926 ··· 971 949 { 972 950 int status = 0; 973 951 u16 cqe_status, ext_status; 974 - struct ocrdma_mqe *rsp; 952 + struct ocrdma_mqe *rsp_mqe; 953 + struct ocrdma_mbx_rsp *rsp = NULL; 975 954 976 955 mutex_lock(&dev->mqe_ctx.lock); 977 956 ocrdma_post_mqe(dev, mqe); ··· 981 958 goto mbx_err; 982 959 cqe_status = dev->mqe_ctx.cqe_status; 983 960 ext_status = dev->mqe_ctx.ext_status; 984 - rsp = ocrdma_get_mqe_rsp(dev); 985 - ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe))); 961 + rsp_mqe = ocrdma_get_mqe_rsp(dev); 962 + ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe))); 963 + if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >> 964 + OCRDMA_MQE_HDR_EMB_SHIFT) 965 + rsp = &mqe->u.rsp; 966 + 986 967 if (cqe_status || ext_status) { 987 - pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n", 988 - __func__, 989 - (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> 990 - OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status); 968 + pr_err("%s() cqe_status=0x%x, ext_status=0x%x,", 969 + __func__, cqe_status, ext_status); 970 + if (rsp) { 971 + /* This is for embedded cmds. */ 972 + pr_err("opcode=0x%x, subsystem=0x%x\n", 973 + (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> 974 + OCRDMA_MBX_RSP_OPCODE_SHIFT, 975 + (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >> 976 + OCRDMA_MBX_RSP_SUBSYS_SHIFT); 977 + } 991 978 status = ocrdma_get_mbx_cqe_errno(cqe_status); 992 979 goto mbx_err; 993 980 } 994 - if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK) 981 + /* For non embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */ 982 + if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)) 995 983 status = ocrdma_get_mbx_errno(mqe->u.rsp.status); 996 984 mbx_err: 997 985 mutex_unlock(&dev->mqe_ctx.lock); 986 + return status; 987 + } 988 + 989 + static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe, 990 + void *payload_va) 991 + { 992 + int status = 0; 993 + struct ocrdma_mbx_rsp *rsp = payload_va; 994 + 995 + if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >> 996 + OCRDMA_MQE_HDR_EMB_SHIFT) 997 + BUG(); 998 + 999 + status = ocrdma_mbx_cmd(dev, mqe); 1000 + if (!status) 1001 + /* For non embedded, only CQE failures are handled in 1002 + * ocrdma_mbx_cmd. We need to check for RSP errors. 
1003 + */ 1004 + if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK) 1005 + status = ocrdma_get_mbx_errno(rsp->status); 1006 + 1007 + if (status) 1008 + pr_err("opcode=0x%x, subsystem=0x%x\n", 1009 + (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >> 1010 + OCRDMA_MBX_RSP_OPCODE_SHIFT, 1011 + (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >> 1012 + OCRDMA_MBX_RSP_SUBSYS_SHIFT); 998 1013 return status; 999 1014 } 1000 1015 ··· 1046 985 attr->max_qp = 1047 986 (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> 1048 987 OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; 988 + attr->max_srq = 989 + (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> 990 + OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; 1049 991 attr->max_send_sge = ((rsp->max_write_send_sge & 1050 992 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> 1051 993 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT); ··· 1064 1000 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & 1065 1001 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> 1066 1002 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; 1067 - attr->max_srq = 1068 - (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >> 1069 - OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET; 1070 1003 attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp & 1071 1004 OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >> 1072 1005 OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT; ··· 1076 1015 attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay & 1077 1016 OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >> 1078 1017 OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT; 1018 + attr->max_mw = rsp->max_mw; 1079 1019 attr->max_mr = rsp->max_mr; 1080 1020 attr->max_mr_size = ~0ull; 1081 1021 attr->max_fmr = 0; ··· 1098 1036 attr->max_inline_data = 1099 1037 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + 1100 1038 sizeof(struct ocrdma_sge)); 1101 - if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1039 + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { 1102 1040 attr->ird = 1; 1103 1041 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; 1104 1042 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; ··· 1172 1110 return status; 1173 1111 } 1174 1112 1113 + int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset) 1114 + { 1115 + struct ocrdma_rdma_stats_req *req = dev->stats_mem.va; 1116 + struct ocrdma_mqe *mqe = &dev->stats_mem.mqe; 1117 + struct ocrdma_rdma_stats_resp *old_stats = NULL; 1118 + int status; 1119 + 1120 + old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL); 1121 + if (old_stats == NULL) 1122 + return -ENOMEM; 1123 + 1124 + memset(mqe, 0, sizeof(*mqe)); 1125 + mqe->hdr.pyld_len = dev->stats_mem.size; 1126 + mqe->hdr.spcl_sge_cnt_emb |= 1127 + (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & 1128 + OCRDMA_MQE_HDR_SGE_CNT_MASK; 1129 + mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff); 1130 + mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa); 1131 + mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size; 1132 + 1133 + /* Cache the old stats */ 1134 + memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp)); 1135 + memset(req, 0, dev->stats_mem.size); 1136 + 1137 + ocrdma_init_mch((struct ocrdma_mbx_hdr *)req, 1138 + OCRDMA_CMD_GET_RDMA_STATS, 1139 + OCRDMA_SUBSYS_ROCE, 1140 + dev->stats_mem.size); 1141 + if (reset) 1142 + req->reset_stats = reset; 1143 + 1144 + status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va); 1145 + if (status) 1146 + /* Copy from cache, if mbox fails */ 1147 + memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp)); 1148 + else 1149 + ocrdma_le32_to_cpu(req, dev->stats_mem.size); 1150 + 1151 + 
kfree(old_stats); 1152 + return status; 1153 + } 1154 + 1155 + static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev) 1156 + { 1157 + int status = -ENOMEM; 1158 + struct ocrdma_dma_mem dma; 1159 + struct ocrdma_mqe *mqe; 1160 + struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp; 1161 + struct mgmt_hba_attribs *hba_attribs; 1162 + 1163 + mqe = ocrdma_alloc_mqe(); 1164 + if (!mqe) 1165 + return status; 1166 + memset(mqe, 0, sizeof(*mqe)); 1167 + 1168 + dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp); 1169 + dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev, 1170 + dma.size, &dma.pa, GFP_KERNEL); 1171 + if (!dma.va) 1172 + goto free_mqe; 1173 + 1174 + mqe->hdr.pyld_len = dma.size; 1175 + mqe->hdr.spcl_sge_cnt_emb |= 1176 + (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) & 1177 + OCRDMA_MQE_HDR_SGE_CNT_MASK; 1178 + mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff); 1179 + mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa); 1180 + mqe->u.nonemb_req.sge[0].len = dma.size; 1181 + 1182 + memset(dma.va, 0, dma.size); 1183 + ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va, 1184 + OCRDMA_CMD_GET_CTRL_ATTRIBUTES, 1185 + OCRDMA_SUBSYS_COMMON, 1186 + dma.size); 1187 + 1188 + status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va); 1189 + if (!status) { 1190 + ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va; 1191 + hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs; 1192 + 1193 + dev->hba_port_num = hba_attribs->phy_port; 1194 + strncpy(dev->model_number, 1195 + hba_attribs->controller_model_number, 31); 1196 + } 1197 + dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa); 1198 + free_mqe: 1199 + kfree(mqe); 1200 + return status; 1201 + } 1202 + 1175 1203 static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev) 1176 1204 { 1177 1205 int status = -ENOMEM; ··· 1304 1152 rsp = (struct ocrdma_get_link_speed_rsp *)cmd; 1305 1153 *lnk_speed = rsp->phys_port_speed; 1306 1154 1155 + mbx_err: 1156 + kfree(cmd); 1157 + return status; 1158 + } 1159 + 1160 + static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev) 1161 + { 1162 + int status = -ENOMEM; 1163 + struct ocrdma_mqe *cmd; 1164 + struct ocrdma_get_phy_info_rsp *rsp; 1165 + 1166 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd)); 1167 + if (!cmd) 1168 + return status; 1169 + 1170 + ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0], 1171 + OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON, 1172 + sizeof(*cmd)); 1173 + 1174 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1175 + if (status) 1176 + goto mbx_err; 1177 + 1178 + rsp = (struct ocrdma_get_phy_info_rsp *)cmd; 1179 + dev->phy.phy_type = le16_to_cpu(rsp->phy_type); 1180 + dev->phy.auto_speeds_supported = 1181 + le16_to_cpu(rsp->auto_speeds_supported); 1182 + dev->phy.fixed_speeds_supported = 1183 + le16_to_cpu(rsp->fixed_speeds_supported); 1307 1184 mbx_err: 1308 1185 kfree(cmd); 1309 1186 return status; ··· 1407 1226 1408 1227 static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) 1409 1228 { 1410 - int i ; 1229 + int i; 1411 1230 int status = 0; 1412 1231 int max_ah; 1413 1232 struct ocrdma_create_ah_tbl *cmd; ··· 1538 1357 int i; 1539 1358 1540 1359 mutex_lock(&dev->dev_lock); 1541 - for (i = 0; i < dev->eq_cnt; i++) { 1542 - if (dev->eq_tbl[i].q.id != eq_id) 1543 - continue; 1544 - dev->eq_tbl[i].cq_cnt -= 1; 1545 - break; 1546 - } 1360 + i = ocrdma_get_eq_table_index(dev, eq_id); 1361 + if (i == -EINVAL) 1362 + BUG(); 1363 + dev->eq_tbl[i].cq_cnt -= 1; 1547 1364 mutex_unlock(&dev->dev_lock); 1548 1365 } 1549 1366 
··· 1559 1380 __func__, dev->id, dev->attr.max_cqe, entries); 1560 1381 return -EINVAL; 1561 1382 } 1562 - if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)) 1383 + if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R)) 1563 1384 return -EINVAL; 1564 1385 1565 1386 if (dpp_cq) { ··· 1596 1417 cq->eqn = ocrdma_bind_eq(dev); 1597 1418 cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3; 1598 1419 cqe_count = cq->len / cqe_size; 1420 + cq->cqe_cnt = cqe_count; 1599 1421 if (cqe_count > 1024) { 1600 1422 /* Set cnt to 3 to indicate more than 1024 cq entries */ 1601 1423 cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT); ··· 1619 1439 } 1620 1440 /* shared eq between all the consumer cqs. */ 1621 1441 cmd->cmd.eqn = cq->eqn; 1622 - if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1442 + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { 1623 1443 if (dpp_cq) 1624 1444 cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP << 1625 1445 OCRDMA_CREATE_CQ_TYPE_SHIFT; ··· 1664 1484 (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) & 1665 1485 OCRDMA_DESTROY_CQ_QID_MASK; 1666 1486 1667 - ocrdma_unbind_eq(dev, cq->eqn); 1668 1487 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1669 - if (status) 1670 - goto mbx_err; 1488 + ocrdma_unbind_eq(dev, cq->eqn); 1671 1489 dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa); 1672 - mbx_err: 1673 1490 kfree(cmd); 1674 1491 return status; 1675 1492 } ··· 2206 2029 OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK; 2207 2030 qp->rq_cq = cq; 2208 2031 2209 - if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp && 2210 - (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) { 2032 + if (pd->dpp_enabled && pd->num_dpp_qp) { 2211 2033 ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq, 2212 2034 dpp_cq_id); 2213 2035 } ··· 2275 2099 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], 2276 2100 sizeof(cmd->params.dgid)); 2277 2101 status = ocrdma_query_gid(&qp->dev->ibdev, 1, 2278 - ah_attr->grh.sgid_index, &sgid); 2102 + ah_attr->grh.sgid_index, &sgid); 2279 2103 if (status) 2280 2104 return status; 2281 2105 ··· 2303 2127 2304 2128 static int ocrdma_set_qp_params(struct ocrdma_qp *qp, 2305 2129 struct ocrdma_modify_qp *cmd, 2306 - struct ib_qp_attr *attrs, int attr_mask, 2307 - enum ib_qp_state old_qps) 2130 + struct ib_qp_attr *attrs, int attr_mask) 2308 2131 { 2309 2132 int status = 0; 2310 2133 ··· 2408 2233 } 2409 2234 2410 2235 int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp, 2411 - struct ib_qp_attr *attrs, int attr_mask, 2412 - enum ib_qp_state old_qps) 2236 + struct ib_qp_attr *attrs, int attr_mask) 2413 2237 { 2414 2238 int status = -ENOMEM; 2415 2239 struct ocrdma_modify_qp *cmd; ··· 2431 2257 OCRDMA_QP_PARAMS_STATE_MASK; 2432 2258 } 2433 2259 2434 - status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps); 2260 + status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask); 2435 2261 if (status) 2436 2262 goto mbx_err; 2437 2263 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); ··· 2662 2488 2663 2489 for (i = 0; i < num_eq; i++) { 2664 2490 status = ocrdma_create_eq(dev, &dev->eq_tbl[i], 2665 - OCRDMA_EQ_LEN); 2491 + OCRDMA_EQ_LEN); 2666 2492 if (status) { 2667 2493 status = -EINVAL; 2668 2494 break; ··· 2707 2533 status = ocrdma_mbx_create_ah_tbl(dev); 2708 2534 if (status) 2709 2535 goto conf_err; 2536 + status = ocrdma_mbx_get_phy_info(dev); 2537 + if (status) 2538 + goto conf_err; 2539 + status = ocrdma_mbx_get_ctrl_attribs(dev); 2540 + if (status) 2541 + 
goto conf_err; 2542 + 2710 2543 return 0; 2711 2544 2712 2545 conf_err:
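The new ocrdma_mbx_rdma_stats() above drives the stats query through a single DMA buffer that serves as both request and response, so it snapshots the previous response before rebuilding the request and restores the snapshot if the mailbox command fails. A minimal sketch of that snapshot-and-restore pattern, with a hypothetical issue_cmd() standing in for ocrdma_nonemb_mbx_cmd():

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    int issue_cmd(void *buf);    /* placeholder for ocrdma_nonemb_mbx_cmd() */

    static int stats_query(void *shared_buf, size_t size)
    {
        void *snapshot;
        int status;

        /* Keep the last good response in case the command fails. */
        snapshot = kmemdup(shared_buf, size, GFP_KERNEL);
        if (!snapshot)
            return -ENOMEM;

        memset(shared_buf, 0, size);    /* build the new request here */
        status = issue_cmd(shared_buf);
        if (status)
            memcpy(shared_buf, snapshot, size);    /* restore old counters */

        kfree(snapshot);
        return status;
    }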
+4 -2
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
··· 112 112 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset, 113 113 u16 *dpp_credit_lmt); 114 114 int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *, 115 - struct ib_qp_attr *attrs, int attr_mask, 116 - enum ib_qp_state old_qps); 115 + struct ib_qp_attr *attrs, int attr_mask); 117 116 int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *, 118 117 struct ocrdma_qp_params *param); 119 118 int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *); ··· 131 132 bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); 132 133 bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *); 133 134 void ocrdma_flush_qp(struct ocrdma_qp *); 135 + int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq); 134 136 137 + int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); 138 + char *port_speed_string(struct ocrdma_dev *dev); 135 139 #endif /* __OCRDMA_HW_H__ */
+74 -7
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 39 39 #include "ocrdma_ah.h" 40 40 #include "be_roce.h" 41 41 #include "ocrdma_hw.h" 42 + #include "ocrdma_stats.h" 42 43 #include "ocrdma_abi.h" 43 44 44 - MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION); 45 - MODULE_DESCRIPTION("Emulex RoCE HCA Driver"); 45 + MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); 46 + MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); 46 47 MODULE_AUTHOR("Emulex Corporation"); 47 48 MODULE_LICENSE("GPL"); 48 49 ··· 287 286 288 287 dev->ibdev.process_mad = ocrdma_process_mad; 289 288 290 - if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 289 + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { 291 290 dev->ibdev.uverbs_cmd_mask |= 292 291 OCRDMA_UVERBS(CREATE_SRQ) | 293 292 OCRDMA_UVERBS(MODIFY_SRQ) | ··· 339 338 kfree(dev->sgid_tbl); 340 339 } 341 340 341 + /* OCRDMA sysfs interface */ 342 + static ssize_t show_rev(struct device *device, struct device_attribute *attr, 343 + char *buf) 344 + { 345 + struct ocrdma_dev *dev = dev_get_drvdata(device); 346 + 347 + return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor); 348 + } 349 + 350 + static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, 351 + char *buf) 352 + { 353 + struct ocrdma_dev *dev = dev_get_drvdata(device); 354 + 355 + return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]); 356 + } 357 + 358 + static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 359 + static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL); 360 + 361 + static struct device_attribute *ocrdma_attributes[] = { 362 + &dev_attr_hw_rev, 363 + &dev_attr_fw_ver 364 + }; 365 + 366 + static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev) 367 + { 368 + int i; 369 + 370 + for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) 371 + device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]); 372 + } 373 + 342 374 static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) 343 375 { 344 - int status = 0; 376 + int status = 0, i; 345 377 struct ocrdma_dev *dev; 346 378 347 379 dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev)); ··· 403 369 if (status) 404 370 goto alloc_err; 405 371 372 + for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++) 373 + if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i])) 374 + goto sysfs_err; 406 375 spin_lock(&ocrdma_devlist_lock); 407 376 list_add_tail_rcu(&dev->entry, &ocrdma_dev_list); 408 377 spin_unlock(&ocrdma_devlist_lock); 378 + /* Init stats */ 379 + ocrdma_add_port_stats(dev); 380 + 381 + pr_info("%s %s: %s \"%s\" port %d\n", 382 + dev_name(&dev->nic_info.pdev->dev), hca_name(dev), 383 + port_speed_string(dev), dev->model_number, 384 + dev->hba_port_num); 385 + pr_info("%s ocrdma%d driver loaded successfully\n", 386 + dev_name(&dev->nic_info.pdev->dev), dev->id); 409 387 return dev; 410 388 389 + sysfs_err: 390 + ocrdma_remove_sysfiles(dev); 411 391 alloc_err: 412 392 ocrdma_free_resources(dev); 413 393 ocrdma_cleanup_hw(dev); ··· 448 400 /* first unregister with stack to stop all the active traffic 449 401 * of the registered clients. 
450 402 */ 403 + ocrdma_rem_port_stats(dev); 404 + ocrdma_remove_sysfiles(dev); 405 + 451 406 ib_unregister_device(&dev->ibdev); 452 407 453 408 spin_lock(&ocrdma_devlist_lock); ··· 488 437 cur_qp = dev->qp_tbl; 489 438 for (i = 0; i < OCRDMA_MAX_QP; i++) { 490 439 qp = cur_qp[i]; 491 - if (qp) { 440 + if (qp && qp->ibqp.qp_type != IB_QPT_GSI) { 492 441 /* change the QP state to ERROR */ 493 442 _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask); 494 443 ··· 529 478 .add = ocrdma_add, 530 479 .remove = ocrdma_remove, 531 480 .state_change_handler = ocrdma_event_handler, 481 + .be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION, 532 482 }; 533 483 534 484 static void ocrdma_unregister_inet6addr_notifier(void) ··· 539 487 #endif 540 488 } 541 489 490 + static void ocrdma_unregister_inetaddr_notifier(void) 491 + { 492 + unregister_inetaddr_notifier(&ocrdma_inetaddr_notifier); 493 + } 494 + 542 495 static int __init ocrdma_init_module(void) 543 496 { 544 497 int status; 498 + 499 + ocrdma_init_debugfs(); 545 500 546 501 status = register_inetaddr_notifier(&ocrdma_inetaddr_notifier); 547 502 if (status) ··· 557 498 #if IS_ENABLED(CONFIG_IPV6) 558 499 status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier); 559 500 if (status) 560 - return status; 501 + goto err_notifier6; 561 502 #endif 562 503 563 504 status = be_roce_register_driver(&ocrdma_drv); 564 505 if (status) 565 - ocrdma_unregister_inet6addr_notifier(); 506 + goto err_be_reg; 566 507 508 + return 0; 509 + 510 + err_be_reg: 511 + ocrdma_unregister_inet6addr_notifier(); 512 + err_notifier6: 513 + ocrdma_unregister_inetaddr_notifier(); 567 514 return status; 568 515 } 569 516 ··· 577 512 { 578 513 be_roce_unregister_driver(&ocrdma_drv); 579 514 ocrdma_unregister_inet6addr_notifier(); 515 + ocrdma_unregister_inetaddr_notifier(); 516 + ocrdma_rem_debugfs(); 580 517 } 581 518 582 519 module_init(ocrdma_init_module);
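ocrdma_init_module() now unwinds in reverse order when a later registration step fails, instead of returning with earlier notifiers still registered. The shape of that goto ladder, with placeholder register_a()/register_b()/register_c() for the three registration steps:

    #include <linux/init.h>

    int register_a(void);       /* placeholders for the three */
    int register_b(void);       /* registration steps in */
    int register_c(void);       /* ocrdma_init_module() */
    void unregister_a(void);
    void unregister_b(void);

    static int __init demo_init(void)
    {
        int status;

        status = register_a();
        if (status)
            return status;      /* nothing to undo yet */

        status = register_b();
        if (status)
            goto err_b;

        status = register_c();
        if (status)
            goto err_c;

        return 0;

    err_c:
        unregister_b();         /* undo in reverse order */
    err_b:
        unregister_a();
        return status;
    }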
+254 -7
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
··· 30 30 31 31 #define Bit(_b) (1 << (_b)) 32 32 33 - #define OCRDMA_GEN1_FAMILY 0xB 34 - #define OCRDMA_GEN2_FAMILY 0x0F 33 + enum { 34 + OCRDMA_ASIC_GEN_SKH_R = 0x04, 35 + OCRDMA_ASIC_GEN_LANCER = 0x0B 36 + }; 37 + 38 + enum { 39 + OCRDMA_ASIC_REV_A0 = 0x00, 40 + OCRDMA_ASIC_REV_B0 = 0x10, 41 + OCRDMA_ASIC_REV_C0 = 0x20 42 + }; 35 43 36 44 #define OCRDMA_SUBSYS_ROCE 10 37 45 enum { ··· 72 64 73 65 OCRDMA_CMD_ATTACH_MCAST, 74 66 OCRDMA_CMD_DETACH_MCAST, 67 + OCRDMA_CMD_GET_RDMA_STATS, 75 68 76 69 OCRDMA_CMD_MAX 77 70 }; ··· 83 74 OCRDMA_CMD_CREATE_CQ = 12, 84 75 OCRDMA_CMD_CREATE_EQ = 13, 85 76 OCRDMA_CMD_CREATE_MQ = 21, 77 + OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, 86 78 OCRDMA_CMD_GET_FW_VER = 35, 87 79 OCRDMA_CMD_DELETE_MQ = 53, 88 80 OCRDMA_CMD_DELETE_CQ = 54, 89 81 OCRDMA_CMD_DELETE_EQ = 55, 90 82 OCRDMA_CMD_GET_FW_CONFIG = 58, 91 - OCRDMA_CMD_CREATE_MQ_EXT = 90 83 + OCRDMA_CMD_CREATE_MQ_EXT = 90, 84 + OCRDMA_CMD_PHY_DETAILS = 102 92 85 }; 93 86 94 87 enum { ··· 114 103 OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET, 115 104 OCRDMA_DB_CQ_OFFSET = 0x120, 116 105 OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET, 117 - OCRDMA_DB_MQ_OFFSET = 0x140 106 + OCRDMA_DB_MQ_OFFSET = 0x140, 107 + 108 + OCRDMA_DB_SQ_SHIFT = 16, 109 + OCRDMA_DB_RQ_SHIFT = 24 118 110 }; 119 111 120 112 #define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ ··· 152 138 #define OCRDMA_MIN_Q_PAGE_SIZE (4096) 153 139 #define OCRDMA_MAX_Q_PAGES (8) 154 140 141 + #define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C 142 + #define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF 143 + #define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00 144 + #define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08 155 145 /* 156 146 # 0: 4K Bytes 157 147 # 1: 8K Bytes ··· 580 562 OCRDMA_FN_MODE_RDMA = 0x4 581 563 }; 582 564 565 + struct ocrdma_get_phy_info_rsp { 566 + struct ocrdma_mqe_hdr hdr; 567 + struct ocrdma_mbx_rsp rsp; 568 + 569 + u16 phy_type; 570 + u16 interface_type; 571 + u32 misc_params; 572 + u16 ext_phy_details; 573 + u16 rsvd; 574 + u16 auto_speeds_supported; 575 + u16 fixed_speeds_supported; 576 + u32 future_use[2]; 577 + }; 578 + 579 + enum { 580 + OCRDMA_PHY_SPEED_ZERO = 0x0, 581 + OCRDMA_PHY_SPEED_10MBPS = 0x1, 582 + OCRDMA_PHY_SPEED_100MBPS = 0x2, 583 + OCRDMA_PHY_SPEED_1GBPS = 0x4, 584 + OCRDMA_PHY_SPEED_10GBPS = 0x8, 585 + OCRDMA_PHY_SPEED_40GBPS = 0x20 586 + }; 587 + 588 + 583 589 struct ocrdma_get_link_speed_rsp { 584 590 struct ocrdma_mqe_hdr hdr; 585 591 struct ocrdma_mbx_rsp rsp; ··· 632 590 633 591 enum { 634 592 OCRDMA_CREATE_CQ_VER2 = 2, 635 - OCRDMA_CREATE_CQ_VER3 = 3, 593 + OCRDMA_CREATE_CQ_VER3 = 3, 636 594 637 595 OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF, 638 596 OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16, ··· 1092 1050 OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF << 1093 1051 OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT 1094 1052 }; 1053 + 1095 1054 struct ocrdma_modify_qp_rsp { 1096 1055 struct ocrdma_mqe_hdr hdr; 1097 1056 struct ocrdma_mbx_rsp rsp; ··· 1105 1062 struct ocrdma_mqe_hdr hdr; 1106 1063 struct ocrdma_mbx_hdr req; 1107 1064 1108 - #define OCRDMA_QUERY_UP_QP_ID_SHIFT 0 1109 - #define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF 1065 + #define OCRDMA_QUERY_UP_QP_ID_SHIFT 0 1066 + #define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF 1110 1067 u32 qp_id; 1111 1068 }; 1112 1069 ··· 1745 1702 struct ocrdma_grh grh; 1746 1703 u32 valid; 1747 1704 } __packed; 1705 + 1706 + struct ocrdma_rsrc_stats { 1707 + u32 dpp_pds; 1708 + u32 non_dpp_pds; 1709 + u32 rc_dpp_qps; 1710 + u32 uc_dpp_qps; 1711 + u32 ud_dpp_qps; 1712 + u32 rc_non_dpp_qps; 1713 + u32 rsvd; 1714 + u32 uc_non_dpp_qps; 
1715 + u32 ud_non_dpp_qps; 1716 + u32 rsvd1; 1717 + u32 srqs; 1718 + u32 rbqs; 1719 + u32 r64K_nsmr; 1720 + u32 r64K_to_2M_nsmr; 1721 + u32 r2M_to_44M_nsmr; 1722 + u32 r44M_to_1G_nsmr; 1723 + u32 r1G_to_4G_nsmr; 1724 + u32 nsmr_count_4G_to_32G; 1725 + u32 r32G_to_64G_nsmr; 1726 + u32 r64G_to_128G_nsmr; 1727 + u32 r128G_to_higher_nsmr; 1728 + u32 embedded_nsmr; 1729 + u32 frmr; 1730 + u32 prefetch_qps; 1731 + u32 ondemand_qps; 1732 + u32 phy_mr; 1733 + u32 mw; 1734 + u32 rsvd2[7]; 1735 + }; 1736 + 1737 + struct ocrdma_db_err_stats { 1738 + u32 sq_doorbell_errors; 1739 + u32 cq_doorbell_errors; 1740 + u32 rq_srq_doorbell_errors; 1741 + u32 cq_overflow_errors; 1742 + u32 rsvd[4]; 1743 + }; 1744 + 1745 + struct ocrdma_wqe_stats { 1746 + u32 large_send_rc_wqes_lo; 1747 + u32 large_send_rc_wqes_hi; 1748 + u32 large_write_rc_wqes_lo; 1749 + u32 large_write_rc_wqes_hi; 1750 + u32 rsvd[4]; 1751 + u32 read_wqes_lo; 1752 + u32 read_wqes_hi; 1753 + u32 frmr_wqes_lo; 1754 + u32 frmr_wqes_hi; 1755 + u32 mw_bind_wqes_lo; 1756 + u32 mw_bind_wqes_hi; 1757 + u32 invalidate_wqes_lo; 1758 + u32 invalidate_wqes_hi; 1759 + u32 rsvd1[2]; 1760 + u32 dpp_wqe_drops; 1761 + u32 rsvd2[5]; 1762 + }; 1763 + 1764 + struct ocrdma_tx_stats { 1765 + u32 send_pkts_lo; 1766 + u32 send_pkts_hi; 1767 + u32 write_pkts_lo; 1768 + u32 write_pkts_hi; 1769 + u32 read_pkts_lo; 1770 + u32 read_pkts_hi; 1771 + u32 read_rsp_pkts_lo; 1772 + u32 read_rsp_pkts_hi; 1773 + u32 ack_pkts_lo; 1774 + u32 ack_pkts_hi; 1775 + u32 send_bytes_lo; 1776 + u32 send_bytes_hi; 1777 + u32 write_bytes_lo; 1778 + u32 write_bytes_hi; 1779 + u32 read_req_bytes_lo; 1780 + u32 read_req_bytes_hi; 1781 + u32 read_rsp_bytes_lo; 1782 + u32 read_rsp_bytes_hi; 1783 + u32 ack_timeouts; 1784 + u32 rsvd[5]; 1785 + }; 1786 + 1787 + 1788 + struct ocrdma_tx_qp_err_stats { 1789 + u32 local_length_errors; 1790 + u32 local_protection_errors; 1791 + u32 local_qp_operation_errors; 1792 + u32 retry_count_exceeded_errors; 1793 + u32 rnr_retry_count_exceeded_errors; 1794 + u32 rsvd[3]; 1795 + }; 1796 + 1797 + struct ocrdma_rx_stats { 1798 + u32 roce_frame_bytes_lo; 1799 + u32 roce_frame_bytes_hi; 1800 + u32 roce_frame_icrc_drops; 1801 + u32 roce_frame_payload_len_drops; 1802 + u32 ud_drops; 1803 + u32 qp1_drops; 1804 + u32 psn_error_request_packets; 1805 + u32 psn_error_resp_packets; 1806 + u32 rnr_nak_timeouts; 1807 + u32 rnr_nak_receives; 1808 + u32 roce_frame_rxmt_drops; 1809 + u32 nak_count_psn_sequence_errors; 1810 + u32 rc_drop_count_lookup_errors; 1811 + u32 rq_rnr_naks; 1812 + u32 srq_rnr_naks; 1813 + u32 roce_frames_lo; 1814 + u32 roce_frames_hi; 1815 + u32 rsvd; 1816 + }; 1817 + 1818 + struct ocrdma_rx_qp_err_stats { 1819 + u32 nak_invalid_requst_errors; 1820 + u32 nak_remote_operation_errors; 1821 + u32 nak_count_remote_access_errors; 1822 + u32 local_length_errors; 1823 + u32 local_protection_errors; 1824 + u32 local_qp_operation_errors; 1825 + u32 rsvd[2]; 1826 + }; 1827 + 1828 + struct ocrdma_tx_dbg_stats { 1829 + u32 data[100]; 1830 + }; 1831 + 1832 + struct ocrdma_rx_dbg_stats { 1833 + u32 data[200]; 1834 + }; 1835 + 1836 + struct ocrdma_rdma_stats_req { 1837 + struct ocrdma_mbx_hdr hdr; 1838 + u8 reset_stats; 1839 + u8 rsvd[3]; 1840 + } __packed; 1841 + 1842 + struct ocrdma_rdma_stats_resp { 1843 + struct ocrdma_mbx_hdr hdr; 1844 + struct ocrdma_rsrc_stats act_rsrc_stats; 1845 + struct ocrdma_rsrc_stats th_rsrc_stats; 1846 + struct ocrdma_db_err_stats db_err_stats; 1847 + struct ocrdma_wqe_stats wqe_stats; 1848 + struct ocrdma_tx_stats tx_stats; 1849 + struct 
ocrdma_tx_qp_err_stats tx_qp_err_stats; 1850 + struct ocrdma_rx_stats rx_stats; 1851 + struct ocrdma_rx_qp_err_stats rx_qp_err_stats; 1852 + struct ocrdma_tx_dbg_stats tx_dbg_stats; 1853 + struct ocrdma_rx_dbg_stats rx_dbg_stats; 1854 + } __packed; 1855 + 1856 + 1857 + struct mgmt_hba_attribs { 1858 + u8 flashrom_version_string[32]; 1859 + u8 manufacturer_name[32]; 1860 + u32 supported_modes; 1861 + u32 rsvd0[3]; 1862 + u8 ncsi_ver_string[12]; 1863 + u32 default_extended_timeout; 1864 + u8 controller_model_number[32]; 1865 + u8 controller_description[64]; 1866 + u8 controller_serial_number[32]; 1867 + u8 ip_version_string[32]; 1868 + u8 firmware_version_string[32]; 1869 + u8 bios_version_string[32]; 1870 + u8 redboot_version_string[32]; 1871 + u8 driver_version_string[32]; 1872 + u8 fw_on_flash_version_string[32]; 1873 + u32 functionalities_supported; 1874 + u16 max_cdblength; 1875 + u8 asic_revision; 1876 + u8 generational_guid[16]; 1877 + u8 hba_port_count; 1878 + u16 default_link_down_timeout; 1879 + u8 iscsi_ver_min_max; 1880 + u8 multifunction_device; 1881 + u8 cache_valid; 1882 + u8 hba_status; 1883 + u8 max_domains_supported; 1884 + u8 phy_port; 1885 + u32 firmware_post_status; 1886 + u32 hba_mtu[8]; 1887 + u32 rsvd1[4]; 1888 + }; 1889 + 1890 + struct mgmt_controller_attrib { 1891 + struct mgmt_hba_attribs hba_attribs; 1892 + u16 pci_vendor_id; 1893 + u16 pci_device_id; 1894 + u16 pci_sub_vendor_id; 1895 + u16 pci_sub_system_id; 1896 + u8 pci_bus_number; 1897 + u8 pci_device_number; 1898 + u8 pci_function_number; 1899 + u8 interface_type; 1900 + u64 unique_identifier; 1901 + u32 rsvd0[5]; 1902 + }; 1903 + 1904 + struct ocrdma_get_ctrl_attribs_rsp { 1905 + struct ocrdma_mbx_hdr hdr; 1906 + struct mgmt_controller_attrib ctrl_attribs; 1907 + }; 1908 + 1748 1909 1749 1910 #endif /* __OCRDMA_SLI_H__ */
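Many of the firmware counters above are kept as _lo/_hi 32-bit halves in the response layout. They are recombined on the host side, which is what the convert_to_64bit() calls in ocrdma_stats.c below do; a sketch of such a helper, assuming the obvious little-end-first pairing:

    #include <linux/types.h>

    /* Recombine a _lo/_hi counter pair into one 64-bit value. */
    static inline u64 counter64(u32 lo, u32 hi)
    {
        return ((u64)hi << 32) | lo;
    }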
+623
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2014 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. * 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #include <rdma/ib_addr.h> 29 + #include "ocrdma_stats.h" 30 + 31 + static struct dentry *ocrdma_dbgfs_dir; 32 + 33 + static int ocrdma_add_stat(char *start, char *pcur, 34 + char *name, u64 count) 35 + { 36 + char buff[128] = {0}; 37 + int cpy_len = 0; 38 + 39 + snprintf(buff, 128, "%s: %llu\n", name, count); 40 + cpy_len = strlen(buff); 41 + 42 + if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) { 43 + pr_err("%s: No space in stats buff\n", __func__); 44 + return 0; 45 + } 46 + 47 + memcpy(pcur, buff, cpy_len); 48 + return cpy_len; 49 + } 50 + 51 + static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) 52 + { 53 + struct stats_mem *mem = &dev->stats_mem; 54 + 55 + /* Alloc mbox command mem*/ 56 + mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 57 + sizeof(struct ocrdma_rdma_stats_resp)); 58 + 59 + mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size, 60 + &mem->pa, GFP_KERNEL); 61 + if (!mem->va) { 62 + pr_err("%s: stats mbox allocation failed\n", __func__); 63 + return false; 64 + } 65 + 66 + memset(mem->va, 0, mem->size); 67 + 68 + /* Alloc debugfs mem */ 69 + mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL); 70 + if (!mem->debugfs_mem) { 71 + pr_err("%s: stats debugfs mem allocation failed\n", __func__); 72 + return false; 73 + } 74 + 75 + return true; 76 + } 77 + 78 + static void ocrdma_release_stats_mem(struct ocrdma_dev *dev) 79 + { 80 + struct stats_mem *mem = &dev->stats_mem; 81 + 82 + if (mem->va) 83 + dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, 84 + mem->va, mem->pa); 85 + kfree(mem->debugfs_mem); 86 + } 87 + 88 + static char *ocrdma_resource_stats(struct ocrdma_dev *dev) 89 + { 90 + char *stats = dev->stats_mem.debugfs_mem, *pcur; 91 + struct ocrdma_rdma_stats_resp *rdma_stats = 92 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 93 + struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; 94 + 95 + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 96 + 97 + pcur = stats; 98 + pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds", 99 + (u64)rsrc_stats->dpp_pds); 100 + pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds", 101 + (u64)rsrc_stats->non_dpp_pds); 102 + pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps", 103 + 
(u64)rsrc_stats->rc_dpp_qps); 104 + pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps", 105 + (u64)rsrc_stats->uc_dpp_qps); 106 + pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps", 107 + (u64)rsrc_stats->ud_dpp_qps); 108 + pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps", 109 + (u64)rsrc_stats->rc_non_dpp_qps); 110 + pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps", 111 + (u64)rsrc_stats->uc_non_dpp_qps); 112 + pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps", 113 + (u64)rsrc_stats->ud_non_dpp_qps); 114 + pcur += ocrdma_add_stat(stats, pcur, "active_srqs", 115 + (u64)rsrc_stats->srqs); 116 + pcur += ocrdma_add_stat(stats, pcur, "active_rbqs", 117 + (u64)rsrc_stats->rbqs); 118 + pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr", 119 + (u64)rsrc_stats->r64K_nsmr); 120 + pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr", 121 + (u64)rsrc_stats->r64K_to_2M_nsmr); 122 + pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr", 123 + (u64)rsrc_stats->r2M_to_44M_nsmr); 124 + pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr", 125 + (u64)rsrc_stats->r44M_to_1G_nsmr); 126 + pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr", 127 + (u64)rsrc_stats->r1G_to_4G_nsmr); 128 + pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G", 129 + (u64)rsrc_stats->nsmr_count_4G_to_32G); 130 + pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr", 131 + (u64)rsrc_stats->r32G_to_64G_nsmr); 132 + pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr", 133 + (u64)rsrc_stats->r64G_to_128G_nsmr); 134 + pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr", 135 + (u64)rsrc_stats->r128G_to_higher_nsmr); 136 + pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr", 137 + (u64)rsrc_stats->embedded_nsmr); 138 + pcur += ocrdma_add_stat(stats, pcur, "active_frmr", 139 + (u64)rsrc_stats->frmr); 140 + pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps", 141 + (u64)rsrc_stats->prefetch_qps); 142 + pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps", 143 + (u64)rsrc_stats->ondemand_qps); 144 + pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr", 145 + (u64)rsrc_stats->phy_mr); 146 + pcur += ocrdma_add_stat(stats, pcur, "active_mw", 147 + (u64)rsrc_stats->mw); 148 + 149 + /* Print the threshold stats */ 150 + rsrc_stats = &rdma_stats->th_rsrc_stats; 151 + 152 + pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds", 153 + (u64)rsrc_stats->dpp_pds); 154 + pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds", 155 + (u64)rsrc_stats->non_dpp_pds); 156 + pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps", 157 + (u64)rsrc_stats->rc_dpp_qps); 158 + pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps", 159 + (u64)rsrc_stats->uc_dpp_qps); 160 + pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps", 161 + (u64)rsrc_stats->ud_dpp_qps); 162 + pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps", 163 + (u64)rsrc_stats->rc_non_dpp_qps); 164 + pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps", 165 + (u64)rsrc_stats->uc_non_dpp_qps); 166 + pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps", 167 + (u64)rsrc_stats->ud_non_dpp_qps); 168 + pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs", 169 + (u64)rsrc_stats->srqs); 170 + pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs", 171 + (u64)rsrc_stats->rbqs); 172 + pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr", 173 + (u64)rsrc_stats->r64K_nsmr); 174 + pcur += 
ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr", 175 + (u64)rsrc_stats->r64K_to_2M_nsmr); 176 + pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr", 177 + (u64)rsrc_stats->r2M_to_44M_nsmr); 178 + pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr", 179 + (u64)rsrc_stats->r44M_to_1G_nsmr); 180 + pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr", 181 + (u64)rsrc_stats->r1G_to_4G_nsmr); 182 + pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G", 183 + (u64)rsrc_stats->nsmr_count_4G_to_32G); 184 + pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr", 185 + (u64)rsrc_stats->r32G_to_64G_nsmr); 186 + pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr", 187 + (u64)rsrc_stats->r64G_to_128G_nsmr); 188 + pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr", 189 + (u64)rsrc_stats->r128G_to_higher_nsmr); 190 + pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr", 191 + (u64)rsrc_stats->embedded_nsmr); 192 + pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr", 193 + (u64)rsrc_stats->frmr); 194 + pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps", 195 + (u64)rsrc_stats->prefetch_qps); 196 + pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps", 197 + (u64)rsrc_stats->ondemand_qps); 198 + pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr", 199 + (u64)rsrc_stats->phy_mr); 200 + pcur += ocrdma_add_stat(stats, pcur, "threshold_mw", 201 + (u64)rsrc_stats->mw); 202 + return stats; 203 + } 204 + 205 + static char *ocrdma_rx_stats(struct ocrdma_dev *dev) 206 + { 207 + char *stats = dev->stats_mem.debugfs_mem, *pcur; 208 + struct ocrdma_rdma_stats_resp *rdma_stats = 209 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 210 + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; 211 + 212 + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 213 + 214 + pcur = stats; 215 + pcur += ocrdma_add_stat 216 + (stats, pcur, "roce_frame_bytes", 217 + convert_to_64bit(rx_stats->roce_frame_bytes_lo, 218 + rx_stats->roce_frame_bytes_hi)); 219 + pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops", 220 + (u64)rx_stats->roce_frame_icrc_drops); 221 + pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops", 222 + (u64)rx_stats->roce_frame_payload_len_drops); 223 + pcur += ocrdma_add_stat(stats, pcur, "ud_drops", 224 + (u64)rx_stats->ud_drops); 225 + pcur += ocrdma_add_stat(stats, pcur, "qp1_drops", 226 + (u64)rx_stats->qp1_drops); 227 + pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets", 228 + (u64)rx_stats->psn_error_request_packets); 229 + pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets", 230 + (u64)rx_stats->psn_error_resp_packets); 231 + pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts", 232 + (u64)rx_stats->rnr_nak_timeouts); 233 + pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives", 234 + (u64)rx_stats->rnr_nak_receives); 235 + pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops", 236 + (u64)rx_stats->roce_frame_rxmt_drops); 237 + pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors", 238 + (u64)rx_stats->nak_count_psn_sequence_errors); 239 + pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors", 240 + (u64)rx_stats->rc_drop_count_lookup_errors); 241 + pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks", 242 + (u64)rx_stats->rq_rnr_naks); 243 + pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks", 244 + (u64)rx_stats->srq_rnr_naks); 245 + pcur += ocrdma_add_stat(stats, pcur, "roce_frames", 246 + 
convert_to_64bit(rx_stats->roce_frames_lo, 247 + rx_stats->roce_frames_hi)); 248 + 249 + return stats; 250 + } 251 + 252 + static char *ocrdma_tx_stats(struct ocrdma_dev *dev) 253 + { 254 + char *stats = dev->stats_mem.debugfs_mem, *pcur; 255 + struct ocrdma_rdma_stats_resp *rdma_stats = 256 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 257 + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; 258 + 259 + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 260 + 261 + pcur = stats; 262 + pcur += ocrdma_add_stat(stats, pcur, "send_pkts", 263 + convert_to_64bit(tx_stats->send_pkts_lo, 264 + tx_stats->send_pkts_hi)); 265 + pcur += ocrdma_add_stat(stats, pcur, "write_pkts", 266 + convert_to_64bit(tx_stats->write_pkts_lo, 267 + tx_stats->write_pkts_hi)); 268 + pcur += ocrdma_add_stat(stats, pcur, "read_pkts", 269 + convert_to_64bit(tx_stats->read_pkts_lo, 270 + tx_stats->read_pkts_hi)); 271 + pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts", 272 + convert_to_64bit(tx_stats->read_rsp_pkts_lo, 273 + tx_stats->read_rsp_pkts_hi)); 274 + pcur += ocrdma_add_stat(stats, pcur, "ack_pkts", 275 + convert_to_64bit(tx_stats->ack_pkts_lo, 276 + tx_stats->ack_pkts_hi)); 277 + pcur += ocrdma_add_stat(stats, pcur, "send_bytes", 278 + convert_to_64bit(tx_stats->send_bytes_lo, 279 + tx_stats->send_bytes_hi)); 280 + pcur += ocrdma_add_stat(stats, pcur, "write_bytes", 281 + convert_to_64bit(tx_stats->write_bytes_lo, 282 + tx_stats->write_bytes_hi)); 283 + pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes", 284 + convert_to_64bit(tx_stats->read_req_bytes_lo, 285 + tx_stats->read_req_bytes_hi)); 286 + pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes", 287 + convert_to_64bit(tx_stats->read_rsp_bytes_lo, 288 + tx_stats->read_rsp_bytes_hi)); 289 + pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts", 290 + (u64)tx_stats->ack_timeouts); 291 + 292 + return stats; 293 + } 294 + 295 + static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) 296 + { 297 + char *stats = dev->stats_mem.debugfs_mem, *pcur; 298 + struct ocrdma_rdma_stats_resp *rdma_stats = 299 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 300 + struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats; 301 + 302 + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 303 + 304 + pcur = stats; 305 + pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes", 306 + convert_to_64bit(wqe_stats->large_send_rc_wqes_lo, 307 + wqe_stats->large_send_rc_wqes_hi)); 308 + pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes", 309 + convert_to_64bit(wqe_stats->large_write_rc_wqes_lo, 310 + wqe_stats->large_write_rc_wqes_hi)); 311 + pcur += ocrdma_add_stat(stats, pcur, "read_wqes", 312 + convert_to_64bit(wqe_stats->read_wqes_lo, 313 + wqe_stats->read_wqes_hi)); 314 + pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes", 315 + convert_to_64bit(wqe_stats->frmr_wqes_lo, 316 + wqe_stats->frmr_wqes_hi)); 317 + pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes", 318 + convert_to_64bit(wqe_stats->mw_bind_wqes_lo, 319 + wqe_stats->mw_bind_wqes_hi)); 320 + pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes", 321 + convert_to_64bit(wqe_stats->invalidate_wqes_lo, 322 + wqe_stats->invalidate_wqes_hi)); 323 + pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops", 324 + (u64)wqe_stats->dpp_wqe_drops); 325 + return stats; 326 + } 327 + 328 + static char *ocrdma_db_errstats(struct ocrdma_dev *dev) 329 + { 330 + char *stats = dev->stats_mem.debugfs_mem, *pcur; 331 + struct ocrdma_rdma_stats_resp *rdma_stats = 332 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 333 + struct 
ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats; 334 + 335 + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 336 + 337 + pcur = stats; 338 + pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors", 339 + (u64)db_err_stats->sq_doorbell_errors); 340 + pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors", 341 + (u64)db_err_stats->cq_doorbell_errors); 342 + pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors", 343 + (u64)db_err_stats->rq_srq_doorbell_errors); 344 + pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors", 345 + (u64)db_err_stats->cq_overflow_errors); 346 + return stats; 347 + } 348 + 349 + static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev) 350 + { 351 + char *stats = dev->stats_mem.debugfs_mem, *pcur; 352 + struct ocrdma_rdma_stats_resp *rdma_stats = 353 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 354 + struct ocrdma_rx_qp_err_stats *rx_qp_err_stats = 355 + &rdma_stats->rx_qp_err_stats; 356 + 357 + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 358 + 359 + pcur = stats; 360 + pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors", 361 + (u64)rx_qp_err_stats->nak_invalid_requst_errors); 362 + pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors", 363 + (u64)rx_qp_err_stats->nak_remote_operation_errors); 364 + pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors", 365 + (u64)rx_qp_err_stats->nak_count_remote_access_errors); 366 + pcur += ocrdma_add_stat(stats, pcur, "local_length_errors", 367 + (u64)rx_qp_err_stats->local_length_errors); 368 + pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors", 369 + (u64)rx_qp_err_stats->local_protection_errors); 370 + pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors", 371 + (u64)rx_qp_err_stats->local_qp_operation_errors); 372 + return stats; 373 + } 374 + 375 + static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev) 376 + { 377 + char *stats = dev->stats_mem.debugfs_mem, *pcur; 378 + struct ocrdma_rdma_stats_resp *rdma_stats = 379 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 380 + struct ocrdma_tx_qp_err_stats *tx_qp_err_stats = 381 + &rdma_stats->tx_qp_err_stats; 382 + 383 + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 384 + 385 + pcur = stats; 386 + pcur += ocrdma_add_stat(stats, pcur, "local_length_errors", 387 + (u64)tx_qp_err_stats->local_length_errors); 388 + pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors", 389 + (u64)tx_qp_err_stats->local_protection_errors); 390 + pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors", 391 + (u64)tx_qp_err_stats->local_qp_operation_errors); 392 + pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors", 393 + (u64)tx_qp_err_stats->retry_count_exceeded_errors); 394 + pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors", 395 + (u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors); 396 + return stats; 397 + } 398 + 399 + static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev) 400 + { 401 + int i; 402 + char *pstats = dev->stats_mem.debugfs_mem; 403 + struct ocrdma_rdma_stats_resp *rdma_stats = 404 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 405 + struct ocrdma_tx_dbg_stats *tx_dbg_stats = 406 + &rdma_stats->tx_dbg_stats; 407 + 408 + memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM)); 409 + 410 + for (i = 0; i < 100; i++) 411 + pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i, 412 + tx_dbg_stats->data[i]); 413 + 414 + return dev->stats_mem.debugfs_mem; 415 + } 416 + 417 + static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev) 418 + { 
419 + int i; 420 + char *pstats = dev->stats_mem.debugfs_mem; 421 + struct ocrdma_rdma_stats_resp *rdma_stats = 422 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 423 + struct ocrdma_rx_dbg_stats *rx_dbg_stats = 424 + &rdma_stats->rx_dbg_stats; 425 + 426 + memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM)); 427 + 428 + for (i = 0; i < 200; i++) 429 + pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i, 430 + rx_dbg_stats->data[i]); 431 + 432 + return dev->stats_mem.debugfs_mem; 433 + } 434 + 435 + static void ocrdma_update_stats(struct ocrdma_dev *dev) 436 + { 437 + ulong now = jiffies, secs; 438 + int status = 0; 439 + 440 + secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; 441 + if (secs) { 442 + /* update */ 443 + status = ocrdma_mbx_rdma_stats(dev, false); 444 + if (status) 445 + pr_err("%s: stats mbox failed with status = %d\n", 446 + __func__, status); 447 + dev->last_stats_time = jiffies; 448 + } 449 + } 450 + 451 + static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, 452 + size_t usr_buf_len, loff_t *ppos) 453 + { 454 + struct ocrdma_stats *pstats = filp->private_data; 455 + struct ocrdma_dev *dev = pstats->dev; 456 + ssize_t status = 0; 457 + char *data = NULL; 458 + 459 + /* No partial reads */ 460 + if (*ppos != 0) 461 + return 0; 462 + 463 + mutex_lock(&dev->stats_lock); 464 + 465 + ocrdma_update_stats(dev); 466 + 467 + switch (pstats->type) { 468 + case OCRDMA_RSRC_STATS: 469 + data = ocrdma_resource_stats(dev); 470 + break; 471 + case OCRDMA_RXSTATS: 472 + data = ocrdma_rx_stats(dev); 473 + break; 474 + case OCRDMA_WQESTATS: 475 + data = ocrdma_wqe_stats(dev); 476 + break; 477 + case OCRDMA_TXSTATS: 478 + data = ocrdma_tx_stats(dev); 479 + break; 480 + case OCRDMA_DB_ERRSTATS: 481 + data = ocrdma_db_errstats(dev); 482 + break; 483 + case OCRDMA_RXQP_ERRSTATS: 484 + data = ocrdma_rxqp_errstats(dev); 485 + break; 486 + case OCRDMA_TXQP_ERRSTATS: 487 + data = ocrdma_txqp_errstats(dev); 488 + break; 489 + case OCRDMA_TX_DBG_STATS: 490 + data = ocrdma_tx_dbg_stats(dev); 491 + break; 492 + case OCRDMA_RX_DBG_STATS: 493 + data = ocrdma_rx_dbg_stats(dev); 494 + break; 495 + 496 + default: 497 + status = -EFAULT; 498 + goto exit; 499 + } 500 + 501 + if (usr_buf_len < strlen(data)) { 502 + status = -ENOSPC; 503 + goto exit; 504 + } 505 + 506 + status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data, 507 + strlen(data)); 508 + exit: 509 + mutex_unlock(&dev->stats_lock); 510 + return status; 511 + } 512 + 513 + static int ocrdma_debugfs_open(struct inode *inode, struct file *file) 514 + { 515 + if (inode->i_private) 516 + file->private_data = inode->i_private; 517 + return 0; 518 + } 519 + 520 + static const struct file_operations ocrdma_dbg_ops = { 521 + .owner = THIS_MODULE, 522 + .open = ocrdma_debugfs_open, 523 + .read = ocrdma_dbgfs_ops_read, 524 + }; 525 + 526 + void ocrdma_add_port_stats(struct ocrdma_dev *dev) 527 + { 528 + if (!ocrdma_dbgfs_dir) 529 + return; 530 + 531 + /* Create post stats base dir */ 532 + dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir); 533 + if (!dev->dir) 534 + goto err; 535 + 536 + dev->rsrc_stats.type = OCRDMA_RSRC_STATS; 537 + dev->rsrc_stats.dev = dev; 538 + if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir, 539 + &dev->rsrc_stats, &ocrdma_dbg_ops)) 540 + goto err; 541 + 542 + dev->rx_stats.type = OCRDMA_RXSTATS; 543 + dev->rx_stats.dev = dev; 544 + if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir, 545 + &dev->rx_stats, &ocrdma_dbg_ops)) 546 + goto err; 547 + 548 + 
dev->wqe_stats.type = OCRDMA_WQESTATS; 549 + dev->wqe_stats.dev = dev; 550 + if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir, 551 + &dev->wqe_stats, &ocrdma_dbg_ops)) 552 + goto err; 553 + 554 + dev->tx_stats.type = OCRDMA_TXSTATS; 555 + dev->tx_stats.dev = dev; 556 + if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir, 557 + &dev->tx_stats, &ocrdma_dbg_ops)) 558 + goto err; 559 + 560 + dev->db_err_stats.type = OCRDMA_DB_ERRSTATS; 561 + dev->db_err_stats.dev = dev; 562 + if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir, 563 + &dev->db_err_stats, &ocrdma_dbg_ops)) 564 + goto err; 565 + 566 + 567 + dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS; 568 + dev->tx_qp_err_stats.dev = dev; 569 + if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir, 570 + &dev->tx_qp_err_stats, &ocrdma_dbg_ops)) 571 + goto err; 572 + 573 + dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS; 574 + dev->rx_qp_err_stats.dev = dev; 575 + if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir, 576 + &dev->rx_qp_err_stats, &ocrdma_dbg_ops)) 577 + goto err; 578 + 579 + 580 + dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS; 581 + dev->tx_dbg_stats.dev = dev; 582 + if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir, 583 + &dev->tx_dbg_stats, &ocrdma_dbg_ops)) 584 + goto err; 585 + 586 + dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS; 587 + dev->rx_dbg_stats.dev = dev; 588 + if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir, 589 + &dev->rx_dbg_stats, &ocrdma_dbg_ops)) 590 + goto err; 591 + 592 + /* Now create dma_mem for stats mbx command */ 593 + if (!ocrdma_alloc_stats_mem(dev)) 594 + goto err; 595 + 596 + mutex_init(&dev->stats_lock); 597 + 598 + return; 599 + err: 600 + ocrdma_release_stats_mem(dev); 601 + debugfs_remove_recursive(dev->dir); 602 + dev->dir = NULL; 603 + } 604 + 605 + void ocrdma_rem_port_stats(struct ocrdma_dev *dev) 606 + { 607 + if (!dev->dir) 608 + return; 609 + mutex_destroy(&dev->stats_lock); 610 + ocrdma_release_stats_mem(dev); 611 + debugfs_remove(dev->dir); 612 + } 613 + 614 + void ocrdma_init_debugfs(void) 615 + { 616 + /* Create base dir in debugfs root dir */ 617 + ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL); 618 + } 619 + 620 + void ocrdma_rem_debugfs(void) 621 + { 622 + debugfs_remove_recursive(ocrdma_dbgfs_dir); 623 + }
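The file above follows the standard debugfs recipe: one directory per device under a module-wide "ocrdma" root, one read-only file per counter group, and a shared file_operations whose ->read renders text on demand. Reduced to its skeleton, with demo names in place of the driver's:

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/string.h>

    static ssize_t demo_read(struct file *filp, char __user *buf,
                             size_t len, loff_t *ppos)
    {
        const char *text = "counter: 0\n";    /* rendered per read */

        return simple_read_from_buffer(buf, len, ppos, text, strlen(text));
    }

    static const struct file_operations demo_fops = {
        .owner = THIS_MODULE,
        .open  = simple_open,
        .read  = demo_read,
    };

    static struct dentry *demo_dir;

    static void demo_register(void)
    {
        demo_dir = debugfs_create_dir("demo", NULL);
        debugfs_create_file("stats", S_IRUSR, demo_dir, NULL, &demo_fops);
    }

With debugfs mounted in the usual place, the counter groups added here land under /sys/kernel/debug/ocrdma/<ibdev-name>/ (resource_stats, rx_stats, wqe_stats, and so on) and can be read with cat.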
+54
drivers/infiniband/hw/ocrdma/ocrdma_stats.h
··· 1 + /******************************************************************* 2 + * This file is part of the Emulex RoCE Device Driver for * 3 + * RoCE (RDMA over Converged Ethernet) adapters. * 4 + * Copyright (C) 2008-2014 Emulex. All rights reserved. * 5 + * EMULEX and SLI are trademarks of Emulex. * 6 + * www.emulex.com * 7 + * * 8 + * This program is free software; you can redistribute it and/or * 9 + * modify it under the terms of version 2 of the GNU General * 10 + * Public License as published by the Free Software Foundation. * 11 + * This program is distributed in the hope that it will be useful. * 12 + * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 + * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 + * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 + * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 + * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 + * more details, a copy of which can be found in the file COPYING * 18 + * included with this package. * 19 + * 20 + * Contact Information: 21 + * linux-drivers@emulex.com 22 + * 23 + * Emulex 24 + * 3333 Susan Street 25 + * Costa Mesa, CA 92626 26 + *******************************************************************/ 27 + 28 + #ifndef __OCRDMA_STATS_H__ 29 + #define __OCRDMA_STATS_H__ 30 + 31 + #include <linux/debugfs.h> 32 + #include "ocrdma.h" 33 + #include "ocrdma_hw.h" 34 + 35 + #define OCRDMA_MAX_DBGFS_MEM 4096 36 + 37 + enum OCRDMA_STATS_TYPE { 38 + OCRDMA_RSRC_STATS, 39 + OCRDMA_RXSTATS, 40 + OCRDMA_WQESTATS, 41 + OCRDMA_TXSTATS, 42 + OCRDMA_DB_ERRSTATS, 43 + OCRDMA_RXQP_ERRSTATS, 44 + OCRDMA_TXQP_ERRSTATS, 45 + OCRDMA_TX_DBG_STATS, 46 + OCRDMA_RX_DBG_STATS 47 + }; 48 + 49 + void ocrdma_rem_debugfs(void); 50 + void ocrdma_init_debugfs(void); 51 + void ocrdma_rem_port_stats(struct ocrdma_dev *dev); 52 + void ocrdma_add_port_stats(struct ocrdma_dev *dev); 53 + 54 + #endif /* __OCRDMA_STATS_H__ */
+96 -71
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 53 53 54 54 dev = get_ocrdma_dev(ibdev); 55 55 memset(sgid, 0, sizeof(*sgid)); 56 - if (index >= OCRDMA_MAX_SGID) 56 + if (index > OCRDMA_MAX_SGID) 57 57 return -EINVAL; 58 58 59 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); ··· 89 89 attr->max_cq = dev->attr.max_cq; 90 90 attr->max_cqe = dev->attr.max_cqe; 91 91 attr->max_mr = dev->attr.max_mr; 92 - attr->max_mw = 0; 92 + attr->max_mw = dev->attr.max_mw; 93 93 attr->max_pd = dev->attr.max_pd; 94 94 attr->atomic_cap = 0; 95 95 attr->max_fmr = 0; ··· 143 143 *ib_width = IB_WIDTH_1X; 144 144 } 145 145 } 146 - 147 146 148 147 int ocrdma_query_port(struct ib_device *ibdev, 149 148 u8 port, struct ib_port_attr *props) ··· 266 267 267 268 if (udata && uctx) { 268 269 pd->dpp_enabled = 269 - dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY; 270 + ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R; 270 271 pd->num_dpp_qp = 271 272 pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0; 272 273 } ··· 837 838 838 839 status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); 839 840 840 - if (mr->hwmr.fr_mr == 0) 841 - ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 841 + ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 842 842 843 843 /* it could be user registered memory. */ 844 844 if (mr->umem) ··· 906 908 spin_lock_init(&cq->comp_handler_lock); 907 909 INIT_LIST_HEAD(&cq->sq_head); 908 910 INIT_LIST_HEAD(&cq->rq_head); 911 + cq->first_arm = true; 909 912 910 913 if (ib_ctx) { 911 914 uctx = get_ocrdma_ucontext(ib_ctx); ··· 924 925 goto ctx_err; 925 926 } 926 927 cq->phase = OCRDMA_CQE_VALID; 927 - cq->arm_needed = true; 928 928 dev->cq_tbl[cq->id] = cq; 929 - 930 929 return &cq->ibcq; 931 930 932 931 ctx_err: ··· 947 950 return status; 948 951 } 949 952 953 + static void ocrdma_flush_cq(struct ocrdma_cq *cq) 954 + { 955 + int cqe_cnt; 956 + int valid_count = 0; 957 + unsigned long flags; 958 + 959 + struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device); 960 + struct ocrdma_cqe *cqe = NULL; 961 + 962 + cqe = cq->va; 963 + cqe_cnt = cq->cqe_cnt; 964 + 965 + /* Last irq might have scheduled a polling thread 966 + * sync-up with it before hard flushing. 
967 + */ 968 + spin_lock_irqsave(&cq->cq_lock, flags); 969 + while (cqe_cnt) { 970 + if (is_cqe_valid(cq, cqe)) 971 + valid_count++; 972 + cqe++; 973 + cqe_cnt--; 974 + } 975 + ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count); 976 + spin_unlock_irqrestore(&cq->cq_lock, flags); 977 + } 978 + 950 979 int ocrdma_destroy_cq(struct ib_cq *ibcq) 951 980 { 952 981 int status; 953 982 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 983 + struct ocrdma_eq *eq = NULL; 954 984 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 955 985 int pdid = 0; 986 + u32 irq, indx; 987 + 988 + dev->cq_tbl[cq->id] = NULL; 989 + indx = ocrdma_get_eq_table_index(dev, cq->eqn); 990 + if (indx == -EINVAL) 991 + BUG(); 992 + 993 + eq = &dev->eq_tbl[indx]; 994 + irq = ocrdma_get_irq(dev, eq); 995 + synchronize_irq(irq); 996 + ocrdma_flush_cq(cq); 956 997 957 998 status = ocrdma_mbx_destroy_cq(dev, cq); 958 - 959 999 if (cq->ucontext) { 960 1000 pdid = cq->ucontext->cntxt_pd->id; 961 1001 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, ··· 1001 967 ocrdma_get_db_addr(dev, pdid), 1002 968 dev->nic_info.db_page_size); 1003 969 } 1004 - dev->cq_tbl[cq->id] = NULL; 1005 970 1006 971 kfree(cq); 1007 972 return status; ··· 1123 1090 } 1124 1091 uresp.db_page_addr = usr_db; 1125 1092 uresp.db_page_size = dev->nic_info.db_page_size; 1126 - if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1127 - uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; 1128 - uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; 1129 - uresp.db_shift = 24; 1130 - } else { 1131 - uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET; 1132 - uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; 1133 - uresp.db_shift = 16; 1134 - } 1093 + uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET; 1094 + uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; 1095 + uresp.db_shift = OCRDMA_DB_RQ_SHIFT; 1135 1096 1136 1097 if (qp->dpp_enabled) { 1137 1098 uresp.dpp_credit = dpp_credit_lmt; ··· 1157 1130 static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp, 1158 1131 struct ocrdma_pd *pd) 1159 1132 { 1160 - if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1133 + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { 1161 1134 qp->sq_db = dev->nic_info.db + 1162 1135 (pd->id * dev->nic_info.db_page_size) + 1163 1136 OCRDMA_DB_GEN2_SQ_OFFSET; ··· 1206 1179 qp->state = OCRDMA_QPS_RST; 1207 1180 qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? 
true : false; 1208 1181 } 1209 - 1210 1182 1211 1183 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev, 1212 1184 struct ib_qp_init_attr *attrs) ··· 1292 1266 return ERR_PTR(status); 1293 1267 } 1294 1268 1295 - 1296 - static void ocrdma_flush_rq_db(struct ocrdma_qp *qp) 1297 - { 1298 - if (qp->db_cache) { 1299 - u32 val = qp->rq.dbid | (qp->db_cache << 1300 - ocrdma_get_num_posted_shift(qp)); 1301 - iowrite32(val, qp->rq_db); 1302 - qp->db_cache = 0; 1303 - } 1304 - } 1305 - 1306 1269 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1307 1270 int attr_mask) 1308 1271 { ··· 1309 1294 */ 1310 1295 if (status < 0) 1311 1296 return status; 1312 - status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps); 1313 - if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR) 1314 - ocrdma_flush_rq_db(qp); 1297 + status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask); 1315 1298 1316 1299 return status; 1317 1300 } ··· 1521 1508 int discard_cnt = 0; 1522 1509 u32 cur_getp, stop_getp; 1523 1510 struct ocrdma_cqe *cqe; 1524 - u32 qpn = 0; 1511 + u32 qpn = 0, wqe_idx = 0; 1525 1512 1526 1513 spin_lock_irqsave(&cq->cq_lock, cq_flags); 1527 1514 ··· 1550 1537 if (qpn == 0 || qpn != qp->id) 1551 1538 goto skip_cqe; 1552 1539 1553 - /* mark cqe discarded so that it is not picked up later 1554 - * in the poll_cq(). 1555 - */ 1556 - discard_cnt += 1; 1557 - cqe->cmn.qpn = 0; 1558 1540 if (is_cqe_for_sq(cqe)) { 1559 1541 ocrdma_hwq_inc_tail(&qp->sq); 1560 1542 } else { 1561 1543 if (qp->srq) { 1544 + wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> 1545 + OCRDMA_CQE_BUFTAG_SHIFT) & 1546 + qp->srq->rq.max_wqe_idx; 1547 + if (wqe_idx < 1) 1548 + BUG(); 1562 1549 spin_lock_irqsave(&qp->srq->q_lock, flags); 1563 1550 ocrdma_hwq_inc_tail(&qp->srq->rq); 1564 - ocrdma_srq_toggle_bit(qp->srq, cur_getp); 1551 + ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1); 1565 1552 spin_unlock_irqrestore(&qp->srq->q_lock, flags); 1566 1553 1567 1554 } else { 1568 1555 ocrdma_hwq_inc_tail(&qp->rq); 1569 1556 } 1570 1557 } 1558 + /* mark cqe discarded so that it is not picked up later 1559 + * in the poll_cq(). 
1560 + */ 1561 + discard_cnt += 1; 1562 + cqe->cmn.qpn = 0; 1571 1563 skip_cqe: 1572 1564 cur_getp = (cur_getp + 1) % cq->max_hw_cqe; 1573 1565 } while (cur_getp != stop_getp); ··· 1675 1657 (srq->pd->id * dev->nic_info.db_page_size); 1676 1658 uresp.db_page_size = dev->nic_info.db_page_size; 1677 1659 uresp.num_rqe_allocated = srq->rq.max_cnt; 1678 - if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1660 + if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { 1679 1661 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET; 1680 1662 uresp.db_shift = 24; 1681 1663 } else { ··· 2025 2007 fast_reg->num_sges = wr->wr.fast_reg.page_list_len; 2026 2008 fast_reg->size_sge = 2027 2009 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); 2028 - mr = (struct ocrdma_mr *) (unsigned long) qp->dev->stag_arr[(hdr->lkey >> 8) & 2029 - (OCRDMA_MAX_STAG - 1)]; 2010 + mr = (struct ocrdma_mr *) (unsigned long) 2011 + qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; 2030 2012 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); 2031 2013 return 0; 2032 2014 } 2033 2015 2034 2016 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp) 2035 2017 { 2036 - u32 val = qp->sq.dbid | (1 << 16); 2018 + u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); 2037 2019 2038 2020 iowrite32(val, qp->sq_db); 2039 2021 } ··· 2138 2120 2139 2121 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp) 2140 2122 { 2141 - u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp)); 2123 + u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT); 2142 2124 2143 - if (qp->state != OCRDMA_QPS_INIT) 2144 - iowrite32(val, qp->rq_db); 2145 - else 2146 - qp->db_cache++; 2125 + iowrite32(val, qp->rq_db); 2147 2126 } 2148 2127 2149 2128 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr, ··· 2226 2211 2227 2212 if (row == srq->bit_fields_len) 2228 2213 BUG(); 2229 - return indx; 2214 + return indx + 1; /* Use from index 1 */ 2230 2215 } 2231 2216 2232 2217 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq) ··· 2563 2548 2564 2549 srq = get_ocrdma_srq(qp->ibqp.srq); 2565 2550 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> 2566 - OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx; 2551 + OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx; 2552 + if (wqe_idx < 1) 2553 + BUG(); 2554 + 2567 2555 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx]; 2568 2556 spin_lock_irqsave(&srq->q_lock, flags); 2569 - ocrdma_srq_toggle_bit(srq, wqe_idx); 2557 + ocrdma_srq_toggle_bit(srq, wqe_idx - 1); 2570 2558 spin_unlock_irqrestore(&srq->q_lock, flags); 2571 2559 ocrdma_hwq_inc_tail(&srq->rq); 2572 2560 } ··· 2721 2703 } 2722 2704 stop_cqe: 2723 2705 cq->getp = cur_getp; 2724 - if (polled_hw_cqes || expand || stop) { 2725 - ocrdma_ring_cq_db(dev, cq->id, cq->armed, cq->solicited, 2706 + if (cq->deferred_arm) { 2707 + ocrdma_ring_cq_db(dev, cq->id, true, cq->deferred_sol, 2726 2708 polled_hw_cqes); 2709 + cq->deferred_arm = false; 2710 + cq->deferred_sol = false; 2711 + } else { 2712 + /* We need to pop the CQE. 
No need to arm */ 2713 + ocrdma_ring_cq_db(dev, cq->id, false, cq->deferred_sol, 2714 + polled_hw_cqes); 2715 + cq->deferred_sol = false; 2727 2716 } 2717 + 2728 2718 return i; 2729 2719 } 2730 2720 ··· 2804 2778 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 2805 2779 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); 2806 2780 u16 cq_id; 2807 - u16 cur_getp; 2808 - struct ocrdma_cqe *cqe; 2809 2781 unsigned long flags; 2782 + bool arm_needed = false, sol_needed = false; 2810 2783 2811 2784 cq_id = cq->id; 2812 2785 2813 2786 spin_lock_irqsave(&cq->cq_lock, flags); 2814 2787 if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED) 2815 - cq->armed = true; 2788 + arm_needed = true; 2816 2789 if (cq_flags & IB_CQ_SOLICITED) 2817 - cq->solicited = true; 2790 + sol_needed = true; 2818 2791 2819 - cur_getp = cq->getp; 2820 - cqe = cq->va + cur_getp; 2821 - 2822 - /* check whether any valid cqe exist or not, if not then safe to 2823 - * arm. If cqe is not yet consumed, then let it get consumed and then 2824 - * we arm it to avoid false interrupts. 2825 - */ 2826 - if (!is_cqe_valid(cq, cqe) || cq->arm_needed) { 2827 - cq->arm_needed = false; 2828 - ocrdma_ring_cq_db(dev, cq_id, cq->armed, cq->solicited, 0); 2792 + if (cq->first_arm) { 2793 + ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0); 2794 + cq->first_arm = false; 2795 + goto skip_defer; 2829 2796 } 2797 + cq->deferred_arm = true; 2798 + 2799 + skip_defer: 2800 + cq->deferred_sol = sol_needed; 2830 2801 spin_unlock_irqrestore(&cq->cq_lock, flags); 2802 + 2831 2803 return 0; 2832 2804 } 2833 2805 ··· 2860 2836 goto mbx_err; 2861 2837 mr->ibmr.rkey = mr->hwmr.lkey; 2862 2838 mr->ibmr.lkey = mr->hwmr.lkey; 2863 - dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = mr; 2839 + dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = 2840 + (unsigned long) mr; 2864 2841 return &mr->ibmr; 2865 2842 mbx_err: 2866 2843 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
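The CQ arming rework above defers re-arming to the poll path: only the very first arm is written to the doorbell immediately, later requests just set deferred_arm, and ocrdma_poll_hwcq() arms the CQ in the same doorbell write as the credit update for the CQEs it just popped, so the hardware is not re-armed while entries are still pending. Condensed, with ring_db() standing in for ocrdma_ring_cq_db():

    #include <linux/types.h>

    struct demo_cq {
        bool first_arm, deferred_arm, deferred_sol;
    };

    void ring_db(struct demo_cq *cq, bool arm, bool sol, int credits); /* placeholder */

    static void demo_req_notify(struct demo_cq *cq, bool sol_needed)
    {
        if (cq->first_arm) {
            ring_db(cq, true, sol_needed, 0);  /* arm once, up front */
            cq->first_arm = false;
        } else {
            cq->deferred_arm = true;           /* poll path arms later */
        }
        cq->deferred_sol = sol_needed;
    }

    static void demo_poll_done(struct demo_cq *cq, int polled)
    {
        /* credit update and (possibly deferred) arm in one doorbell */
        ring_db(cq, cq->deferred_arm, cq->deferred_sol, polled);
        cq->deferred_arm = false;
        cq->deferred_sol = false;
    }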
+9 -3
drivers/infiniband/hw/qib/qib.h
··· 868 868 /* last buffer for user use */ 869 869 u32 lastctxt_piobuf; 870 870 871 - /* saturating counter of (non-port-specific) device interrupts */ 872 - u32 int_counter; 871 + /* reset value */ 872 + u64 z_int_counter; 873 + /* percpu intcounter */ 874 + u64 __percpu *int_counter; 873 875 874 876 /* pio bufs allocated per ctxt */ 875 877 u32 pbufsctxt; ··· 1186 1184 void qib_set_ctxtcnt(struct qib_devdata *); 1187 1185 int qib_create_ctxts(struct qib_devdata *dd); 1188 1186 struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *, u32, int); 1189 - void qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); 1187 + int qib_init_pportdata(struct qib_pportdata *, struct qib_devdata *, u8, u8); 1190 1188 void qib_free_ctxtdata(struct qib_devdata *, struct qib_ctxtdata *); 1191 1189 1192 1190 u32 qib_kreceive(struct qib_ctxtdata *, u32 *, u32 *); ··· 1451 1449 void qib_nomsix(struct qib_devdata *); 1452 1450 void qib_pcie_getcmd(struct qib_devdata *, u16 *, u8 *, u8 *); 1453 1451 void qib_pcie_reenable(struct qib_devdata *, u16, u8, u8); 1452 + /* interrupts for device */ 1453 + u64 qib_int_counter(struct qib_devdata *); 1454 + /* interrupt for all devices */ 1455 + u64 qib_sps_ints(void); 1454 1456 1455 1457 /* 1456 1458 * dma_addr wrappers - all 0's invalid for hw
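The device interrupt counter moves from a single shared u32 to a percpu u64: the interrupt hot path does a lockless per-CPU increment and the rare readers sum across CPUs, with z_int_counter keeping the value at the last counter reset so a delta can be reported without clearing anything. The scheme in isolation:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    static u64 __percpu *int_counter;

    /* interrupt hot path: no lock, no shared-cacheline bouncing */
    static inline void count_irq(void)
    {
        this_cpu_inc(*int_counter);
    }

    /* slow read path: fold the per-CPU values together */
    static u64 irq_total(void)
    {
        u64 sum = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            sum += *per_cpu_ptr(int_counter, cpu);
        return sum;
    }

    /* setup, e.g. at probe time: int_counter = alloc_percpu(u64); */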
+26 -26
drivers/infiniband/hw/qib/qib_diag.c
··· 546 546 size_t count, loff_t *off) 547 547 { 548 548 u32 __iomem *piobuf; 549 - u32 plen, clen, pbufn; 549 + u32 plen, pbufn, maxlen_reserve; 550 550 struct qib_diag_xpkt dp; 551 551 u32 *tmpbuf = NULL; 552 552 struct qib_devdata *dd; ··· 590 590 } 591 591 ppd = &dd->pport[dp.port - 1]; 592 592 593 - /* need total length before first word written */ 594 - /* +1 word is for the qword padding */ 595 - plen = sizeof(u32) + dp.len; 596 - clen = dp.len >> 2; 597 - 598 - if ((plen + 4) > ppd->ibmaxlen) { 593 + /* 594 + * need total length before first word written, plus 2 Dwords. One Dword 595 + * is for padding so we get the full user data when not aligned on 596 + * a word boundary. The other Dword is to make sure we have room for the 597 + * ICRC which gets tacked on later. 598 + */ 599 + maxlen_reserve = 2 * sizeof(u32); 600 + if (dp.len > ppd->ibmaxlen - maxlen_reserve) { 599 601 ret = -EINVAL; 600 - goto bail; /* before writing pbc */ 602 + goto bail; 601 603 } 604 + 605 + plen = sizeof(u32) + dp.len; 606 + 602 607 tmpbuf = vmalloc(plen); 603 608 if (!tmpbuf) { 604 609 qib_devinfo(dd->pcidev, ··· 643 638 */ 644 639 if (dd->flags & QIB_PIO_FLUSH_WC) { 645 640 qib_flush_wc(); 646 - qib_pio_copy(piobuf + 2, tmpbuf, clen - 1); 641 + qib_pio_copy(piobuf + 2, tmpbuf, plen - 1); 647 642 qib_flush_wc(); 648 - __raw_writel(tmpbuf[clen - 1], piobuf + clen + 1); 643 + __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1); 649 644 } else 650 - qib_pio_copy(piobuf + 2, tmpbuf, clen); 645 + qib_pio_copy(piobuf + 2, tmpbuf, plen); 651 646 652 647 if (dd->flags & QIB_USE_SPCL_TRIG) { 653 648 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; ··· 694 689 const struct diag_observer *op) 695 690 { 696 691 struct diag_observer_list_elt *olp; 697 - int ret = -EINVAL; 692 + unsigned long flags; 698 693 699 694 if (!dd || !op) 700 - goto bail; 701 - ret = -ENOMEM; 695 + return -EINVAL; 702 696 olp = vmalloc(sizeof *olp); 703 697 if (!olp) { 704 698 pr_err("vmalloc for observer failed\n"); 705 - goto bail; 699 + return -ENOMEM; 706 700 } 707 - if (olp) { 708 - unsigned long flags; 709 701 710 - spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); 711 - olp->op = op; 712 - olp->next = dd->diag_observer_list; 713 - dd->diag_observer_list = olp; 714 - spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); 715 - ret = 0; 716 - } 717 - bail: 718 - return ret; 702 + spin_lock_irqsave(&dd->qib_diag_trans_lock, flags); 703 + olp->op = op; 704 + olp->next = dd->diag_observer_list; 705 + dd->diag_observer_list = olp; 706 + spin_unlock_irqrestore(&dd->qib_diag_trans_lock, flags); 707 + 708 + return 0; 719 709 } 720 710 721 711 /* Remove all registered observers when device is closed */
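One detail in the diag-packet bounds check above: the reserve (one dword of padding plus one for the ICRC) moved to the right-hand side of the comparison, dp.len > ppd->ibmaxlen - maxlen_reserve. With unsigned arithmetic this shape cannot wrap, whereas adding a reserve to a hostile user-supplied length can. A standalone user-space C demonstration of the difference (values are illustrative, not driver data):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t maxlen  = 4096;                /* known-good port maximum */
        uint32_t reserve = 8;                   /* pad dword + ICRC dword */
        uint32_t len     = UINT32_MAX - 4;      /* hostile user-supplied length */

        /* Sum form: len + reserve wraps to 3, so the check "passes". */
        if (len + reserve > maxlen)
                printf("sum form: rejected\n");
        else
                printf("sum form: ACCEPTED despite overflow\n");

        /* Subtract form: maxlen >= reserve, so nothing can wrap. */
        if (len > maxlen - reserve)
                printf("subtract form: rejected\n");
        return 0;
}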
+4 -17
drivers/infiniband/hw/qib/qib_dma.c
··· 108 108 ret = 0; 109 109 break; 110 110 } 111 + sg->dma_address = addr + sg->offset; 112 + #ifdef CONFIG_NEED_SG_DMA_LENGTH 113 + sg->dma_length = sg->length; 114 + #endif 111 115 } 112 116 return ret; 113 117 } ··· 121 117 enum dma_data_direction direction) 122 118 { 123 119 BUG_ON(!valid_dma_direction(direction)); 124 - } 125 - 126 - static u64 qib_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) 127 - { 128 - u64 addr = (u64) page_address(sg_page(sg)); 129 - 130 - if (addr) 131 - addr += sg->offset; 132 - return addr; 133 - } 134 - 135 - static unsigned int qib_sg_dma_len(struct ib_device *dev, 136 - struct scatterlist *sg) 137 - { 138 - return sg->length; 139 120 } 140 121 141 122 static void qib_sync_single_for_cpu(struct ib_device *dev, u64 addr, ··· 162 173 .unmap_page = qib_dma_unmap_page, 163 174 .map_sg = qib_map_sg, 164 175 .unmap_sg = qib_unmap_sg, 165 - .dma_address = qib_sg_dma_address, 166 - .dma_len = qib_sg_dma_len, 167 176 .sync_single_for_cpu = qib_sync_single_for_cpu, 168 177 .sync_single_for_device = qib_sync_single_for_device, 169 178 .alloc_coherent = qib_dma_alloc_coherent,
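With the .dma_address/.dma_len callbacks gone from the qib DMA ops, the software map_sg must leave sg->dma_address (and sg->dma_length under CONFIG_NEED_SG_DMA_LENGTH) populated, since the 'sgwrapper' branch in this merge has ib_sg_dma_address()/ib_sg_dma_len() read the scatterlist fields directly. A sketch of such a no-IOMMU map_sg (toy name; like the qib version, it assumes the pages sit in the kernel direct map):

#include <linux/bug.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Software "DMA": the consumer is the CPU, so a kernel virtual address
 * stands in for a bus address. Returns nents on success, 0 on failure,
 * matching map_sg semantics. */
static int toy_map_sg(struct scatterlist *sgl, int nents,
                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(dir));

        for_each_sg(sgl, sg, nents, i) {
                u64 addr = (u64) page_address(sg_page(sg));

                if (!addr)      /* highmem page: no direct-map address */
                        return 0;
                sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
        }
        return nents;
}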
+3 -2
drivers/infiniband/hw/qib/qib_file_ops.c
··· 1459 1459 cused++; 1460 1460 else 1461 1461 cfree++; 1462 - if (pusable && cfree && cused < inuse) { 1462 + if (cfree && cused < inuse) { 1463 1463 udd = dd; 1464 1464 inuse = cused; 1465 1465 } ··· 1578 1578 struct qib_ctxtdata *rcd = fd->rcd; 1579 1579 struct qib_devdata *dd = rcd->dd; 1580 1580 1581 - if (dd->flags & QIB_HAS_SEND_DMA) 1581 + if (dd->flags & QIB_HAS_SEND_DMA) { 1582 1582 1583 1583 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, 1584 1584 dd->unit, ··· 1586 1586 fd->subctxt); 1587 1587 if (!fd->pq) 1588 1588 return -ENOMEM; 1589 + } 1589 1590 1590 1591 return 0; 1591 1592 }
+1
drivers/infiniband/hw/qib/qib_fs.c
··· 105 105 static ssize_t driver_stats_read(struct file *file, char __user *buf, 106 106 size_t count, loff_t *ppos) 107 107 { 108 + qib_stats.sps_ints = qib_sps_ints(); 108 109 return simple_read_from_buffer(buf, count, ppos, &qib_stats, 109 110 sizeof qib_stats); 110 111 }
+6 -5
drivers/infiniband/hw/qib/qib_iba6120.c
··· 1634 1634 goto bail; 1635 1635 } 1636 1636 1637 - qib_stats.sps_ints++; 1638 - if (dd->int_counter != (u32) -1) 1639 - dd->int_counter++; 1637 + this_cpu_inc(*dd->int_counter); 1640 1638 1641 1639 if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | 1642 1640 QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) ··· 1806 1808 * isn't set. 1807 1809 */ 1808 1810 dd->flags &= ~(QIB_INITTED | QIB_PRESENT); 1809 - dd->int_counter = 0; /* so we check interrupts work again */ 1811 + /* so we check interrupts work again */ 1812 + dd->z_int_counter = qib_int_counter(dd); 1810 1813 val = dd->control | QLOGIC_IB_C_RESET; 1811 1814 writeq(val, &dd->kregbase[kr_control]); 1812 1815 mb(); /* prevent compiler re-ordering around actual reset */ ··· 3265 3266 3266 3267 dd->eep_st_masks[2].errs_to_log = ERR_MASK(ResetNegated); 3267 3268 3268 - qib_init_pportdata(ppd, dd, 0, 1); 3269 + ret = qib_init_pportdata(ppd, dd, 0, 1); 3270 + if (ret) 3271 + goto bail; 3269 3272 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; 3270 3273 ppd->link_speed_supported = QIB_IB_SDR; 3271 3274 ppd->link_width_enabled = IB_WIDTH_4X;
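Note what the 6120 reset path can no longer do: a per-CPU counter has no single word to zero, so "reset" becomes "snapshot the aggregate into z_int_counter and report differences against it", which is exactly how verify_interrupt() in qib_init.c below decides whether any interrupts arrived; the 7220 and 7322 hunks repeat the same move. Continuing the toy_dev sketch from above:

/* "Reset" without touching other CPUs' slots: record the aggregate and
 * report deltas against it from now on. */
static void toy_counter_reset(struct toy_dev *dev)
{
        dev->z_int_counter = toy_counter_read(dev);
}

static u64 toy_counter_since_reset(struct toy_dev *dev)
{
        return toy_counter_read(dev) - dev->z_int_counter;
}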
+6 -6
drivers/infiniband/hw/qib/qib_iba7220.c
··· 1962 1962 goto bail; 1963 1963 } 1964 1964 1965 - qib_stats.sps_ints++; 1966 - if (dd->int_counter != (u32) -1) 1967 - dd->int_counter++; 1968 - 1965 + this_cpu_inc(*dd->int_counter); 1969 1966 if (unlikely(istat & (~QLOGIC_IB_I_BITSEXTANT | 1970 1967 QLOGIC_IB_I_GPIO | QLOGIC_IB_I_ERROR))) 1971 1968 unlikely_7220_intr(dd, istat); ··· 2117 2120 * isn't set. 2118 2121 */ 2119 2122 dd->flags &= ~(QIB_INITTED | QIB_PRESENT); 2120 - dd->int_counter = 0; /* so we check interrupts work again */ 2123 + /* so we check interrupts work again */ 2124 + dd->z_int_counter = qib_int_counter(dd); 2121 2125 val = dd->control | QLOGIC_IB_C_RESET; 2122 2126 writeq(val, &dd->kregbase[kr_control]); 2123 2127 mb(); /* prevent compiler reordering around actual reset */ ··· 4059 4061 init_waitqueue_head(&cpspec->autoneg_wait); 4060 4062 INIT_DELAYED_WORK(&cpspec->autoneg_work, autoneg_7220_work); 4061 4063 4062 - qib_init_pportdata(ppd, dd, 0, 1); 4064 + ret = qib_init_pportdata(ppd, dd, 0, 1); 4065 + if (ret) 4066 + goto bail; 4063 4067 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; 4064 4068 ppd->link_speed_supported = QIB_IB_SDR | QIB_IB_DDR; 4065 4069
+14 -23
drivers/infiniband/hw/qib/qib_iba7322.c
··· 3115 3115 goto bail; 3116 3116 } 3117 3117 3118 - qib_stats.sps_ints++; 3119 - if (dd->int_counter != (u32) -1) 3120 - dd->int_counter++; 3118 + this_cpu_inc(*dd->int_counter); 3121 3119 3122 3120 /* handle "errors" of various kinds first, device ahead of port */ 3123 3121 if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO | ··· 3184 3186 */ 3185 3187 return IRQ_HANDLED; 3186 3188 3187 - qib_stats.sps_ints++; 3188 - if (dd->int_counter != (u32) -1) 3189 - dd->int_counter++; 3189 + this_cpu_inc(*dd->int_counter); 3190 3190 3191 3191 /* Clear the interrupt bit we expect to be set. */ 3192 3192 qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) | ··· 3211 3215 */ 3212 3216 return IRQ_HANDLED; 3213 3217 3214 - qib_stats.sps_ints++; 3215 - if (dd->int_counter != (u32) -1) 3216 - dd->int_counter++; 3218 + this_cpu_inc(*dd->int_counter); 3217 3219 3218 3220 /* Clear the interrupt bit we expect to be set. */ 3219 3221 qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL); ··· 3242 3248 */ 3243 3249 return IRQ_HANDLED; 3244 3250 3245 - qib_stats.sps_ints++; 3246 - if (dd->int_counter != (u32) -1) 3247 - dd->int_counter++; 3251 + this_cpu_inc(*dd->int_counter); 3248 3252 3249 3253 /* Clear the interrupt bit we expect to be set. */ 3250 3254 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? ··· 3269 3277 */ 3270 3278 return IRQ_HANDLED; 3271 3279 3272 - qib_stats.sps_ints++; 3273 - if (dd->int_counter != (u32) -1) 3274 - dd->int_counter++; 3280 + this_cpu_inc(*dd->int_counter); 3275 3281 3276 3282 /* Clear the interrupt bit we expect to be set. */ 3277 3283 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? ··· 3296 3306 */ 3297 3307 return IRQ_HANDLED; 3298 3308 3299 - qib_stats.sps_ints++; 3300 - if (dd->int_counter != (u32) -1) 3301 - dd->int_counter++; 3309 + this_cpu_inc(*dd->int_counter); 3302 3310 3303 3311 /* Clear the interrupt bit we expect to be set. */ 3304 3312 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? ··· 3324 3336 */ 3325 3337 return IRQ_HANDLED; 3326 3338 3327 - qib_stats.sps_ints++; 3328 - if (dd->int_counter != (u32) -1) 3329 - dd->int_counter++; 3339 + this_cpu_inc(*dd->int_counter); 3330 3340 3331 3341 /* Clear the interrupt bit we expect to be set. */ 3332 3342 qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ? ··· 3709 3723 dd->pport->cpspec->ibsymdelta = 0; 3710 3724 dd->pport->cpspec->iblnkerrdelta = 0; 3711 3725 dd->pport->cpspec->ibmalfdelta = 0; 3712 - dd->int_counter = 0; /* so we check interrupts work again */ 3726 + /* so we check interrupts work again */ 3727 + dd->z_int_counter = qib_int_counter(dd); 3713 3728 3714 3729 /* 3715 3730 * Keep chip from being accessed until we are ready. Use ··· 6544 6557 } 6545 6558 6546 6559 dd->num_pports++; 6547 - qib_init_pportdata(ppd, dd, pidx, dd->num_pports); 6560 + ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports); 6561 + if (ret) { 6562 + dd->num_pports--; 6563 + goto bail; 6564 + } 6548 6565 6549 6566 ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; 6550 6567 ppd->link_width_enabled = IB_WIDTH_4X;
+67 -29
drivers/infiniband/hw/qib/qib_init.c
··· 130 130 int qib_create_ctxts(struct qib_devdata *dd) 131 131 { 132 132 unsigned i; 133 - int ret; 134 133 int local_node_id = pcibus_to_node(dd->pcidev->bus); 135 134 136 135 if (local_node_id < 0) ··· 144 145 if (!dd->rcd) { 145 146 qib_dev_err(dd, 146 147 "Unable to allocate ctxtdata array, failing\n"); 147 - ret = -ENOMEM; 148 - goto done; 148 + return -ENOMEM; 149 149 } 150 150 151 151 /* create (one or more) kctxt */ ··· 161 163 if (!rcd) { 162 164 qib_dev_err(dd, 163 165 "Unable to allocate ctxtdata for Kernel ctxt, failing\n"); 164 - ret = -ENOMEM; 165 - goto done; 166 + kfree(dd->rcd); 167 + dd->rcd = NULL; 168 + return -ENOMEM; 166 169 } 167 170 rcd->pkeys[0] = QIB_DEFAULT_P_KEY; 168 171 rcd->seq_cnt = 1; 169 172 } 170 - ret = 0; 171 - done: 172 - return ret; 173 + return 0; 173 174 } 174 175 175 176 /* ··· 230 233 /* 231 234 * Common code for initializing the physical port structure. 232 235 */ 233 - void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, 236 + int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, 234 237 u8 hw_pidx, u8 port) 235 238 { 236 239 int size; ··· 240 243 241 244 spin_lock_init(&ppd->sdma_lock); 242 245 spin_lock_init(&ppd->lflags_lock); 246 + spin_lock_init(&ppd->cc_shadow_lock); 243 247 init_waitqueue_head(&ppd->state_wait); 244 248 245 249 init_timer(&ppd->symerr_clear_timer); ··· 248 250 ppd->symerr_clear_timer.data = (unsigned long)ppd; 249 251 250 252 ppd->qib_wq = NULL; 251 - 252 - spin_lock_init(&ppd->cc_shadow_lock); 253 + ppd->ibport_data.pmastats = 254 + alloc_percpu(struct qib_pma_counters); 255 + if (!ppd->ibport_data.pmastats) 256 + return -ENOMEM; 253 257 254 258 if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) 255 259 goto bail; ··· 299 299 goto bail_3; 300 300 } 301 301 302 - return; 302 + return 0; 303 303 304 304 bail_3: 305 305 kfree(ppd->ccti_entries_shadow); ··· 313 313 bail: 314 314 /* User is intentionally disabling the congestion control agent */ 315 315 if (!qib_cc_table_size) 316 - return; 316 + return 0; 317 317 318 318 if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) { 319 319 qib_cc_table_size = 0; ··· 324 324 325 325 qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n", 326 326 port); 327 - return; 327 + return 0; 328 328 } 329 329 330 330 static int init_pioavailregs(struct qib_devdata *dd) ··· 525 525 static void verify_interrupt(unsigned long opaque) 526 526 { 527 527 struct qib_devdata *dd = (struct qib_devdata *) opaque; 528 + u64 int_counter; 528 529 529 530 if (!dd) 530 531 return; /* being torn down */ ··· 534 533 * If we don't have a lid or any interrupts, let the user know and 535 534 * don't bother checking again. 
536 535 */ 537 - if (dd->int_counter == 0) { 536 + int_counter = qib_int_counter(dd) - dd->z_int_counter; 537 + if (int_counter == 0) { 538 538 if (!dd->f_intr_fallback(dd)) 539 539 dev_err(&dd->pcidev->dev, 540 540 "No interrupts detected, not usable.\n"); ··· 633 631 } 634 632 } 635 633 return -ENOMEM; 634 + } 635 + 636 + static void qib_free_pportdata(struct qib_pportdata *ppd) 637 + { 638 + free_percpu(ppd->ibport_data.pmastats); 639 + ppd->ibport_data.pmastats = NULL; 636 640 } 637 641 638 642 /** ··· 928 920 destroy_workqueue(ppd->qib_wq); 929 921 ppd->qib_wq = NULL; 930 922 } 923 + qib_free_pportdata(ppd); 931 924 } 932 925 933 926 qib_update_eeprom_log(dd); ··· 1088 1079 #ifdef CONFIG_DEBUG_FS 1089 1080 qib_dbg_ibdev_exit(&dd->verbs_dev); 1090 1081 #endif 1082 + free_percpu(dd->int_counter); 1091 1083 ib_dealloc_device(&dd->verbs_dev.ibdev); 1084 + } 1085 + 1086 + u64 qib_int_counter(struct qib_devdata *dd) 1087 + { 1088 + int cpu; 1089 + u64 int_counter = 0; 1090 + 1091 + for_each_possible_cpu(cpu) 1092 + int_counter += *per_cpu_ptr(dd->int_counter, cpu); 1093 + return int_counter; 1094 + } 1095 + 1096 + u64 qib_sps_ints(void) 1097 + { 1098 + unsigned long flags; 1099 + struct qib_devdata *dd; 1100 + u64 sps_ints = 0; 1101 + 1102 + spin_lock_irqsave(&qib_devs_lock, flags); 1103 + list_for_each_entry(dd, &qib_dev_list, list) { 1104 + sps_ints += qib_int_counter(dd); 1105 + } 1106 + spin_unlock_irqrestore(&qib_devs_lock, flags); 1107 + return sps_ints; 1092 1108 } 1093 1109 1094 1110 /* ··· 1131 1097 int ret; 1132 1098 1133 1099 dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); 1134 - if (!dd) { 1135 - dd = ERR_PTR(-ENOMEM); 1136 - goto bail; 1137 - } 1100 + if (!dd) 1101 + return ERR_PTR(-ENOMEM); 1138 1102 1139 - #ifdef CONFIG_DEBUG_FS 1140 - qib_dbg_ibdev_init(&dd->verbs_dev); 1141 - #endif 1103 + INIT_LIST_HEAD(&dd->list); 1142 1104 1143 1105 idr_preload(GFP_KERNEL); 1144 1106 spin_lock_irqsave(&qib_devs_lock, flags); ··· 1151 1121 if (ret < 0) { 1152 1122 qib_early_err(&pdev->dev, 1153 1123 "Could not allocate unit ID: error %d\n", -ret); 1154 - #ifdef CONFIG_DEBUG_FS 1155 - qib_dbg_ibdev_exit(&dd->verbs_dev); 1156 - #endif 1157 - ib_dealloc_device(&dd->verbs_dev.ibdev); 1158 - dd = ERR_PTR(ret); 1124 + goto bail; 1125 + } 1126 + dd->int_counter = alloc_percpu(u64); 1127 + if (!dd->int_counter) { 1128 + ret = -ENOMEM; 1129 + qib_early_err(&pdev->dev, 1130 + "Could not allocate per-cpu int_counter\n"); 1159 1131 goto bail; 1160 1132 } 1161 1133 ··· 1171 1139 qib_early_err(&pdev->dev, 1172 1140 "Could not alloc cpulist info, cpu affinity might be wrong\n"); 1173 1141 } 1174 - 1175 - bail: 1142 + #ifdef CONFIG_DEBUG_FS 1143 + qib_dbg_ibdev_init(&dd->verbs_dev); 1144 + #endif 1176 1145 return dd; 1146 + bail: 1147 + if (!list_empty(&dd->list)) 1148 + list_del_init(&dd->list); 1149 + ib_dealloc_device(&dd->verbs_dev.ibdev); 1150 + return ERR_PTR(ret);; 1177 1151 } 1178 1152 1179 1153 /*
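qib_alloc_devdata() above settles into the common kernel shape for allocators that return a pointer: early return ERR_PTR(-ENOMEM), INIT_LIST_HEAD() before any registration so a single bail label can unconditionally list_del_init(), and ERR_PTR(ret) on every later failure. A compact, self-contained sketch of that shape (toy names; the real function also juggles an idr, cpulist info, and debugfs):

#include <linux/err.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct toy_devdata {
        struct list_head list;
        u64 __percpu *int_counter;
};

static LIST_HEAD(toy_devs);

/* Stand-in for the idr/list registration step, which may fail. */
static int toy_register(struct toy_devdata *dd)
{
        list_add(&dd->list, &toy_devs);
        return 0;
}

static struct toy_devdata *toy_alloc_devdata(void)
{
        struct toy_devdata *dd;
        int ret;

        dd = kzalloc(sizeof(*dd), GFP_KERNEL);
        if (!dd)
                return ERR_PTR(-ENOMEM);

        /* Init the list head before registering, so the error path can
         * always list_del_init() no matter how far we got. */
        INIT_LIST_HEAD(&dd->list);

        ret = toy_register(dd);
        if (ret)
                goto bail;

        dd->int_counter = alloc_percpu(u64);
        if (!dd->int_counter) {
                ret = -ENOMEM;
                goto bail;
        }
        return dd;

bail:
        if (!list_empty(&dd->list))
                list_del_init(&dd->list);
        kfree(dd);
        return ERR_PTR(ret);
}

/* Caller side: the errno travels inside the pointer. */
static int toy_probe(void)
{
        struct toy_devdata *dd = toy_alloc_devdata();

        return IS_ERR(dd) ? PTR_ERR(dd) : 0;
}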
+36 -8
drivers/infiniband/hw/qib/qib_mad.c
··· 1634 1634 return reply((struct ib_smp *)pmp); 1635 1635 } 1636 1636 1637 + static void qib_snapshot_pmacounters( 1638 + struct qib_ibport *ibp, 1639 + struct qib_pma_counters *pmacounters) 1640 + { 1641 + struct qib_pma_counters *p; 1642 + int cpu; 1643 + 1644 + memset(pmacounters, 0, sizeof(*pmacounters)); 1645 + for_each_possible_cpu(cpu) { 1646 + p = per_cpu_ptr(ibp->pmastats, cpu); 1647 + pmacounters->n_unicast_xmit += p->n_unicast_xmit; 1648 + pmacounters->n_unicast_rcv += p->n_unicast_rcv; 1649 + pmacounters->n_multicast_xmit += p->n_multicast_xmit; 1650 + pmacounters->n_multicast_rcv += p->n_multicast_rcv; 1651 + } 1652 + } 1653 + 1637 1654 static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, 1638 1655 struct ib_device *ibdev, u8 port) 1639 1656 { ··· 1659 1642 struct qib_ibport *ibp = to_iport(ibdev, port); 1660 1643 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1661 1644 u64 swords, rwords, spkts, rpkts, xwait; 1645 + struct qib_pma_counters pma; 1662 1646 u8 port_select = p->port_select; 1663 1647 1664 1648 memset(pmp->data, 0, sizeof(pmp->data)); ··· 1682 1664 p->port_rcv_data = cpu_to_be64(rwords); 1683 1665 p->port_xmit_packets = cpu_to_be64(spkts); 1684 1666 p->port_rcv_packets = cpu_to_be64(rpkts); 1685 - p->port_unicast_xmit_packets = cpu_to_be64(ibp->n_unicast_xmit); 1686 - p->port_unicast_rcv_packets = cpu_to_be64(ibp->n_unicast_rcv); 1687 - p->port_multicast_xmit_packets = cpu_to_be64(ibp->n_multicast_xmit); 1688 - p->port_multicast_rcv_packets = cpu_to_be64(ibp->n_multicast_rcv); 1667 + 1668 + qib_snapshot_pmacounters(ibp, &pma); 1669 + 1670 + p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit 1671 + - ibp->z_unicast_xmit); 1672 + p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv 1673 + - ibp->z_unicast_rcv); 1674 + p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit 1675 + - ibp->z_multicast_xmit); 1676 + p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv 1677 + - ibp->z_multicast_rcv); 1689 1678 1690 1679 bail: 1691 1680 return reply((struct ib_smp *) pmp); ··· 1820 1795 struct qib_ibport *ibp = to_iport(ibdev, port); 1821 1796 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 1822 1797 u64 swords, rwords, spkts, rpkts, xwait; 1798 + struct qib_pma_counters pma; 1823 1799 1824 1800 qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); 1825 1801 ··· 1836 1810 if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) 1837 1811 ibp->z_port_rcv_packets = rpkts; 1838 1812 1813 + qib_snapshot_pmacounters(ibp, &pma); 1814 + 1839 1815 if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) 1840 - ibp->n_unicast_xmit = 0; 1816 + ibp->z_unicast_xmit = pma.n_unicast_xmit; 1841 1817 1842 1818 if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) 1843 - ibp->n_unicast_rcv = 0; 1819 + ibp->z_unicast_rcv = pma.n_unicast_rcv; 1844 1820 1845 1821 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) 1846 - ibp->n_multicast_xmit = 0; 1822 + ibp->z_multicast_xmit = pma.n_multicast_xmit; 1847 1823 1848 1824 if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) 1849 - ibp->n_multicast_rcv = 0; 1825 + ibp->z_multicast_rcv = pma.n_multicast_rcv; 1850 1826 1851 1827 return pma_get_portcounters_ext(pmp, ibdev, port); 1852 1828 }
+1 -1
drivers/infiniband/hw/qib/qib_rc.c
··· 752 752 qib_flush_wc(); 753 753 qib_sendbuf_done(dd, pbufn); 754 754 755 - ibp->n_unicast_xmit++; 755 + this_cpu_inc(ibp->pmastats->n_unicast_xmit); 756 756 goto done; 757 757 758 758 queue_ack:
+1
drivers/infiniband/hw/qib/qib_ruc.c
··· 703 703 ohdr->bth[0] = cpu_to_be32(bth0); 704 704 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); 705 705 ohdr->bth[2] = cpu_to_be32(bth2); 706 + this_cpu_inc(ibp->pmastats->n_unicast_xmit); 706 707 } 707 708 708 709 /**
+3 -3
drivers/infiniband/hw/qib/qib_ud.c
··· 280 280 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; 281 281 if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { 282 282 if (ah_attr->dlid != QIB_PERMISSIVE_LID) 283 - ibp->n_multicast_xmit++; 283 + this_cpu_inc(ibp->pmastats->n_multicast_xmit); 284 284 else 285 - ibp->n_unicast_xmit++; 285 + this_cpu_inc(ibp->pmastats->n_unicast_xmit); 286 286 } else { 287 - ibp->n_unicast_xmit++; 287 + this_cpu_inc(ibp->pmastats->n_unicast_xmit); 288 288 lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); 289 289 if (unlikely(lid == ppd->lid)) { 290 290 /*
+124 -14
drivers/infiniband/hw/qib/qib_user_sdma.c
··· 52 52 /* attempt to drain the queue for 5secs */ 53 53 #define QIB_USER_SDMA_DRAIN_TIMEOUT 500 54 54 55 + /* 56 + * track how many times a process open this driver. 57 + */ 58 + static struct rb_root qib_user_sdma_rb_root = RB_ROOT; 59 + 60 + struct qib_user_sdma_rb_node { 61 + struct rb_node node; 62 + int refcount; 63 + pid_t pid; 64 + }; 65 + 55 66 struct qib_user_sdma_pkt { 56 67 struct list_head list; /* list element */ 57 68 ··· 131 120 /* dma page table */ 132 121 struct rb_root dma_pages_root; 133 122 123 + struct qib_user_sdma_rb_node *sdma_rb_node; 124 + 134 125 /* protect everything above... */ 135 126 struct mutex lock; 136 127 }; 128 + 129 + static struct qib_user_sdma_rb_node * 130 + qib_user_sdma_rb_search(struct rb_root *root, pid_t pid) 131 + { 132 + struct qib_user_sdma_rb_node *sdma_rb_node; 133 + struct rb_node *node = root->rb_node; 134 + 135 + while (node) { 136 + sdma_rb_node = container_of(node, 137 + struct qib_user_sdma_rb_node, node); 138 + if (pid < sdma_rb_node->pid) 139 + node = node->rb_left; 140 + else if (pid > sdma_rb_node->pid) 141 + node = node->rb_right; 142 + else 143 + return sdma_rb_node; 144 + } 145 + return NULL; 146 + } 147 + 148 + static int 149 + qib_user_sdma_rb_insert(struct rb_root *root, struct qib_user_sdma_rb_node *new) 150 + { 151 + struct rb_node **node = &(root->rb_node); 152 + struct rb_node *parent = NULL; 153 + struct qib_user_sdma_rb_node *got; 154 + 155 + while (*node) { 156 + got = container_of(*node, struct qib_user_sdma_rb_node, node); 157 + parent = *node; 158 + if (new->pid < got->pid) 159 + node = &((*node)->rb_left); 160 + else if (new->pid > got->pid) 161 + node = &((*node)->rb_right); 162 + else 163 + return 0; 164 + } 165 + 166 + rb_link_node(&new->node, parent, node); 167 + rb_insert_color(&new->node, root); 168 + return 1; 169 + } 137 170 138 171 struct qib_user_sdma_queue * 139 172 qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) 140 173 { 141 174 struct qib_user_sdma_queue *pq = 142 175 kmalloc(sizeof(struct qib_user_sdma_queue), GFP_KERNEL); 176 + struct qib_user_sdma_rb_node *sdma_rb_node; 143 177 144 178 if (!pq) 145 179 goto done; ··· 194 138 pq->num_pending = 0; 195 139 pq->num_sending = 0; 196 140 pq->added = 0; 141 + pq->sdma_rb_node = NULL; 197 142 198 143 INIT_LIST_HEAD(&pq->sent); 199 144 spin_lock_init(&pq->sent_lock); ··· 220 163 221 164 pq->dma_pages_root = RB_ROOT; 222 165 166 + sdma_rb_node = qib_user_sdma_rb_search(&qib_user_sdma_rb_root, 167 + current->pid); 168 + if (sdma_rb_node) { 169 + sdma_rb_node->refcount++; 170 + } else { 171 + int ret; 172 + sdma_rb_node = kmalloc(sizeof( 173 + struct qib_user_sdma_rb_node), GFP_KERNEL); 174 + if (!sdma_rb_node) 175 + goto err_rb; 176 + 177 + sdma_rb_node->refcount = 1; 178 + sdma_rb_node->pid = current->pid; 179 + 180 + ret = qib_user_sdma_rb_insert(&qib_user_sdma_rb_root, 181 + sdma_rb_node); 182 + BUG_ON(ret == 0); 183 + } 184 + pq->sdma_rb_node = sdma_rb_node; 185 + 223 186 goto done; 224 187 188 + err_rb: 189 + dma_pool_destroy(pq->header_cache); 225 190 err_slab: 226 191 kmem_cache_destroy(pq->pkt_slab); 227 192 err_kfree: ··· 1099 1020 if (!pq) 1100 1021 return; 1101 1022 1102 - kmem_cache_destroy(pq->pkt_slab); 1023 + pq->sdma_rb_node->refcount--; 1024 + if (pq->sdma_rb_node->refcount == 0) { 1025 + rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root); 1026 + kfree(pq->sdma_rb_node); 1027 + } 1103 1028 dma_pool_destroy(pq->header_cache); 1029 + kmem_cache_destroy(pq->pkt_slab); 1104 1030 kfree(pq); 1105 1031 } 
1106 1032 ··· 1325 1241 struct qib_user_sdma_queue *pq, 1326 1242 struct list_head *pktlist, int count) 1327 1243 { 1328 - int ret = 0; 1329 1244 unsigned long flags; 1330 1245 1331 1246 if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE))) 1332 1247 return -ECOMM; 1333 1248 1334 - spin_lock_irqsave(&ppd->sdma_lock, flags); 1335 - 1336 - if (unlikely(!__qib_sdma_running(ppd))) { 1337 - ret = -ECOMM; 1338 - goto unlock; 1249 + /* non-blocking mode */ 1250 + if (pq->sdma_rb_node->refcount > 1) { 1251 + spin_lock_irqsave(&ppd->sdma_lock, flags); 1252 + if (unlikely(!__qib_sdma_running(ppd))) { 1253 + spin_unlock_irqrestore(&ppd->sdma_lock, flags); 1254 + return -ECOMM; 1255 + } 1256 + pq->num_pending += count; 1257 + list_splice_tail_init(pktlist, &ppd->sdma_userpending); 1258 + qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending); 1259 + spin_unlock_irqrestore(&ppd->sdma_lock, flags); 1260 + return 0; 1339 1261 } 1340 1262 1341 - pq->num_pending += count; 1342 - list_splice_tail_init(pktlist, &ppd->sdma_userpending); 1343 - qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending); 1263 + /* In this case, descriptors from this process are not 1264 + * linked to ppd pending queue, interrupt handler 1265 + * won't update this process, it is OK to directly 1266 + * modify without sdma lock. 1267 + */ 1344 1268 1345 - unlock: 1346 - spin_unlock_irqrestore(&ppd->sdma_lock, flags); 1347 - return ret; 1269 + 1270 + pq->num_pending += count; 1271 + /* 1272 + * Blocking mode for single rail process, we must 1273 + * release/regain sdma_lock to give other process 1274 + * chance to make progress. This is important for 1275 + * performance. 1276 + */ 1277 + do { 1278 + spin_lock_irqsave(&ppd->sdma_lock, flags); 1279 + if (unlikely(!__qib_sdma_running(ppd))) { 1280 + spin_unlock_irqrestore(&ppd->sdma_lock, flags); 1281 + return -ECOMM; 1282 + } 1283 + qib_user_sdma_send_desc(ppd, pktlist); 1284 + if (!list_empty(pktlist)) 1285 + qib_sdma_make_progress(ppd); 1286 + spin_unlock_irqrestore(&ppd->sdma_lock, flags); 1287 + } while (!list_empty(pktlist)); 1288 + 1289 + return 0; 1348 1290 } 1349 1291 1350 1292 int qib_user_sdma_writev(struct qib_ctxtdata *rcd, ··· 1400 1290 qib_user_sdma_queue_clean(ppd, pq); 1401 1291 1402 1292 while (dim) { 1403 - int mxp = 8; 1293 + int mxp = 1; 1404 1294 int ndesc = 0; 1405 1295 1406 1296 ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
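qib_user_sdma.c above keeps one rb-tree node per pid, refcounted across queue create/destroy. The lifecycle, reduced to a sketch (toy names; toy_rb_search()/toy_rb_insert() have the same shape as the qib_user_sdma_rb_* functions in the hunk):

#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

struct toy_node {
        struct rb_node node;
        pid_t pid;
        int refcount;
};

static struct toy_node *toy_rb_search(struct rb_root *root, pid_t pid);
static int toy_rb_insert(struct rb_root *root, struct toy_node *new);

static struct toy_node *toy_get_node(struct rb_root *root, pid_t pid)
{
        struct toy_node *n = toy_rb_search(root, pid);

        if (n) {
                n->refcount++;  /* same pid opened another queue/rail */
                return n;
        }
        n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return NULL;
        n->pid = pid;
        n->refcount = 1;
        toy_rb_insert(root, n); /* cannot collide: the search just missed */
        return n;
}

static void toy_put_node(struct rb_root *root, struct toy_node *n)
{
        if (--n->refcount == 0) {
                rb_erase(&n->node, root);
                kfree(n);
        }
}

refcount > 1 is then a cheap "this pid drives more than one rail" test, which is what the pktlist submission path above uses to pick the non-blocking mode instead of spinning on sdma_lock.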
+4 -4
drivers/infiniband/hw/qib/qib_verbs.c
··· 662 662 mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); 663 663 if (mcast == NULL) 664 664 goto drop; 665 - ibp->n_multicast_rcv++; 665 + this_cpu_inc(ibp->pmastats->n_multicast_rcv); 666 666 list_for_each_entry_rcu(p, &mcast->qp_list, list) 667 667 qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); 668 668 /* ··· 678 678 &rcd->lookaside_qp->refcount)) 679 679 wake_up( 680 680 &rcd->lookaside_qp->wait); 681 - rcd->lookaside_qp = NULL; 682 - } 681 + rcd->lookaside_qp = NULL; 682 + } 683 683 } 684 684 if (!rcd->lookaside_qp) { 685 685 qp = qib_lookup_qpn(ibp, qp_num); ··· 689 689 rcd->lookaside_qpn = qp_num; 690 690 } else 691 691 qp = rcd->lookaside_qp; 692 - ibp->n_unicast_rcv++; 692 + this_cpu_inc(ibp->pmastats->n_unicast_rcv); 693 693 qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); 694 694 } 695 695 return;
+12 -4
drivers/infiniband/hw/qib/qib_verbs.h
··· 664 664 struct qib_opcode_stats stats[128]; 665 665 }; 666 666 667 + struct qib_pma_counters { 668 + u64 n_unicast_xmit; /* total unicast packets sent */ 669 + u64 n_unicast_rcv; /* total unicast packets received */ 670 + u64 n_multicast_xmit; /* total multicast packets sent */ 671 + u64 n_multicast_rcv; /* total multicast packets received */ 672 + }; 673 + 667 674 struct qib_ibport { 668 675 struct qib_qp __rcu *qp0; 669 676 struct qib_qp __rcu *qp1; ··· 687 680 __be64 mkey; 688 681 __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */ 689 682 u64 tid; /* TID for traps */ 690 - u64 n_unicast_xmit; /* total unicast packets sent */ 691 - u64 n_unicast_rcv; /* total unicast packets received */ 692 - u64 n_multicast_xmit; /* total multicast packets sent */ 693 - u64 n_multicast_rcv; /* total multicast packets received */ 683 + struct qib_pma_counters __percpu *pmastats; 684 + u64 z_unicast_xmit; /* starting count for PMA */ 685 + u64 z_unicast_rcv; /* starting count for PMA */ 686 + u64 z_multicast_xmit; /* starting count for PMA */ 687 + u64 z_multicast_rcv; /* starting count for PMA */ 694 688 u64 z_symbol_error_counter; /* starting count for PMA */ 695 689 u64 z_link_error_recovery_counter; /* starting count for PMA */ 696 690 u64 z_link_downed_counter; /* starting count for PMA */
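Here the four port counters become one per-CPU struct qib_pma_counters plus z_* baselines, and qib_snapshot_pmacounters() in the qib_mad.c hunk above folds the copies member by member. The general shape for a per-CPU struct of counters, as a hedged sketch with toy names:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/types.h>

struct toy_pma {
        u64 n_unicast_xmit;
        u64 n_multicast_xmit;
};

struct toy_port {
        struct toy_pma __percpu *pmastats;
};

/* Hot path: bump one member of this CPU's copy. */
static void toy_count_ucast(struct toy_port *p)
{
        this_cpu_inc(p->pmastats->n_unicast_xmit);
}

/* Report path: fold every CPU's copy, member by member. */
static void toy_snapshot(struct toy_port *p, struct toy_pma *out)
{
        int cpu;

        memset(out, 0, sizeof(*out));
        for_each_possible_cpu(cpu) {
                struct toy_pma *c = per_cpu_ptr(p->pmastats, cpu);

                out->n_unicast_xmit   += c->n_unicast_xmit;
                out->n_multicast_xmit += c->n_multicast_xmit;
        }
}

Allocation and teardown use alloc_percpu(struct toy_pma) and free_percpu(), the same pairing qib_init_pportdata()/qib_free_pportdata() add above.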
+1 -1
drivers/infiniband/hw/usnic/usnic_uiom.c
··· 286 286 err = iommu_map(pd->domain, va_start, pa_start, 287 287 size, flags); 288 288 if (err) { 289 - usnic_err("Failed to map va 0x%lx pa 0x%pa size 0x%zx with err %d\n", 289 + usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n", 290 290 va_start, &pa_start, size, err); 291 291 goto err_out; 292 292 }
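The usnic one-liner fixes a doubled prefix: %pa already prints a phys_addr_t/dma_addr_t as 0x-prefixed, zero-padded hex, and it takes a pointer to the variable rather than the value, so a literal "0x%pa" renders as "0x0x...". For instance (illustrative value):

#include <linux/printk.h>
#include <linux/types.h>

static void toy_print(phys_addr_t pa)
{
        pr_info("pa = %pa\n", &pa);  /* "pa = 0x0000000000001000" on 64-bit */
        /* pr_info("pa = 0x%pa\n", &pa) would print "pa = 0x0x..." */
}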
+64 -29
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 5 5 * Copyright (C) 2004 Alex Aizman 6 6 * Copyright (C) 2005 Mike Christie 7 7 * Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved. 8 - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. 8 + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. 9 9 * maintained by openib-general@openib.org 10 10 * 11 11 * This software is available to you under a choice of one of two ··· 82 82 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); 83 83 84 84 int iser_debug_level = 0; 85 + bool iser_pi_enable = false; 86 + int iser_pi_guard = 0; 85 87 86 88 MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); 87 89 MODULE_LICENSE("Dual BSD/GPL"); ··· 92 90 93 91 module_param_named(debug_level, iser_debug_level, int, 0644); 94 92 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)"); 93 + 94 + module_param_named(pi_enable, iser_pi_enable, bool, 0644); 95 + MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); 96 + 97 + module_param_named(pi_guard, iser_pi_guard, int, 0644); 98 + MODULE_PARM_DESC(pi_guard, "T10-PI guard_type, 0:CRC|1:IP_CSUM (default:CRC)"); 95 99 96 100 struct iser_global ig; 97 101 ··· 146 138 int iser_initialize_task_headers(struct iscsi_task *task, 147 139 struct iser_tx_desc *tx_desc) 148 140 { 149 - struct iscsi_iser_conn *iser_conn = task->conn->dd_data; 150 - struct iser_device *device = iser_conn->ib_conn->device; 141 + struct iser_conn *ib_conn = task->conn->dd_data; 142 + struct iser_device *device = ib_conn->device; 151 143 struct iscsi_iser_task *iser_task = task->dd_data; 152 144 u64 dma_addr; 153 145 ··· 161 153 tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; 162 154 tx_desc->tx_sg[0].lkey = device->mr->lkey; 163 155 164 - iser_task->iser_conn = iser_conn; 156 + iser_task->ib_conn = ib_conn; 165 157 return 0; 166 158 } 167 159 /** ··· 184 176 185 177 iser_task->command_sent = 0; 186 178 iser_task_rdma_init(iser_task); 179 + iser_task->sc = task->sc; 180 + 187 181 return 0; 188 182 } 189 183 ··· 288 278 static void iscsi_iser_cleanup_task(struct iscsi_task *task) 289 279 { 290 280 struct iscsi_iser_task *iser_task = task->dd_data; 291 - struct iser_tx_desc *tx_desc = &iser_task->desc; 292 - 293 - struct iscsi_iser_conn *iser_conn = task->conn->dd_data; 294 - struct iser_device *device = iser_conn->ib_conn->device; 281 + struct iser_tx_desc *tx_desc = &iser_task->desc; 282 + struct iser_conn *ib_conn = task->conn->dd_data; 283 + struct iser_device *device = ib_conn->device; 295 284 296 285 ib_dma_unmap_single(device->ib_device, 297 286 tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); ··· 305 296 } 306 297 } 307 298 299 + static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) 300 + { 301 + struct iscsi_iser_task *iser_task = task->dd_data; 302 + 303 + if (iser_task->dir[ISER_DIR_IN]) 304 + return iser_check_task_pi_status(iser_task, ISER_DIR_IN, 305 + sector); 306 + else 307 + return iser_check_task_pi_status(iser_task, ISER_DIR_OUT, 308 + sector); 309 + } 310 + 308 311 static struct iscsi_cls_conn * 309 312 iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) 310 313 { 311 314 struct iscsi_conn *conn; 312 315 struct iscsi_cls_conn *cls_conn; 313 - struct iscsi_iser_conn *iser_conn; 314 316 315 - cls_conn = iscsi_conn_setup(cls_session, sizeof(*iser_conn), conn_idx); 317 + cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx); 316 318 if (!cls_conn) 317 319 return NULL; 318 320 conn = cls_conn->dd_data; ··· 334 
314 */ 335 315 conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN; 336 316 337 - iser_conn = conn->dd_data; 338 - conn->dd_data = iser_conn; 339 - iser_conn->iscsi_conn = conn; 340 - 341 317 return cls_conn; 342 318 } 343 319 ··· 341 325 iscsi_iser_conn_destroy(struct iscsi_cls_conn *cls_conn) 342 326 { 343 327 struct iscsi_conn *conn = cls_conn->dd_data; 344 - struct iscsi_iser_conn *iser_conn = conn->dd_data; 345 - struct iser_conn *ib_conn = iser_conn->ib_conn; 328 + struct iser_conn *ib_conn = conn->dd_data; 346 329 347 330 iscsi_conn_teardown(cls_conn); 348 331 /* ··· 350 335 * we free it here. 351 336 */ 352 337 if (ib_conn) { 353 - ib_conn->iser_conn = NULL; 338 + ib_conn->iscsi_conn = NULL; 354 339 iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ 355 340 } 356 341 } ··· 361 346 int is_leading) 362 347 { 363 348 struct iscsi_conn *conn = cls_conn->dd_data; 364 - struct iscsi_iser_conn *iser_conn; 365 349 struct iscsi_session *session; 366 350 struct iser_conn *ib_conn; 367 351 struct iscsi_endpoint *ep; ··· 387 373 /* binds the iSER connection retrieved from the previously 388 374 * connected ep_handle to the iSCSI layer connection. exchanges 389 375 * connection pointers */ 390 - iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n", 391 - conn, conn->dd_data, ib_conn); 392 - iser_conn = conn->dd_data; 393 - ib_conn->iser_conn = iser_conn; 394 - iser_conn->ib_conn = ib_conn; 376 + iser_info("binding iscsi conn %p to ib_conn %p\n", conn, ib_conn); 377 + 378 + conn->dd_data = ib_conn; 379 + ib_conn->iscsi_conn = conn; 380 + 395 381 iser_conn_get(ib_conn); /* ref iscsi/ib conn binding */ 396 382 return 0; 397 383 } ··· 400 386 iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) 401 387 { 402 388 struct iscsi_conn *conn = cls_conn->dd_data; 403 - struct iscsi_iser_conn *iser_conn = conn->dd_data; 404 - struct iser_conn *ib_conn = iser_conn->ib_conn; 389 + struct iser_conn *ib_conn = conn->dd_data; 405 390 406 391 /* 407 392 * Userspace may have goofed up and not bound the connection or ··· 414 401 */ 415 402 iser_conn_put(ib_conn, 1); /* deref iscsi/ib conn unbinding */ 416 403 } 417 - iser_conn->ib_conn = NULL; 404 + conn->dd_data = NULL; 418 405 } 419 406 420 407 static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) ··· 424 411 iscsi_session_teardown(cls_session); 425 412 iscsi_host_remove(shost); 426 413 iscsi_host_free(shost); 414 + } 415 + 416 + static inline unsigned int 417 + iser_dif_prot_caps(int prot_caps) 418 + { 419 + return ((prot_caps & IB_PROT_T10DIF_TYPE_1) ? SHOST_DIF_TYPE1_PROTECTION | 420 + SHOST_DIX_TYPE1_PROTECTION : 0) | 421 + ((prot_caps & IB_PROT_T10DIF_TYPE_2) ? SHOST_DIF_TYPE2_PROTECTION | 422 + SHOST_DIX_TYPE2_PROTECTION : 0) | 423 + ((prot_caps & IB_PROT_T10DIF_TYPE_3) ? SHOST_DIF_TYPE3_PROTECTION | 424 + SHOST_DIX_TYPE3_PROTECTION : 0); 427 425 } 428 426 429 427 static struct iscsi_cls_session * ··· 461 437 * older userspace tools (before 2.0-870) did not pass us 462 438 * the leading conn's ep so this will be NULL; 463 439 */ 464 - if (ep) 440 + if (ep) { 465 441 ib_conn = ep->dd_data; 442 + if (ib_conn->pi_support) { 443 + u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap; 444 + 445 + scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps)); 446 + if (iser_pi_guard) 447 + scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); 448 + else 449 + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); 450 + } 451 + } 466 452 467 453 if (iscsi_host_add(shost, 468 454 ep ? 
ib_conn->device->ib_device->dma_device : NULL)) ··· 652 618 struct iser_conn *ib_conn; 653 619 654 620 ib_conn = ep->dd_data; 655 - if (ib_conn->iser_conn) 621 + if (ib_conn->iscsi_conn) 656 622 /* 657 623 * Must suspend xmit path if the ep is bound to the 658 624 * iscsi_conn, so we know we are not accessing the ib_conn ··· 660 626 * 661 627 * This may not be bound if the ep poll failed. 662 628 */ 663 - iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn); 629 + iscsi_suspend_tx(ib_conn->iscsi_conn); 664 630 665 631 666 632 iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state); ··· 766 732 .xmit_task = iscsi_iser_task_xmit, 767 733 .cleanup_task = iscsi_iser_cleanup_task, 768 734 .alloc_pdu = iscsi_iser_pdu_alloc, 735 + .check_protection = iscsi_iser_check_protection, 769 736 /* recovery */ 770 737 .session_recovery_timedout = iscsi_session_recovery_timedout, 771 738
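For the T10-PI plumbing above, iser_dif_prot_caps() fans each DIF type the device supports out to the matching SCSI DIF (target-format) and DIX (host-memory) capability bits before scsi_host_set_prot(). Worked through for a device advertising types 1 and 3, using the constants as declared in the code above:

/* Device advertises DIF types 1 and 3: */
unsigned int sig_caps = IB_PROT_T10DIF_TYPE_1 | IB_PROT_T10DIF_TYPE_3;

/* iser_dif_prot_caps(sig_caps) then evaluates to: */
unsigned int host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
                         SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

/* scsi_host_set_prot(shost, host_prot) advertises exactly those modes. */

scsi_host_set_guard() then selects SHOST_DIX_GUARD_IP or SHOST_DIX_GUARD_CRC according to the pi_guard module parameter added at the top of the file.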
+62 -23
drivers/infiniband/ulp/iser/iscsi_iser.h
··· 8 8 * 9 9 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 10 10 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 11 - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. 11 + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. 12 12 * 13 13 * This software is available to you under a choice of one of two 14 14 * licenses. You may choose to be licensed under the terms of the GNU ··· 46 46 #include <linux/printk.h> 47 47 #include <scsi/libiscsi.h> 48 48 #include <scsi/scsi_transport_iscsi.h> 49 + #include <scsi/scsi_cmnd.h> 50 + #include <scsi/scsi_device.h> 49 51 50 52 #include <linux/interrupt.h> 51 53 #include <linux/wait.h> ··· 69 67 70 68 #define DRV_NAME "iser" 71 69 #define PFX DRV_NAME ": " 72 - #define DRV_VER "1.1" 70 + #define DRV_VER "1.3" 73 71 74 72 #define iser_dbg(fmt, arg...) \ 75 73 do { \ ··· 136 134 ISER_MAX_TX_MISC_PDUS + \ 137 135 ISER_MAX_RX_MISC_PDUS) 138 136 137 + /* Max registration work requests per command */ 138 + #define ISER_MAX_REG_WR_PER_CMD 5 139 + 140 + /* For Signature we don't support DATAOUTs so no need to make room for them */ 141 + #define ISER_QP_SIG_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \ 142 + (1 + ISER_MAX_REG_WR_PER_CMD) + \ 143 + ISER_MAX_TX_MISC_PDUS + \ 144 + ISER_MAX_RX_MISC_PDUS) 145 + 139 146 #define ISER_VER 0x10 140 147 #define ISER_WSV 0x08 141 148 #define ISER_RSV 0x04 149 + 150 + #define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL 142 151 143 152 struct iser_hdr { 144 153 u8 flags; ··· 214 201 /* fwd declarations */ 215 202 struct iser_device; 216 203 struct iser_cq_desc; 217 - struct iscsi_iser_conn; 218 204 struct iscsi_iser_task; 219 205 struct iscsi_endpoint; 220 206 ··· 270 258 struct iser_device { 271 259 struct ib_device *ib_device; 272 260 struct ib_pd *pd; 261 + struct ib_device_attr dev_attr; 273 262 struct ib_cq *rx_cq[ISER_MAX_CQ]; 274 263 struct ib_cq *tx_cq[ISER_MAX_CQ]; 275 264 struct ib_mr *mr; ··· 290 277 enum iser_data_dir cmd_dir); 291 278 }; 292 279 280 + #define ISER_CHECK_GUARD 0xc0 281 + #define ISER_CHECK_REFTAG 0x0f 282 + #define ISER_CHECK_APPTAG 0x30 283 + 284 + enum iser_reg_indicator { 285 + ISER_DATA_KEY_VALID = 1 << 0, 286 + ISER_PROT_KEY_VALID = 1 << 1, 287 + ISER_SIG_KEY_VALID = 1 << 2, 288 + ISER_FASTREG_PROTECTED = 1 << 3, 289 + }; 290 + 291 + struct iser_pi_context { 292 + struct ib_mr *prot_mr; 293 + struct ib_fast_reg_page_list *prot_frpl; 294 + struct ib_mr *sig_mr; 295 + }; 296 + 293 297 struct fast_reg_descriptor { 294 298 struct list_head list; 295 299 /* For fast registration - FRWR */ 296 300 struct ib_mr *data_mr; 297 301 struct ib_fast_reg_page_list *data_frpl; 298 - /* Valid for fast registration flag */ 299 - bool valid; 302 + struct iser_pi_context *pi_ctx; 303 + /* registration indicators container */ 304 + u8 reg_indicators; 300 305 }; 301 306 302 307 struct iser_conn { 303 - struct iscsi_iser_conn *iser_conn; /* iser conn for upcalls */ 308 + struct iscsi_conn *iscsi_conn; 304 309 struct iscsi_endpoint *ep; 305 310 enum iser_ib_conn_state state; /* rdma connection state */ 306 311 atomic_t refcount; ··· 341 310 unsigned int rx_desc_head; 342 311 struct iser_rx_desc *rx_descs; 343 312 struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; 313 + bool pi_support; 314 + 315 + /* Connection memory registration pool */ 344 316 union { 345 317 struct { 346 318 struct ib_fmr_pool *pool; /* pool of IB FMRs */ ··· 353 319 struct { 354 320 struct list_head pool; 355 321 int pool_size; 356 - } frwr; 357 - } fastreg; 358 - }; 359 - 360 - 
struct iscsi_iser_conn { 361 - struct iscsi_conn *iscsi_conn;/* ptr to iscsi conn */ 362 - struct iser_conn *ib_conn; /* iSER IB conn */ 322 + } fastreg; 323 + }; 363 324 }; 364 325 365 326 struct iscsi_iser_task { 366 327 struct iser_tx_desc desc; 367 - struct iscsi_iser_conn *iser_conn; 328 + struct iser_conn *ib_conn; 368 329 enum iser_task_status status; 330 + struct scsi_cmnd *sc; 369 331 int command_sent; /* set if command sent */ 370 332 int dir[ISER_DIRS_NUM]; /* set if dir use*/ 371 333 struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ 372 334 struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ 373 335 struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */ 336 + struct iser_data_buf prot[ISER_DIRS_NUM]; /* prot desc */ 337 + struct iser_data_buf prot_copy[ISER_DIRS_NUM];/* prot copy */ 374 338 }; 375 339 376 340 struct iser_page_vec { ··· 394 362 395 363 extern struct iser_global ig; 396 364 extern int iser_debug_level; 365 + extern bool iser_pi_enable; 366 + extern int iser_pi_guard; 397 367 398 368 /* allocate connection resources needed for rdma functionality */ 399 369 int iser_conn_set_full_featured_mode(struct iscsi_conn *conn); ··· 435 401 436 402 void iser_free_rx_descriptors(struct iser_conn *ib_conn); 437 403 438 - void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task, 439 - enum iser_data_dir cmd_dir); 404 + void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, 405 + struct iser_data_buf *mem, 406 + struct iser_data_buf *mem_copy, 407 + enum iser_data_dir cmd_dir); 440 408 441 409 int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task, 442 410 enum iser_data_dir cmd_dir); 443 - int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *task, 444 - enum iser_data_dir cmd_dir); 411 + int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *task, 412 + enum iser_data_dir cmd_dir); 445 413 446 414 int iser_connect(struct iser_conn *ib_conn, 447 415 struct sockaddr_in *src_addr, ··· 456 420 457 421 void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task, 458 422 enum iser_data_dir cmd_dir); 459 - void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task, 460 - enum iser_data_dir cmd_dir); 423 + void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, 424 + enum iser_data_dir cmd_dir); 461 425 462 426 int iser_post_recvl(struct iser_conn *ib_conn); 463 427 int iser_post_recvm(struct iser_conn *ib_conn, int count); ··· 468 432 enum iser_data_dir iser_dir, 469 433 enum dma_data_direction dma_dir); 470 434 471 - void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); 435 + void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 436 + struct iser_data_buf *data); 472 437 int iser_initialize_task_headers(struct iscsi_task *task, 473 438 struct iser_tx_desc *tx_desc); 474 439 int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session); 475 440 int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max); 476 441 void iser_free_fmr_pool(struct iser_conn *ib_conn); 477 - int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max); 478 - void iser_free_frwr_pool(struct iser_conn *ib_conn); 442 + int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max); 443 + void iser_free_fastreg_pool(struct iser_conn *ib_conn); 444 + u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, 445 + enum iser_data_dir cmd_dir, sector_t *sector); 479 446 #endif
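Replacing the descriptor's single bool valid with the reg_indicators bit set lets one byte track the data, protection, and signature MR states independently. Assuming the declarations above, manipulation looks like this (illustrative helpers, not driver code):

static inline void toy_mark_data_mr_valid(struct fast_reg_descriptor *desc)
{
        desc->reg_indicators |= ISER_DATA_KEY_VALID;
}

static inline void toy_invalidate_prot_mr(struct fast_reg_descriptor *desc)
{
        desc->reg_indicators &= ~ISER_PROT_KEY_VALID;
}

static inline bool toy_task_is_protected(struct fast_reg_descriptor *desc)
{
        return desc->reg_indicators & ISER_FASTREG_PROTECTED;
}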
+109 -45
drivers/infiniband/ulp/iser/iser_initiator.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 3 - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. 3 + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. 4 4 * 5 5 * This software is available to you under a choice of one of two 6 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 49 49 50 50 { 51 51 struct iscsi_iser_task *iser_task = task->dd_data; 52 - struct iser_device *device = iser_task->iser_conn->ib_conn->device; 52 + struct iser_device *device = iser_task->ib_conn->device; 53 53 struct iser_regd_buf *regd_buf; 54 54 int err; 55 55 struct iser_hdr *hdr = &iser_task->desc.iser_header; ··· 62 62 if (err) 63 63 return err; 64 64 65 + if (scsi_prot_sg_count(iser_task->sc)) { 66 + struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN]; 67 + 68 + err = iser_dma_map_task_data(iser_task, 69 + pbuf_in, 70 + ISER_DIR_IN, 71 + DMA_FROM_DEVICE); 72 + if (err) 73 + return err; 74 + } 75 + 65 76 if (edtl > iser_task->data[ISER_DIR_IN].data_len) { 66 77 iser_err("Total data length: %ld, less than EDTL: " 67 78 "%d, in READ cmd BHS itt: %d, conn: 0x%p\n", 68 79 iser_task->data[ISER_DIR_IN].data_len, edtl, 69 - task->itt, iser_task->iser_conn); 80 + task->itt, iser_task->ib_conn); 70 81 return -EINVAL; 71 82 } 72 83 ··· 110 99 unsigned int edtl) 111 100 { 112 101 struct iscsi_iser_task *iser_task = task->dd_data; 113 - struct iser_device *device = iser_task->iser_conn->ib_conn->device; 102 + struct iser_device *device = iser_task->ib_conn->device; 114 103 struct iser_regd_buf *regd_buf; 115 104 int err; 116 105 struct iser_hdr *hdr = &iser_task->desc.iser_header; ··· 123 112 DMA_TO_DEVICE); 124 113 if (err) 125 114 return err; 115 + 116 + if (scsi_prot_sg_count(iser_task->sc)) { 117 + struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT]; 118 + 119 + err = iser_dma_map_task_data(iser_task, 120 + pbuf_out, 121 + ISER_DIR_OUT, 122 + DMA_TO_DEVICE); 123 + if (err) 124 + return err; 125 + } 126 126 127 127 if (edtl > iser_task->data[ISER_DIR_OUT].data_len) { 128 128 iser_err("Total data length: %ld, less than EDTL: %d, " ··· 349 327 350 328 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) 351 329 { 352 - struct iscsi_iser_conn *iser_conn = conn->dd_data; 330 + struct iser_conn *ib_conn = conn->dd_data; 353 331 struct iscsi_session *session = conn->session; 354 332 355 333 iser_dbg("req op %x flags %x\n", req->opcode, req->flags); ··· 362 340 * response) and no posted send buffers left - they must have been 363 341 * consumed during previous login phases. 
364 342 */ 365 - WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1); 366 - WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); 343 + WARN_ON(ib_conn->post_recv_buf_count != 1); 344 + WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0); 367 345 368 346 if (session->discovery_sess) { 369 347 iser_info("Discovery session, re-using login RX buffer\n"); 370 348 return 0; 371 349 } else 372 350 iser_info("Normal session, posting batch of RX %d buffers\n", 373 - iser_conn->ib_conn->min_posted_rx); 351 + ib_conn->min_posted_rx); 374 352 375 353 /* Initial post receive buffers */ 376 - if (iser_post_recvm(iser_conn->ib_conn, 377 - iser_conn->ib_conn->min_posted_rx)) 354 + if (iser_post_recvm(ib_conn, ib_conn->min_posted_rx)) 378 355 return -ENOMEM; 379 356 380 357 return 0; ··· 385 364 int iser_send_command(struct iscsi_conn *conn, 386 365 struct iscsi_task *task) 387 366 { 388 - struct iscsi_iser_conn *iser_conn = conn->dd_data; 367 + struct iser_conn *ib_conn = conn->dd_data; 389 368 struct iscsi_iser_task *iser_task = task->dd_data; 390 369 unsigned long edtl; 391 370 int err; 392 - struct iser_data_buf *data_buf; 371 + struct iser_data_buf *data_buf, *prot_buf; 393 372 struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; 394 373 struct scsi_cmnd *sc = task->sc; 395 374 struct iser_tx_desc *tx_desc = &iser_task->desc; ··· 398 377 399 378 /* build the tx desc regd header and add it to the tx desc dto */ 400 379 tx_desc->type = ISCSI_TX_SCSI_COMMAND; 401 - iser_create_send_desc(iser_conn->ib_conn, tx_desc); 380 + iser_create_send_desc(ib_conn, tx_desc); 402 381 403 - if (hdr->flags & ISCSI_FLAG_CMD_READ) 382 + if (hdr->flags & ISCSI_FLAG_CMD_READ) { 404 383 data_buf = &iser_task->data[ISER_DIR_IN]; 405 - else 384 + prot_buf = &iser_task->prot[ISER_DIR_IN]; 385 + } else { 406 386 data_buf = &iser_task->data[ISER_DIR_OUT]; 387 + prot_buf = &iser_task->prot[ISER_DIR_OUT]; 388 + } 407 389 408 390 if (scsi_sg_count(sc)) { /* using a scatter list */ 409 391 data_buf->buf = scsi_sglist(sc); 410 392 data_buf->size = scsi_sg_count(sc); 411 393 } 412 - 413 394 data_buf->data_len = scsi_bufflen(sc); 395 + 396 + if (scsi_prot_sg_count(sc)) { 397 + prot_buf->buf = scsi_prot_sglist(sc); 398 + prot_buf->size = scsi_prot_sg_count(sc); 399 + prot_buf->data_len = sc->prot_sdb->length; 400 + } 414 401 415 402 if (hdr->flags & ISCSI_FLAG_CMD_READ) { 416 403 err = iser_prepare_read_cmd(task, edtl); ··· 437 408 438 409 iser_task->status = ISER_TASK_STATUS_STARTED; 439 410 440 - err = iser_post_send(iser_conn->ib_conn, tx_desc); 411 + err = iser_post_send(ib_conn, tx_desc); 441 412 if (!err) 442 413 return 0; 443 414 ··· 453 424 struct iscsi_task *task, 454 425 struct iscsi_data *hdr) 455 426 { 456 - struct iscsi_iser_conn *iser_conn = conn->dd_data; 427 + struct iser_conn *ib_conn = conn->dd_data; 457 428 struct iscsi_iser_task *iser_task = task->dd_data; 458 429 struct iser_tx_desc *tx_desc = NULL; 459 430 struct iser_regd_buf *regd_buf; ··· 502 473 itt, buf_offset, data_seg_len); 503 474 504 475 505 - err = iser_post_send(iser_conn->ib_conn, tx_desc); 476 + err = iser_post_send(ib_conn, tx_desc); 506 477 if (!err) 507 478 return 0; 508 479 ··· 515 486 int iser_send_control(struct iscsi_conn *conn, 516 487 struct iscsi_task *task) 517 488 { 518 - struct iscsi_iser_conn *iser_conn = conn->dd_data; 489 + struct iser_conn *ib_conn = conn->dd_data; 519 490 struct iscsi_iser_task *iser_task = task->dd_data; 520 491 struct iser_tx_desc *mdesc = &iser_task->desc; 521 492 unsigned long 
data_seg_len; 522 493 int err = 0; 523 494 struct iser_device *device; 524 - struct iser_conn *ib_conn = iser_conn->ib_conn; 525 495 526 496 /* build the tx desc regd header and add it to the tx desc dto */ 527 497 mdesc->type = ISCSI_TX_CONTROL; 528 - iser_create_send_desc(iser_conn->ib_conn, mdesc); 498 + iser_create_send_desc(ib_conn, mdesc); 529 499 530 - device = iser_conn->ib_conn->device; 500 + device = ib_conn->device; 531 501 532 502 data_seg_len = ntoh24(task->hdr->dlength); 533 503 ··· 541 513 ib_conn->login_req_dma, task->data_count, 542 514 DMA_TO_DEVICE); 543 515 544 - memcpy(iser_conn->ib_conn->login_req_buf, task->data, 545 - task->data_count); 516 + memcpy(ib_conn->login_req_buf, task->data, task->data_count); 546 517 547 518 ib_dma_sync_single_for_device(device->ib_device, 548 519 ib_conn->login_req_dma, task->data_count, 549 520 DMA_TO_DEVICE); 550 521 551 - tx_dsg->addr = iser_conn->ib_conn->login_req_dma; 522 + tx_dsg->addr = ib_conn->login_req_dma; 552 523 tx_dsg->length = task->data_count; 553 524 tx_dsg->lkey = device->mr->lkey; 554 525 mdesc->num_sge = 2; ··· 556 529 if (task == conn->login_task) { 557 530 iser_dbg("op %x dsl %lx, posting login rx buffer\n", 558 531 task->hdr->opcode, data_seg_len); 559 - err = iser_post_recvl(iser_conn->ib_conn); 532 + err = iser_post_recvl(ib_conn); 560 533 if (err) 561 534 goto send_control_error; 562 535 err = iser_post_rx_bufs(conn, task->hdr); ··· 564 537 goto send_control_error; 565 538 } 566 539 567 - err = iser_post_send(iser_conn->ib_conn, mdesc); 540 + err = iser_post_send(ib_conn, mdesc); 568 541 if (!err) 569 542 return 0; 570 543 ··· 580 553 unsigned long rx_xfer_len, 581 554 struct iser_conn *ib_conn) 582 555 { 583 - struct iscsi_iser_conn *conn = ib_conn->iser_conn; 584 556 struct iscsi_hdr *hdr; 585 557 u64 rx_dma; 586 558 int rx_buflen, outstanding, count, err; ··· 601 575 iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, 602 576 hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN)); 603 577 604 - iscsi_iser_recv(conn->iscsi_conn, hdr, 605 - rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN); 578 + iscsi_iser_recv(ib_conn->iscsi_conn, hdr, rx_desc->data, 579 + rx_xfer_len - ISER_HEADERS_LEN); 606 580 607 581 ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma, 608 - rx_buflen, DMA_FROM_DEVICE); 582 + rx_buflen, DMA_FROM_DEVICE); 609 583 610 584 /* decrementing conn->post_recv_buf_count only --after-- freeing the * 611 585 * task eliminates the need to worry on tasks which are completed in * 612 586 * parallel to the execution of iser_conn_term. 
So the code that waits * 613 587 * for the posted rx bufs refcount to become zero handles everything */ 614 - conn->ib_conn->post_recv_buf_count--; 588 + ib_conn->post_recv_buf_count--; 615 589 616 590 if (rx_dma == ib_conn->login_resp_dma) 617 591 return; ··· 661 635 iser_task->data[ISER_DIR_IN].data_len = 0; 662 636 iser_task->data[ISER_DIR_OUT].data_len = 0; 663 637 638 + iser_task->prot[ISER_DIR_IN].data_len = 0; 639 + iser_task->prot[ISER_DIR_OUT].data_len = 0; 640 + 664 641 memset(&iser_task->rdma_regd[ISER_DIR_IN], 0, 665 642 sizeof(struct iser_regd_buf)); 666 643 memset(&iser_task->rdma_regd[ISER_DIR_OUT], 0, ··· 672 643 673 644 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) 674 645 { 675 - struct iser_device *device = iser_task->iser_conn->ib_conn->device; 676 - int is_rdma_aligned = 1; 646 + struct iser_device *device = iser_task->ib_conn->device; 647 + int is_rdma_data_aligned = 1; 648 + int is_rdma_prot_aligned = 1; 649 + int prot_count = scsi_prot_sg_count(iser_task->sc); 677 650 678 651 /* if we were reading, copy back to unaligned sglist, 679 652 * anyway dma_unmap and free the copy 680 653 */ 681 654 if (iser_task->data_copy[ISER_DIR_IN].copy_buf != NULL) { 682 - is_rdma_aligned = 0; 683 - iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_IN); 655 + is_rdma_data_aligned = 0; 656 + iser_finalize_rdma_unaligned_sg(iser_task, 657 + &iser_task->data[ISER_DIR_IN], 658 + &iser_task->data_copy[ISER_DIR_IN], 659 + ISER_DIR_IN); 684 660 } 661 + 685 662 if (iser_task->data_copy[ISER_DIR_OUT].copy_buf != NULL) { 686 - is_rdma_aligned = 0; 687 - iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT); 663 + is_rdma_data_aligned = 0; 664 + iser_finalize_rdma_unaligned_sg(iser_task, 665 + &iser_task->data[ISER_DIR_OUT], 666 + &iser_task->data_copy[ISER_DIR_OUT], 667 + ISER_DIR_OUT); 688 668 } 689 669 690 - if (iser_task->dir[ISER_DIR_IN]) 670 + if (iser_task->prot_copy[ISER_DIR_IN].copy_buf != NULL) { 671 + is_rdma_prot_aligned = 0; 672 + iser_finalize_rdma_unaligned_sg(iser_task, 673 + &iser_task->prot[ISER_DIR_IN], 674 + &iser_task->prot_copy[ISER_DIR_IN], 675 + ISER_DIR_IN); 676 + } 677 + 678 + if (iser_task->prot_copy[ISER_DIR_OUT].copy_buf != NULL) { 679 + is_rdma_prot_aligned = 0; 680 + iser_finalize_rdma_unaligned_sg(iser_task, 681 + &iser_task->prot[ISER_DIR_OUT], 682 + &iser_task->prot_copy[ISER_DIR_OUT], 683 + ISER_DIR_OUT); 684 + } 685 + 686 + if (iser_task->dir[ISER_DIR_IN]) { 691 687 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); 688 + if (is_rdma_data_aligned) 689 + iser_dma_unmap_task_data(iser_task, 690 + &iser_task->data[ISER_DIR_IN]); 691 + if (prot_count && is_rdma_prot_aligned) 692 + iser_dma_unmap_task_data(iser_task, 693 + &iser_task->prot[ISER_DIR_IN]); 694 + } 692 695 693 - if (iser_task->dir[ISER_DIR_OUT]) 696 + if (iser_task->dir[ISER_DIR_OUT]) { 694 697 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); 695 - 696 - /* if the data was unaligned, it was already unmapped and then copied */ 697 - if (is_rdma_aligned) 698 - iser_dma_unmap_task_data(iser_task); 698 + if (is_rdma_data_aligned) 699 + iser_dma_unmap_task_data(iser_task, 700 + &iser_task->data[ISER_DIR_OUT]); 701 + if (prot_count && is_rdma_prot_aligned) 702 + iser_dma_unmap_task_data(iser_task, 703 + &iser_task->prot[ISER_DIR_OUT]); 704 + } 699 705 }
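The initiator now maps a second scatterlist for protection information whenever scsi_prot_sg_count() is non-zero, and iser_task_rdma_finalize() above mirrors that so every buffer is unmapped exactly once, bounce path or not. The pairing, reduced to a defensive sketch (toy names; the hypothetical toy_map()/toy_unmap() stand in for the ib_dma_map_sg()-based helpers, and unlike the driver, which funnels cleanup through task teardown, this sketch unwinds locally to stay self-contained):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

struct toy_task;        /* stand-in for the driver's task struct */

static int toy_map(struct toy_task *t, struct scatterlist *sg, int n,
                   enum dma_data_direction dir);
static void toy_unmap(struct toy_task *t, struct scatterlist *sg, int n,
                      enum dma_data_direction dir);

static int toy_map_task(struct toy_task *t, struct scsi_cmnd *sc,
                        enum dma_data_direction dir)
{
        int err = toy_map(t, scsi_sglist(sc), scsi_sg_count(sc), dir);

        if (err)
                return err;

        /* Protection information travels as a second scatterlist. */
        if (scsi_prot_sg_count(sc)) {
                err = toy_map(t, scsi_prot_sglist(sc),
                              scsi_prot_sg_count(sc), dir);
                if (err)
                        toy_unmap(t, scsi_sglist(sc), scsi_sg_count(sc), dir);
        }
        return err;
}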
+345 -125
drivers/infiniband/ulp/iser/iser_memory.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 3 - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. 3 + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. 4 4 * 5 5 * This software is available to you under a choice of one of two 6 6 * licenses. You may choose to be licensed under the terms of the GNU ··· 45 45 * iser_start_rdma_unaligned_sg 46 46 */ 47 47 static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, 48 + struct iser_data_buf *data, 49 + struct iser_data_buf *data_copy, 48 50 enum iser_data_dir cmd_dir) 49 51 { 50 - int dma_nents; 51 - struct ib_device *dev; 52 + struct ib_device *dev = iser_task->ib_conn->device->ib_device; 53 + struct scatterlist *sgl = (struct scatterlist *)data->buf; 54 + struct scatterlist *sg; 52 55 char *mem = NULL; 53 - struct iser_data_buf *data = &iser_task->data[cmd_dir]; 54 - unsigned long cmd_data_len = data->data_len; 56 + unsigned long cmd_data_len = 0; 57 + int dma_nents, i; 58 + 59 + for_each_sg(sgl, sg, data->size, i) 60 + cmd_data_len += ib_sg_dma_len(dev, sg); 55 61 56 62 if (cmd_data_len > ISER_KMALLOC_THRESHOLD) 57 63 mem = (void *)__get_free_pages(GFP_ATOMIC, ··· 67 61 68 62 if (mem == NULL) { 69 63 iser_err("Failed to allocate mem size %d %d for copying sglist\n", 70 - data->size,(int)cmd_data_len); 64 + data->size, (int)cmd_data_len); 71 65 return -ENOMEM; 72 66 } 73 67 74 68 if (cmd_dir == ISER_DIR_OUT) { 75 69 /* copy the unaligned sg the buffer which is used for RDMA */ 76 - struct scatterlist *sgl = (struct scatterlist *)data->buf; 77 - struct scatterlist *sg; 78 70 int i; 79 71 char *p, *from; 80 72 73 + sgl = (struct scatterlist *)data->buf; 81 74 p = mem; 82 75 for_each_sg(sgl, sg, data->size, i) { 83 76 from = kmap_atomic(sg_page(sg)); ··· 88 83 } 89 84 } 90 85 91 - sg_init_one(&iser_task->data_copy[cmd_dir].sg_single, mem, cmd_data_len); 92 - iser_task->data_copy[cmd_dir].buf = 93 - &iser_task->data_copy[cmd_dir].sg_single; 94 - iser_task->data_copy[cmd_dir].size = 1; 86 + sg_init_one(&data_copy->sg_single, mem, cmd_data_len); 87 + data_copy->buf = &data_copy->sg_single; 88 + data_copy->size = 1; 89 + data_copy->copy_buf = mem; 95 90 96 - iser_task->data_copy[cmd_dir].copy_buf = mem; 97 - 98 - dev = iser_task->iser_conn->ib_conn->device->ib_device; 99 - dma_nents = ib_dma_map_sg(dev, 100 - &iser_task->data_copy[cmd_dir].sg_single, 101 - 1, 91 + dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1, 102 92 (cmd_dir == ISER_DIR_OUT) ? 103 93 DMA_TO_DEVICE : DMA_FROM_DEVICE); 104 94 BUG_ON(dma_nents == 0); 105 95 106 - iser_task->data_copy[cmd_dir].dma_nents = dma_nents; 96 + data_copy->dma_nents = dma_nents; 97 + data_copy->data_len = cmd_data_len; 98 + 107 99 return 0; 108 100 } 109 101 110 102 /** 111 103 * iser_finalize_rdma_unaligned_sg 112 104 */ 105 + 113 106 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, 114 - enum iser_data_dir cmd_dir) 107 + struct iser_data_buf *data, 108 + struct iser_data_buf *data_copy, 109 + enum iser_data_dir cmd_dir) 115 110 { 116 111 struct ib_device *dev; 117 - struct iser_data_buf *mem_copy; 118 112 unsigned long cmd_data_len; 119 113 120 - dev = iser_task->iser_conn->ib_conn->device->ib_device; 121 - mem_copy = &iser_task->data_copy[cmd_dir]; 114 + dev = iser_task->ib_conn->device->ib_device; 122 115 123 - ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1, 116 + ib_dma_unmap_sg(dev, &data_copy->sg_single, 1, 124 117 (cmd_dir == ISER_DIR_OUT) ? 
125 118 DMA_TO_DEVICE : DMA_FROM_DEVICE); 126 119 ··· 130 127 int i; 131 128 132 129 /* copy back read RDMA to unaligned sg */ 133 - mem = mem_copy->copy_buf; 130 + mem = data_copy->copy_buf; 134 131 135 - sgl = (struct scatterlist *)iser_task->data[ISER_DIR_IN].buf; 136 - sg_size = iser_task->data[ISER_DIR_IN].size; 132 + sgl = (struct scatterlist *)data->buf; 133 + sg_size = data->size; 137 134 138 135 p = mem; 139 136 for_each_sg(sgl, sg, sg_size, i) { ··· 146 143 } 147 144 } 148 145 149 - cmd_data_len = iser_task->data[cmd_dir].data_len; 146 + cmd_data_len = data->data_len; 150 147 151 148 if (cmd_data_len > ISER_KMALLOC_THRESHOLD) 152 - free_pages((unsigned long)mem_copy->copy_buf, 149 + free_pages((unsigned long)data_copy->copy_buf, 153 150 ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); 154 151 else 155 - kfree(mem_copy->copy_buf); 152 + kfree(data_copy->copy_buf); 156 153 157 - mem_copy->copy_buf = NULL; 154 + data_copy->copy_buf = NULL; 158 155 } 159 156 160 157 #define IS_4K_ALIGNED(addr) ((((unsigned long)addr) & ~MASK_4K) == 0) ··· 322 319 struct ib_device *dev; 323 320 324 321 iser_task->dir[iser_dir] = 1; 325 - dev = iser_task->iser_conn->ib_conn->device->ib_device; 322 + dev = iser_task->ib_conn->device->ib_device; 326 323 327 324 data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); 328 325 if (data->dma_nents == 0) { ··· 332 329 return 0; 333 330 } 334 331 335 - void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task) 332 + void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 333 + struct iser_data_buf *data) 336 334 { 337 335 struct ib_device *dev; 338 - struct iser_data_buf *data; 339 336 340 - dev = iser_task->iser_conn->ib_conn->device->ib_device; 341 - 342 - if (iser_task->dir[ISER_DIR_IN]) { 343 - data = &iser_task->data[ISER_DIR_IN]; 344 - ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); 345 - } 346 - 347 - if (iser_task->dir[ISER_DIR_OUT]) { 348 - data = &iser_task->data[ISER_DIR_OUT]; 349 - ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE); 350 - } 337 + dev = iser_task->ib_conn->device->ib_device; 338 + ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); 351 339 } 352 340 353 341 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, 354 342 struct ib_device *ibdev, 343 + struct iser_data_buf *mem, 344 + struct iser_data_buf *mem_copy, 355 345 enum iser_data_dir cmd_dir, 356 346 int aligned_len) 357 347 { 358 - struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn; 359 - struct iser_data_buf *mem = &iser_task->data[cmd_dir]; 348 + struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn; 360 349 361 350 iscsi_conn->fmr_unalign_cnt++; 362 351 iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n", ··· 358 363 iser_data_buf_dump(mem, ibdev); 359 364 360 365 /* unmap the command data before accessing it */ 361 - iser_dma_unmap_task_data(iser_task); 366 + iser_dma_unmap_task_data(iser_task, mem); 362 367 363 368 /* allocate copy buf, if we are writing, copy the */ 364 369 /* unaligned scatterlist, dma map the copy */ 365 - if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0) 366 - return -ENOMEM; 370 + if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0) 371 + return -ENOMEM; 367 372 368 373 return 0; 369 374 } ··· 377 382 int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, 378 383 enum iser_data_dir cmd_dir) 379 384 { 380 - struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn; 385 + struct iser_conn 
*ib_conn = iser_task->ib_conn; 381 386 struct iser_device *device = ib_conn->device; 382 387 struct ib_device *ibdev = device->ib_device; 383 388 struct iser_data_buf *mem = &iser_task->data[cmd_dir]; ··· 391 396 392 397 aligned_len = iser_data_buf_aligned_len(mem, ibdev); 393 398 if (aligned_len != mem->dma_nents) { 394 - err = fall_to_bounce_buf(iser_task, ibdev, 399 + err = fall_to_bounce_buf(iser_task, ibdev, mem, 400 + &iser_task->data_copy[cmd_dir], 395 401 cmd_dir, aligned_len); 396 402 if (err) { 397 403 iser_err("failed to allocate bounce buffer\n"); ··· 418 422 (unsigned long)regd_buf->reg.va, 419 423 (unsigned long)regd_buf->reg.len); 420 424 } else { /* use FMR for multiple dma entries */ 421 - iser_page_vec_build(mem, ib_conn->fastreg.fmr.page_vec, ibdev); 422 - err = iser_reg_page_vec(ib_conn, ib_conn->fastreg.fmr.page_vec, 425 + iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev); 426 + err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec, 423 427 &regd_buf->reg); 424 428 if (err && err != -EAGAIN) { 425 429 iser_data_buf_dump(mem, ibdev); ··· 427 431 mem->dma_nents, 428 432 ntoh24(iser_task->desc.iscsi_header.dlength)); 429 433 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", 430 - ib_conn->fastreg.fmr.page_vec->data_size, 431 - ib_conn->fastreg.fmr.page_vec->length, 432 - ib_conn->fastreg.fmr.page_vec->offset); 433 - for (i = 0; i < ib_conn->fastreg.fmr.page_vec->length; i++) 434 + ib_conn->fmr.page_vec->data_size, 435 + ib_conn->fmr.page_vec->length, 436 + ib_conn->fmr.page_vec->offset); 437 + for (i = 0; i < ib_conn->fmr.page_vec->length; i++) 434 438 iser_err("page_vec[%d] = 0x%llx\n", i, 435 - (unsigned long long) ib_conn->fastreg.fmr.page_vec->pages[i]); 439 + (unsigned long long) ib_conn->fmr.page_vec->pages[i]); 436 440 } 437 441 if (err) 438 442 return err; ··· 440 444 return 0; 441 445 } 442 446 443 - static int iser_fast_reg_mr(struct fast_reg_descriptor *desc, 444 - struct iser_conn *ib_conn, 445 - struct iser_regd_buf *regd_buf, 446 - u32 offset, unsigned int data_size, 447 - unsigned int page_list_len) 447 + static inline enum ib_t10_dif_type 448 + scsi2ib_prot_type(unsigned char prot_type) 448 449 { 450 + switch (prot_type) { 451 + case SCSI_PROT_DIF_TYPE0: 452 + return IB_T10DIF_NONE; 453 + case SCSI_PROT_DIF_TYPE1: 454 + return IB_T10DIF_TYPE1; 455 + case SCSI_PROT_DIF_TYPE2: 456 + return IB_T10DIF_TYPE2; 457 + case SCSI_PROT_DIF_TYPE3: 458 + return IB_T10DIF_TYPE3; 459 + default: 460 + return IB_T10DIF_NONE; 461 + } 462 + } 463 + 464 + 465 + static int 466 + iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) 467 + { 468 + unsigned char scsi_ptype = scsi_get_prot_type(sc); 469 + 470 + sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF; 471 + sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF; 472 + sig_attrs->mem.sig.dif.pi_interval = sc->device->sector_size; 473 + sig_attrs->wire.sig.dif.pi_interval = sc->device->sector_size; 474 + 475 + switch (scsi_get_prot_op(sc)) { 476 + case SCSI_PROT_WRITE_INSERT: 477 + case SCSI_PROT_READ_STRIP: 478 + sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE; 479 + sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype); 480 + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; 481 + sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) & 482 + 0xffffffff; 483 + break; 484 + case SCSI_PROT_READ_INSERT: 485 + case SCSI_PROT_WRITE_STRIP: 486 + sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype); 487 + sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC; 488 + sig_attrs->mem.sig.dif.ref_tag = 
scsi_get_lba(sc) & 489 + 0xffffffff; 490 + sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE; 491 + break; 492 + case SCSI_PROT_READ_PASS: 493 + case SCSI_PROT_WRITE_PASS: 494 + sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype); 495 + sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC; 496 + sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) & 497 + 0xffffffff; 498 + sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype); 499 + sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC; 500 + sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) & 501 + 0xffffffff; 502 + break; 503 + default: 504 + iser_err("Unsupported PI operation %d\n", 505 + scsi_get_prot_op(sc)); 506 + return -EINVAL; 507 + } 508 + return 0; 509 + } 510 + 511 + 512 + static int 513 + iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) 514 + { 515 + switch (scsi_get_prot_type(sc)) { 516 + case SCSI_PROT_DIF_TYPE0: 517 + *mask = 0x0; 518 + break; 519 + case SCSI_PROT_DIF_TYPE1: 520 + case SCSI_PROT_DIF_TYPE2: 521 + *mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG; 522 + break; 523 + case SCSI_PROT_DIF_TYPE3: 524 + *mask = ISER_CHECK_GUARD; 525 + break; 526 + default: 527 + iser_err("Unsupported protection type %d\n", 528 + scsi_get_prot_type(sc)); 529 + return -EINVAL; 530 + } 531 + 532 + return 0; 533 + } 534 + 535 + static int 536 + iser_reg_sig_mr(struct iscsi_iser_task *iser_task, 537 + struct fast_reg_descriptor *desc, struct ib_sge *data_sge, 538 + struct ib_sge *prot_sge, struct ib_sge *sig_sge) 539 + { 540 + struct iser_conn *ib_conn = iser_task->ib_conn; 541 + struct iser_pi_context *pi_ctx = desc->pi_ctx; 542 + struct ib_send_wr sig_wr, inv_wr; 543 + struct ib_send_wr *bad_wr, *wr = NULL; 544 + struct ib_sig_attrs sig_attrs; 545 + int ret; 546 + u32 key; 547 + 548 + memset(&sig_attrs, 0, sizeof(sig_attrs)); 549 + ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs); 550 + if (ret) 551 + goto err; 552 + 553 + ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask); 554 + if (ret) 555 + goto err; 556 + 557 + if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) { 558 + memset(&inv_wr, 0, sizeof(inv_wr)); 559 + inv_wr.opcode = IB_WR_LOCAL_INV; 560 + inv_wr.wr_id = ISER_FASTREG_LI_WRID; 561 + inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey; 562 + wr = &inv_wr; 563 + /* Bump the key */ 564 + key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF); 565 + ib_update_fast_reg_key(pi_ctx->sig_mr, ++key); 566 + } 567 + 568 + memset(&sig_wr, 0, sizeof(sig_wr)); 569 + sig_wr.opcode = IB_WR_REG_SIG_MR; 570 + sig_wr.wr_id = ISER_FASTREG_LI_WRID; 571 + sig_wr.sg_list = data_sge; 572 + sig_wr.num_sge = 1; 573 + sig_wr.wr.sig_handover.sig_attrs = &sig_attrs; 574 + sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr; 575 + if (scsi_prot_sg_count(iser_task->sc)) 576 + sig_wr.wr.sig_handover.prot = prot_sge; 577 + sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE | 578 + IB_ACCESS_REMOTE_READ | 579 + IB_ACCESS_REMOTE_WRITE; 580 + 581 + if (!wr) 582 + wr = &sig_wr; 583 + else 584 + wr->next = &sig_wr; 585 + 586 + ret = ib_post_send(ib_conn->qp, wr, &bad_wr); 587 + if (ret) { 588 + iser_err("reg_sig_mr failed, ret:%d\n", ret); 589 + goto err; 590 + } 591 + desc->reg_indicators &= ~ISER_SIG_KEY_VALID; 592 + 593 + sig_sge->lkey = pi_ctx->sig_mr->lkey; 594 + sig_sge->addr = 0; 595 + sig_sge->length = data_sge->length + prot_sge->length; 596 + if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT || 597 + scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) { 598 + sig_sge->length += (data_sge->length / 599 + 
iser_task->sc->device->sector_size) * 8; 600 + } 601 + 602 + iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n", 603 + sig_sge->addr, sig_sge->length, 604 + sig_sge->lkey); 605 + err: 606 + return ret; 607 + } 608 + 609 + static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, 610 + struct iser_regd_buf *regd_buf, 611 + struct iser_data_buf *mem, 612 + enum iser_reg_indicator ind, 613 + struct ib_sge *sge) 614 + { 615 + struct fast_reg_descriptor *desc = regd_buf->reg.mem_h; 616 + struct iser_conn *ib_conn = iser_task->ib_conn; 617 + struct iser_device *device = ib_conn->device; 618 + struct ib_device *ibdev = device->ib_device; 619 + struct ib_mr *mr; 620 + struct ib_fast_reg_page_list *frpl; 449 621 struct ib_send_wr fastreg_wr, inv_wr; 450 622 struct ib_send_wr *bad_wr, *wr = NULL; 451 623 u8 key; 452 - int ret; 624 + int ret, offset, size, plen; 453 625 454 - if (!desc->valid) { 626 + /* if there a single dma entry, dma mr suffices */ 627 + if (mem->dma_nents == 1) { 628 + struct scatterlist *sg = (struct scatterlist *)mem->buf; 629 + 630 + sge->lkey = device->mr->lkey; 631 + sge->addr = ib_sg_dma_address(ibdev, &sg[0]); 632 + sge->length = ib_sg_dma_len(ibdev, &sg[0]); 633 + 634 + iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n", 635 + sge->lkey, sge->addr, sge->length); 636 + return 0; 637 + } 638 + 639 + if (ind == ISER_DATA_KEY_VALID) { 640 + mr = desc->data_mr; 641 + frpl = desc->data_frpl; 642 + } else { 643 + mr = desc->pi_ctx->prot_mr; 644 + frpl = desc->pi_ctx->prot_frpl; 645 + } 646 + 647 + plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list, 648 + &offset, &size); 649 + if (plen * SIZE_4K < size) { 650 + iser_err("fast reg page_list too short to hold this SG\n"); 651 + return -EINVAL; 652 + } 653 + 654 + if (!(desc->reg_indicators & ind)) { 455 655 memset(&inv_wr, 0, sizeof(inv_wr)); 656 + inv_wr.wr_id = ISER_FASTREG_LI_WRID; 456 657 inv_wr.opcode = IB_WR_LOCAL_INV; 457 - inv_wr.send_flags = IB_SEND_SIGNALED; 458 - inv_wr.ex.invalidate_rkey = desc->data_mr->rkey; 658 + inv_wr.ex.invalidate_rkey = mr->rkey; 459 659 wr = &inv_wr; 460 660 /* Bump the key */ 461 - key = (u8)(desc->data_mr->rkey & 0x000000FF); 462 - ib_update_fast_reg_key(desc->data_mr, ++key); 661 + key = (u8)(mr->rkey & 0x000000FF); 662 + ib_update_fast_reg_key(mr, ++key); 463 663 } 464 664 465 665 /* Prepare FASTREG WR */ 466 666 memset(&fastreg_wr, 0, sizeof(fastreg_wr)); 667 + fastreg_wr.wr_id = ISER_FASTREG_LI_WRID; 467 668 fastreg_wr.opcode = IB_WR_FAST_REG_MR; 468 - fastreg_wr.send_flags = IB_SEND_SIGNALED; 469 - fastreg_wr.wr.fast_reg.iova_start = desc->data_frpl->page_list[0] + offset; 470 - fastreg_wr.wr.fast_reg.page_list = desc->data_frpl; 471 - fastreg_wr.wr.fast_reg.page_list_len = page_list_len; 669 + fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset; 670 + fastreg_wr.wr.fast_reg.page_list = frpl; 671 + fastreg_wr.wr.fast_reg.page_list_len = plen; 472 672 fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K; 473 - fastreg_wr.wr.fast_reg.length = data_size; 474 - fastreg_wr.wr.fast_reg.rkey = desc->data_mr->rkey; 673 + fastreg_wr.wr.fast_reg.length = size; 674 + fastreg_wr.wr.fast_reg.rkey = mr->rkey; 475 675 fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE | 476 676 IB_ACCESS_REMOTE_WRITE | 477 677 IB_ACCESS_REMOTE_READ); 478 678 479 - if (!wr) { 679 + if (!wr) 480 680 wr = &fastreg_wr; 481 - atomic_inc(&ib_conn->post_send_buf_count); 482 - } else { 681 + else 483 682 wr->next = &fastreg_wr; 484 - atomic_add(2, 
&ib_conn->post_send_buf_count); 485 - } 486 683 487 684 ret = ib_post_send(ib_conn->qp, wr, &bad_wr); 488 685 if (ret) { 489 - if (bad_wr->next) 490 - atomic_sub(2, &ib_conn->post_send_buf_count); 491 - else 492 - atomic_dec(&ib_conn->post_send_buf_count); 493 686 iser_err("fast registration failed, ret:%d\n", ret); 494 687 return ret; 495 688 } 496 - desc->valid = false; 689 + desc->reg_indicators &= ~ind; 497 690 498 - regd_buf->reg.mem_h = desc; 499 - regd_buf->reg.lkey = desc->data_mr->lkey; 500 - regd_buf->reg.rkey = desc->data_mr->rkey; 501 - regd_buf->reg.va = desc->data_frpl->page_list[0] + offset; 502 - regd_buf->reg.len = data_size; 503 - regd_buf->reg.is_mr = 1; 691 + sge->lkey = mr->lkey; 692 + sge->addr = frpl->page_list[0] + offset; 693 + sge->length = size; 504 694 505 695 return ret; 506 696 } 507 697 508 698 /** 509 - * iser_reg_rdma_mem_frwr - Registers memory intended for RDMA, 699 + * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA, 510 700 * using Fast Registration WR (if possible) obtaining rkey and va 511 701 * 512 702 * returns 0 on success, errno code on failure 513 703 */ 514 - int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *iser_task, 515 - enum iser_data_dir cmd_dir) 704 + int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, 705 + enum iser_data_dir cmd_dir) 516 706 { 517 - struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn; 707 + struct iser_conn *ib_conn = iser_task->ib_conn; 518 708 struct iser_device *device = ib_conn->device; 519 709 struct ib_device *ibdev = device->ib_device; 520 710 struct iser_data_buf *mem = &iser_task->data[cmd_dir]; 521 711 struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir]; 522 - struct fast_reg_descriptor *desc; 523 - unsigned int data_size, page_list_len; 712 + struct fast_reg_descriptor *desc = NULL; 713 + struct ib_sge data_sge; 524 714 int err, aligned_len; 525 715 unsigned long flags; 526 - u32 offset; 527 716 528 717 aligned_len = iser_data_buf_aligned_len(mem, ibdev); 529 718 if (aligned_len != mem->dma_nents) { 530 - err = fall_to_bounce_buf(iser_task, ibdev, 719 + err = fall_to_bounce_buf(iser_task, ibdev, mem, 720 + &iser_task->data_copy[cmd_dir], 531 721 cmd_dir, aligned_len); 532 722 if (err) { 533 723 iser_err("failed to allocate bounce buffer\n"); ··· 722 540 mem = &iser_task->data_copy[cmd_dir]; 723 541 } 724 542 725 - /* if there a single dma entry, dma mr suffices */ 726 - if (mem->dma_nents == 1) { 727 - struct scatterlist *sg = (struct scatterlist *)mem->buf; 728 - 729 - regd_buf->reg.lkey = device->mr->lkey; 730 - regd_buf->reg.rkey = device->mr->rkey; 731 - regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]); 732 - regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]); 733 - regd_buf->reg.is_mr = 0; 734 - } else { 543 + if (mem->dma_nents != 1 || 544 + scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { 735 545 spin_lock_irqsave(&ib_conn->lock, flags); 736 - desc = list_first_entry(&ib_conn->fastreg.frwr.pool, 546 + desc = list_first_entry(&ib_conn->fastreg.pool, 737 547 struct fast_reg_descriptor, list); 738 548 list_del(&desc->list); 739 549 spin_unlock_irqrestore(&ib_conn->lock, flags); 740 - page_list_len = iser_sg_to_page_vec(mem, device->ib_device, 741 - desc->data_frpl->page_list, 742 - &offset, &data_size); 550 + regd_buf->reg.mem_h = desc; 551 + } 743 552 744 - if (page_list_len * SIZE_4K < data_size) { 745 - iser_err("fast reg page_list too short to hold this SG\n"); 746 - err = -EINVAL; 747 - goto err_reg; 553 + err = iser_fast_reg_mr(iser_task, 
regd_buf, mem, 554 + ISER_DATA_KEY_VALID, &data_sge); 555 + if (err) 556 + goto err_reg; 557 + 558 + if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { 559 + struct ib_sge prot_sge, sig_sge; 560 + 561 + memset(&prot_sge, 0, sizeof(prot_sge)); 562 + if (scsi_prot_sg_count(iser_task->sc)) { 563 + mem = &iser_task->prot[cmd_dir]; 564 + aligned_len = iser_data_buf_aligned_len(mem, ibdev); 565 + if (aligned_len != mem->dma_nents) { 566 + err = fall_to_bounce_buf(iser_task, ibdev, mem, 567 + &iser_task->prot_copy[cmd_dir], 568 + cmd_dir, aligned_len); 569 + if (err) { 570 + iser_err("failed to allocate bounce buffer\n"); 571 + return err; 572 + } 573 + mem = &iser_task->prot_copy[cmd_dir]; 574 + } 575 + 576 + err = iser_fast_reg_mr(iser_task, regd_buf, mem, 577 + ISER_PROT_KEY_VALID, &prot_sge); 578 + if (err) 579 + goto err_reg; 748 580 } 749 581 750 - err = iser_fast_reg_mr(desc, ib_conn, regd_buf, 751 - offset, data_size, page_list_len); 752 - if (err) 753 - goto err_reg; 582 + err = iser_reg_sig_mr(iser_task, desc, &data_sge, 583 + &prot_sge, &sig_sge); 584 + if (err) { 585 + iser_err("Failed to register signature mr\n"); 586 + return err; 587 + } 588 + desc->reg_indicators |= ISER_FASTREG_PROTECTED; 589 + 590 + regd_buf->reg.lkey = sig_sge.lkey; 591 + regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey; 592 + regd_buf->reg.va = sig_sge.addr; 593 + regd_buf->reg.len = sig_sge.length; 594 + regd_buf->reg.is_mr = 1; 595 + } else { 596 + if (desc) { 597 + regd_buf->reg.rkey = desc->data_mr->rkey; 598 + regd_buf->reg.is_mr = 1; 599 + } else { 600 + regd_buf->reg.rkey = device->mr->rkey; 601 + regd_buf->reg.is_mr = 0; 602 + } 603 + 604 + regd_buf->reg.lkey = data_sge.lkey; 605 + regd_buf->reg.va = data_sge.addr; 606 + regd_buf->reg.len = data_sge.length; 754 607 } 755 608 756 609 return 0; 757 610 err_reg: 758 - spin_lock_irqsave(&ib_conn->lock, flags); 759 - list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool); 760 - spin_unlock_irqrestore(&ib_conn->lock, flags); 611 + if (desc) { 612 + spin_lock_irqsave(&ib_conn->lock, flags); 613 + list_add_tail(&desc->list, &ib_conn->fastreg.pool); 614 + spin_unlock_irqrestore(&ib_conn->lock, flags); 615 + } 616 + 761 617 return err; 762 618 }
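A note on the length arithmetic in iser_reg_sig_mr() above: for SCSI_PROT_WRITE_INSERT and SCSI_PROT_READ_STRIP the HCA generates or strips the protection information itself, so memory holds plain data while the wire carries one extra 8-byte DIF tuple per sector, which is why sig_sge->length grows by (data length / sector size) * 8. A minimal user-space sketch of the same calculation; the function name and the 64 KB / 512-byte figures are illustrative, while the 8-byte tuple size comes from T10-PI:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One T10-PI DIF tuple: 2-byte guard, 2-byte app tag, 4-byte ref tag. */
#define DIF_TUPLE_SIZE 8

/*
 * Wire length of an INSERT/STRIP transfer: data bytes plus one DIF
 * tuple per sector, mirroring the sig_sge->length computation above.
 */
static uint64_t pi_wire_len(uint64_t data_len, uint32_t sector_size)
{
    return data_len + (data_len / sector_size) * DIF_TUPLE_SIZE;
}

int main(void)
{
    uint64_t data_len = 64 * 1024;  /* hypothetical 64 KB write */
    uint32_t sector_size = 512;

    /* 128 sectors, so 128 * 8 = 1024 bytes of PI added on the wire. */
    assert(pi_wire_len(data_len, sector_size) == 65536 + 1024);
    printf("wire length: %llu bytes\n",
           (unsigned long long)pi_wire_len(data_len, sector_size));
    return 0;
}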
+235 -90
drivers/infiniband/ulp/iser/iser_verbs.c
··· 1 1 /* 2 2 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved. 3 3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. 4 - * Copyright (c) 2013 Mellanox Technologies. All rights reserved. 4 + * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved. 5 5 * 6 6 * This software is available to you under a choice of one of two 7 7 * licenses. You may choose to be licensed under the terms of the GNU ··· 71 71 */ 72 72 static int iser_create_device_ib_res(struct iser_device *device) 73 73 { 74 - int i, j; 75 74 struct iser_cq_desc *cq_desc; 76 - struct ib_device_attr *dev_attr; 75 + struct ib_device_attr *dev_attr = &device->dev_attr; 76 + int ret, i, j; 77 77 78 - dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL); 79 - if (!dev_attr) 80 - return -ENOMEM; 81 - 82 - if (ib_query_device(device->ib_device, dev_attr)) { 78 + ret = ib_query_device(device->ib_device, dev_attr); 79 + if (ret) { 83 80 pr_warn("Query device failed for %s\n", device->ib_device->name); 84 - goto dev_attr_err; 81 + return ret; 85 82 } 86 83 87 84 /* Assign function handles - based on FMR support */ ··· 91 94 device->iser_unreg_rdma_mem = iser_unreg_mem_fmr; 92 95 } else 93 96 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) { 94 - iser_info("FRWR supported, using FRWR for registration\n"); 95 - device->iser_alloc_rdma_reg_res = iser_create_frwr_pool; 96 - device->iser_free_rdma_reg_res = iser_free_frwr_pool; 97 - device->iser_reg_rdma_mem = iser_reg_rdma_mem_frwr; 98 - device->iser_unreg_rdma_mem = iser_unreg_mem_frwr; 97 + iser_info("FastReg supported, using FastReg for registration\n"); 98 + device->iser_alloc_rdma_reg_res = iser_create_fastreg_pool; 99 + device->iser_free_rdma_reg_res = iser_free_fastreg_pool; 100 + device->iser_reg_rdma_mem = iser_reg_rdma_mem_fastreg; 101 + device->iser_unreg_rdma_mem = iser_unreg_mem_fastreg; 99 102 } else { 100 - iser_err("IB device does not support FMRs nor FRWRs, can't register memory\n"); 101 - goto dev_attr_err; 103 + iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n"); 104 + return -1; 102 105 } 103 106 104 107 device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors); ··· 155 158 if (ib_register_event_handler(&device->event_handler)) 156 159 goto handler_err; 157 160 158 - kfree(dev_attr); 159 161 return 0; 160 162 161 163 handler_err: ··· 174 178 kfree(device->cq_desc); 175 179 cq_desc_err: 176 180 iser_err("failed to allocate an IB resource\n"); 177 - dev_attr_err: 178 - kfree(dev_attr); 179 181 return -1; 180 182 } 181 183 ··· 215 221 struct ib_fmr_pool_param params; 216 222 int ret = -ENOMEM; 217 223 218 - ib_conn->fastreg.fmr.page_vec = kmalloc(sizeof(struct iser_page_vec) + 219 - (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), 220 - GFP_KERNEL); 221 - if (!ib_conn->fastreg.fmr.page_vec) 224 + ib_conn->fmr.page_vec = kmalloc(sizeof(*ib_conn->fmr.page_vec) + 225 + (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)), 226 + GFP_KERNEL); 227 + if (!ib_conn->fmr.page_vec) 222 228 return ret; 223 229 224 - ib_conn->fastreg.fmr.page_vec->pages = (u64 *)(ib_conn->fastreg.fmr.page_vec + 1); 230 + ib_conn->fmr.page_vec->pages = (u64 *)(ib_conn->fmr.page_vec + 1); 225 231 226 232 params.page_shift = SHIFT_4K; 227 233 /* when the first/last SG element are not start/end * ··· 237 243 IB_ACCESS_REMOTE_WRITE | 238 244 IB_ACCESS_REMOTE_READ); 239 245 240 - ib_conn->fastreg.fmr.pool = ib_create_fmr_pool(device->pd, &params); 241 - if (!IS_ERR(ib_conn->fastreg.fmr.pool)) 246 + ib_conn->fmr.pool = 
ib_create_fmr_pool(device->pd, &params); 247 + if (!IS_ERR(ib_conn->fmr.pool)) 242 248 return 0; 243 249 244 250 /* no FMR => no need for page_vec */ 245 - kfree(ib_conn->fastreg.fmr.page_vec); 246 - ib_conn->fastreg.fmr.page_vec = NULL; 251 + kfree(ib_conn->fmr.page_vec); 252 + ib_conn->fmr.page_vec = NULL; 247 253 248 - ret = PTR_ERR(ib_conn->fastreg.fmr.pool); 249 - ib_conn->fastreg.fmr.pool = NULL; 254 + ret = PTR_ERR(ib_conn->fmr.pool); 255 + ib_conn->fmr.pool = NULL; 250 256 if (ret != -ENOSYS) { 251 257 iser_err("FMR allocation failed, err %d\n", ret); 252 258 return ret; ··· 262 268 void iser_free_fmr_pool(struct iser_conn *ib_conn) 263 269 { 264 270 iser_info("freeing conn %p fmr pool %p\n", 265 - ib_conn, ib_conn->fastreg.fmr.pool); 271 + ib_conn, ib_conn->fmr.pool); 266 272 267 - if (ib_conn->fastreg.fmr.pool != NULL) 268 - ib_destroy_fmr_pool(ib_conn->fastreg.fmr.pool); 273 + if (ib_conn->fmr.pool != NULL) 274 + ib_destroy_fmr_pool(ib_conn->fmr.pool); 269 275 270 - ib_conn->fastreg.fmr.pool = NULL; 276 + ib_conn->fmr.pool = NULL; 271 277 272 - kfree(ib_conn->fastreg.fmr.page_vec); 273 - ib_conn->fastreg.fmr.page_vec = NULL; 278 + kfree(ib_conn->fmr.page_vec); 279 + ib_conn->fmr.page_vec = NULL; 280 + } 281 + 282 + static int 283 + iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd, 284 + bool pi_enable, struct fast_reg_descriptor *desc) 285 + { 286 + int ret; 287 + 288 + desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device, 289 + ISCSI_ISER_SG_TABLESIZE + 1); 290 + if (IS_ERR(desc->data_frpl)) { 291 + ret = PTR_ERR(desc->data_frpl); 292 + iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", 293 + ret); 294 + return PTR_ERR(desc->data_frpl); 295 + } 296 + 297 + desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE + 1); 298 + if (IS_ERR(desc->data_mr)) { 299 + ret = PTR_ERR(desc->data_mr); 300 + iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); 301 + goto fast_reg_mr_failure; 302 + } 303 + desc->reg_indicators |= ISER_DATA_KEY_VALID; 304 + 305 + if (pi_enable) { 306 + struct ib_mr_init_attr mr_init_attr = {0}; 307 + struct iser_pi_context *pi_ctx = NULL; 308 + 309 + desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL); 310 + if (!desc->pi_ctx) { 311 + iser_err("Failed to allocate pi context\n"); 312 + ret = -ENOMEM; 313 + goto pi_ctx_alloc_failure; 314 + } 315 + pi_ctx = desc->pi_ctx; 316 + 317 + pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device, 318 + ISCSI_ISER_SG_TABLESIZE); 319 + if (IS_ERR(pi_ctx->prot_frpl)) { 320 + ret = PTR_ERR(pi_ctx->prot_frpl); 321 + iser_err("Failed to allocate prot frpl ret=%d\n", 322 + ret); 323 + goto prot_frpl_failure; 324 + } 325 + 326 + pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, 327 + ISCSI_ISER_SG_TABLESIZE + 1); 328 + if (IS_ERR(pi_ctx->prot_mr)) { 329 + ret = PTR_ERR(pi_ctx->prot_mr); 330 + iser_err("Failed to allocate prot frmr ret=%d\n", 331 + ret); 332 + goto prot_mr_failure; 333 + } 334 + desc->reg_indicators |= ISER_PROT_KEY_VALID; 335 + 336 + mr_init_attr.max_reg_descriptors = 2; 337 + mr_init_attr.flags |= IB_MR_SIGNATURE_EN; 338 + pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr); 339 + if (IS_ERR(pi_ctx->sig_mr)) { 340 + ret = PTR_ERR(pi_ctx->sig_mr); 341 + iser_err("Failed to allocate signature enabled mr err=%d\n", 342 + ret); 343 + goto sig_mr_failure; 344 + } 345 + desc->reg_indicators |= ISER_SIG_KEY_VALID; 346 + } 347 + desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; 348 + 349 + iser_dbg("Create fr_desc %p page_list %p\n", 350 + desc, 
desc->data_frpl->page_list); 351 + 352 + return 0; 353 + sig_mr_failure: 354 + ib_dereg_mr(desc->pi_ctx->prot_mr); 355 + prot_mr_failure: 356 + ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); 357 + prot_frpl_failure: 358 + kfree(desc->pi_ctx); 359 + pi_ctx_alloc_failure: 360 + ib_dereg_mr(desc->data_mr); 361 + fast_reg_mr_failure: 362 + ib_free_fast_reg_page_list(desc->data_frpl); 363 + 364 + return ret; 274 365 } 275 366 276 367 /** 277 - * iser_create_frwr_pool - Creates pool of fast_reg descriptors 368 + * iser_create_fastreg_pool - Creates pool of fast_reg descriptors 278 369 * for fast registration work requests. 279 370 * returns 0 on success, or errno code on failure 280 371 */ 281 - int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max) 372 + int iser_create_fastreg_pool(struct iser_conn *ib_conn, unsigned cmds_max) 282 373 { 283 374 struct iser_device *device = ib_conn->device; 284 375 struct fast_reg_descriptor *desc; 285 376 int i, ret; 286 377 287 - INIT_LIST_HEAD(&ib_conn->fastreg.frwr.pool); 288 - ib_conn->fastreg.frwr.pool_size = 0; 378 + INIT_LIST_HEAD(&ib_conn->fastreg.pool); 379 + ib_conn->fastreg.pool_size = 0; 289 380 for (i = 0; i < cmds_max; i++) { 290 - desc = kmalloc(sizeof(*desc), GFP_KERNEL); 381 + desc = kzalloc(sizeof(*desc), GFP_KERNEL); 291 382 if (!desc) { 292 383 iser_err("Failed to allocate a new fast_reg descriptor\n"); 293 384 ret = -ENOMEM; 294 385 goto err; 295 386 } 296 387 297 - desc->data_frpl = ib_alloc_fast_reg_page_list(device->ib_device, 298 - ISCSI_ISER_SG_TABLESIZE + 1); 299 - if (IS_ERR(desc->data_frpl)) { 300 - ret = PTR_ERR(desc->data_frpl); 301 - iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", ret); 302 - goto fast_reg_page_failure; 388 + ret = iser_create_fastreg_desc(device->ib_device, device->pd, 389 + ib_conn->pi_support, desc); 390 + if (ret) { 391 + iser_err("Failed to create fastreg descriptor err=%d\n", 392 + ret); 393 + kfree(desc); 394 + goto err; 303 395 } 304 396 305 - desc->data_mr = ib_alloc_fast_reg_mr(device->pd, 306 - ISCSI_ISER_SG_TABLESIZE + 1); 307 - if (IS_ERR(desc->data_mr)) { 308 - ret = PTR_ERR(desc->data_mr); 309 - iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret); 310 - goto fast_reg_mr_failure; 311 - } 312 - desc->valid = true; 313 - list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool); 314 - ib_conn->fastreg.frwr.pool_size++; 397 + list_add_tail(&desc->list, &ib_conn->fastreg.pool); 398 + ib_conn->fastreg.pool_size++; 315 399 } 316 400 317 401 return 0; 318 402 319 - fast_reg_mr_failure: 320 - ib_free_fast_reg_page_list(desc->data_frpl); 321 - fast_reg_page_failure: 322 - kfree(desc); 323 403 err: 324 - iser_free_frwr_pool(ib_conn); 404 + iser_free_fastreg_pool(ib_conn); 325 405 return ret; 326 406 } 327 407 328 408 /** 329 - * iser_free_frwr_pool - releases the pool of fast_reg descriptors 409 + * iser_free_fastreg_pool - releases the pool of fast_reg descriptors 330 410 */ 331 - void iser_free_frwr_pool(struct iser_conn *ib_conn) 411 + void iser_free_fastreg_pool(struct iser_conn *ib_conn) 332 412 { 333 413 struct fast_reg_descriptor *desc, *tmp; 334 414 int i = 0; 335 415 336 - if (list_empty(&ib_conn->fastreg.frwr.pool)) 416 + if (list_empty(&ib_conn->fastreg.pool)) 337 417 return; 338 418 339 - iser_info("freeing conn %p frwr pool\n", ib_conn); 419 + iser_info("freeing conn %p fr pool\n", ib_conn); 340 420 341 - list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.frwr.pool, list) { 421 + list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.pool, list) { 342 
422 list_del(&desc->list); 343 423 ib_free_fast_reg_page_list(desc->data_frpl); 344 424 ib_dereg_mr(desc->data_mr); 425 + if (desc->pi_ctx) { 426 + ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl); 427 + ib_dereg_mr(desc->pi_ctx->prot_mr); 428 + ib_destroy_mr(desc->pi_ctx->sig_mr); 429 + kfree(desc->pi_ctx); 430 + } 345 431 kfree(desc); 346 432 ++i; 347 433 } 348 434 349 - if (i < ib_conn->fastreg.frwr.pool_size) 435 + if (i < ib_conn->fastreg.pool_size) 350 436 iser_warn("pool still has %d regions registered\n", 351 - ib_conn->fastreg.frwr.pool_size - i); 437 + ib_conn->fastreg.pool_size - i); 352 438 } 353 439 354 440 /** ··· 463 389 init_attr.qp_context = (void *)ib_conn; 464 390 init_attr.send_cq = device->tx_cq[min_index]; 465 391 init_attr.recv_cq = device->rx_cq[min_index]; 466 - init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; 467 392 init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; 468 393 init_attr.cap.max_send_sge = 2; 469 394 init_attr.cap.max_recv_sge = 1; 470 395 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; 471 396 init_attr.qp_type = IB_QPT_RC; 397 + if (ib_conn->pi_support) { 398 + init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS; 399 + init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN; 400 + } else { 401 + init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; 402 + } 472 403 473 404 ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr); 474 405 if (ret) ··· 670 591 ib_conn = (struct iser_conn *)cma_id->context; 671 592 ib_conn->device = device; 672 593 594 + /* connection T10-PI support */ 595 + if (iser_pi_enable) { 596 + if (!(device->dev_attr.device_cap_flags & 597 + IB_DEVICE_SIGNATURE_HANDOVER)) { 598 + iser_warn("T10-PI requested but not supported on %s, " 599 + "continue without T10-PI\n", 600 + ib_conn->device->ib_device->name); 601 + ib_conn->pi_support = false; 602 + } else { 603 + ib_conn->pi_support = true; 604 + } 605 + } 606 + 673 607 ret = rdma_resolve_route(cma_id, 1000); 674 608 if (ret) { 675 609 iser_err("resolve route failed: %d\n", ret); ··· 728 636 static void iser_connected_handler(struct rdma_cm_id *cma_id) 729 637 { 730 638 struct iser_conn *ib_conn; 639 + struct ib_qp_attr attr; 640 + struct ib_qp_init_attr init_attr; 641 + 642 + (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr); 643 + iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num); 731 644 732 645 ib_conn = (struct iser_conn *)cma_id->context; 733 646 ib_conn->state = ISER_CONN_UP; ··· 750 653 * terminated asynchronously from the iSCSI layer's perspective. 
*/ 751 654 if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, 752 655 ISER_CONN_TERMINATING)){ 753 - if (ib_conn->iser_conn) 754 - iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, 755 - ISCSI_ERR_CONN_FAILED); 656 + if (ib_conn->iscsi_conn) 657 + iscsi_conn_failure(ib_conn->iscsi_conn, ISCSI_ERR_CONN_FAILED); 756 658 else 757 659 iser_err("iscsi_iser connection isn't bound\n"); 758 660 } ··· 897 801 page_list = page_vec->pages; 898 802 io_addr = page_list[0]; 899 803 900 - mem = ib_fmr_pool_map_phys(ib_conn->fastreg.fmr.pool, 804 + mem = ib_fmr_pool_map_phys(ib_conn->fmr.pool, 901 805 page_list, 902 806 page_vec->length, 903 807 io_addr); ··· 951 855 reg->mem_h = NULL; 952 856 } 953 857 954 - void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task, 955 - enum iser_data_dir cmd_dir) 858 + void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, 859 + enum iser_data_dir cmd_dir) 956 860 { 957 861 struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; 958 - struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn; 862 + struct iser_conn *ib_conn = iser_task->ib_conn; 959 863 struct fast_reg_descriptor *desc = reg->mem_h; 960 864 961 865 if (!reg->is_mr) ··· 964 868 reg->mem_h = NULL; 965 869 reg->is_mr = 0; 966 870 spin_lock_bh(&ib_conn->lock); 967 - list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool); 871 + list_add_tail(&desc->list, &ib_conn->fastreg.pool); 968 872 spin_unlock_bh(&ib_conn->lock); 969 873 } 970 874 ··· 1065 969 * perspective. */ 1066 970 if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, 1067 971 ISER_CONN_TERMINATING)) 1068 - iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn, 972 + iscsi_conn_failure(ib_conn->iscsi_conn, 1069 973 ISCSI_ERR_CONN_FAILED); 1070 974 1071 975 /* no more non completed posts to the QP, complete the ··· 1089 993 if (wc.status == IB_WC_SUCCESS) { 1090 994 if (wc.opcode == IB_WC_SEND) 1091 995 iser_snd_completion(tx_desc, ib_conn); 1092 - else if (wc.opcode == IB_WC_LOCAL_INV || 1093 - wc.opcode == IB_WC_FAST_REG_MR) { 1094 - atomic_dec(&ib_conn->post_send_buf_count); 1095 - continue; 1096 - } else 996 + else 1097 997 iser_err("expected opcode %d got %d\n", 1098 998 IB_WC_SEND, wc.opcode); 1099 999 } else { 1100 1000 iser_err("tx id %llx status %d vend_err %x\n", 1101 - wc.wr_id, wc.status, wc.vendor_err); 1102 - atomic_dec(&ib_conn->post_send_buf_count); 1103 - iser_handle_comp_error(tx_desc, ib_conn); 1001 + wc.wr_id, wc.status, wc.vendor_err); 1002 + if (wc.wr_id != ISER_FASTREG_LI_WRID) { 1003 + atomic_dec(&ib_conn->post_send_buf_count); 1004 + iser_handle_comp_error(tx_desc, ib_conn); 1005 + } 1104 1006 } 1105 1007 completed_tx++; 1106 1008 } ··· 1116 1022 struct iser_rx_desc *desc; 1117 1023 unsigned long xfer_len; 1118 1024 struct iser_conn *ib_conn; 1119 - int completed_tx, completed_rx; 1120 - completed_tx = completed_rx = 0; 1025 + int completed_tx, completed_rx = 0; 1026 + 1027 + /* First do tx drain, so in a case where we have rx flushes and a successful 1028 + * tx completion we will still go through completion error handling. 
1029 + */ 1030 + completed_tx = iser_drain_tx_cq(device, cq_index); 1121 1032 while (ib_poll_cq(cq, 1, &wc) == 1) { 1123 1033 desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id; ··· 1149 1051 * " would not cause interrupts to be missed" */ 1150 1052 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); 1151 1053 1152 - completed_tx += iser_drain_tx_cq(device, cq_index); 1153 1054 iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx); 1154 1055 } ··· 1159 1062 int cq_index = cq_desc->cq_index; 1160 1063 1161 1064 tasklet_schedule(&device->cq_tasklet[cq_index]); 1065 + } 1066 + 1067 + u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, 1068 + enum iser_data_dir cmd_dir, sector_t *sector) 1069 + { 1070 + struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg; 1071 + struct fast_reg_descriptor *desc = reg->mem_h; 1072 + unsigned long sector_size = iser_task->sc->device->sector_size; 1073 + struct ib_mr_status mr_status; 1074 + int ret; 1075 + 1076 + if (desc && desc->reg_indicators & ISER_FASTREG_PROTECTED) { 1077 + desc->reg_indicators &= ~ISER_FASTREG_PROTECTED; 1078 + ret = ib_check_mr_status(desc->pi_ctx->sig_mr, 1079 + IB_MR_CHECK_SIG_STATUS, &mr_status); 1080 + if (ret) { 1081 + pr_err("ib_check_mr_status failed, ret %d\n", ret); 1082 + goto err; 1083 + } 1084 + 1085 + if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) { 1086 + sector_t sector_off = mr_status.sig_err.sig_err_offset; 1087 + 1088 + do_div(sector_off, sector_size + 8); 1089 + *sector = scsi_get_lba(iser_task->sc) + sector_off; 1090 + 1091 + pr_err("PI error found type %d at sector %llx " 1092 + "expected %x vs actual %x\n", 1093 + mr_status.sig_err.err_type, 1094 + (unsigned long long)*sector, 1095 + mr_status.sig_err.expected, 1096 + mr_status.sig_err.actual); 1097 + 1098 + switch (mr_status.sig_err.err_type) { 1099 + case IB_SIG_BAD_GUARD: 1100 + return 0x1; 1101 + case IB_SIG_BAD_REFTAG: 1102 + return 0x3; 1103 + case IB_SIG_BAD_APPTAG: 1104 + return 0x2; 1105 + } 1106 + } 1107 + } 1108 + 1109 + return 0; 1110 + err: 1111 + /* Not a lot we can do here, return ambiguous guard error */ 1112 + return 0x1; 1162 1113 }
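iser_check_task_pi_status() above converts a signature-MR verification failure into the ASCQ values SCSI expects under ASC 0x10 (0x1 guard check, 0x2 application tag check, 0x3 reference tag check) and recovers the failing LBA by dividing the reported byte offset by sector size plus 8, since the checked stream interleaves each sector with its DIF tuple. A self-contained sketch of that mapping; the enum is a local stand-in for the kernel's signature error type, and the numbers in main() are made up:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's enum ib_sig_err_type. */
enum sig_err_type { SIG_BAD_GUARD, SIG_BAD_REFTAG, SIG_BAD_APPTAG };

/*
 * Map a signature error to the T10 ASCQ reported back to SCSI and
 * recover the failing LBA from the byte offset into the interleaved
 * data+PI stream, the same arithmetic as the do_div() call above.
 */
static uint8_t pi_err_to_ascq(enum sig_err_type type, uint64_t err_offset,
                              uint32_t sector_size, uint64_t start_lba,
                              uint64_t *bad_lba)
{
    /* Each sector occupies sector_size data bytes plus 8 bytes of DIF. */
    *bad_lba = start_lba + err_offset / (sector_size + 8);

    switch (type) {
    case SIG_BAD_GUARD:  return 0x1;
    case SIG_BAD_APPTAG: return 0x2;
    case SIG_BAD_REFTAG: return 0x3;
    }
    return 0x1; /* ambiguous: fall back to a guard error */
}

int main(void)
{
    uint64_t bad_lba;
    /* Hypothetical guard failure 2600 bytes into a transfer at LBA 100. */
    uint8_t ascq = pi_err_to_ascq(SIG_BAD_GUARD, 2600, 512, 100, &bad_lba);

    /* 2600 / (512 + 8) = 5, so the failing sector is LBA 105, ASCQ 0x1. */
    printf("ascq 0x%x at lba %llu\n", ascq, (unsigned long long)bad_lba);
    return 0;
}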
+55 -28
drivers/infiniband/ulp/srp/ib_srp.c
··· 411 411 412 412 static int srp_lookup_path(struct srp_target_port *target) 413 413 { 414 + int ret; 415 + 414 416 target->path.numb_path = 1; 415 417 416 418 init_completion(&target->done); ··· 433 431 if (target->path_query_id < 0) 434 432 return target->path_query_id; 435 433 436 - wait_for_completion(&target->done); 434 + ret = wait_for_completion_interruptible(&target->done); 435 + if (ret < 0) 436 + return ret; 437 437 438 438 if (target->status < 0) 439 439 shost_printk(KERN_WARNING, target->scsi_host, ··· 714 710 ret = srp_send_req(target); 715 711 if (ret) 716 712 return ret; 717 - wait_for_completion(&target->done); 713 + ret = wait_for_completion_interruptible(&target->done); 714 + if (ret < 0) 715 + return ret; 718 716 719 717 /* 720 718 * The CM event handling code will set status to ··· 783 777 * srp_claim_req - Take ownership of the scmnd associated with a request. 784 778 * @target: SRP target port. 785 779 * @req: SRP request. 780 + * @sdev: If not NULL, only take ownership for this SCSI device. 786 781 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take 787 782 * ownership of @req->scmnd if it equals @scmnd. 788 783 * ··· 792 785 */ 793 786 static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, 794 787 struct srp_request *req, 788 + struct scsi_device *sdev, 795 789 struct scsi_cmnd *scmnd) 796 790 { 797 791 unsigned long flags; 798 792 799 793 spin_lock_irqsave(&target->lock, flags); 800 - if (!scmnd) { 794 + if (req->scmnd && 795 + (!sdev || req->scmnd->device == sdev) && 796 + (!scmnd || req->scmnd == scmnd)) { 801 797 scmnd = req->scmnd; 802 - req->scmnd = NULL; 803 - } else if (req->scmnd == scmnd) { 804 798 req->scmnd = NULL; 805 799 } else { 806 800 scmnd = NULL; ··· 829 821 } 830 822 831 823 static void srp_finish_req(struct srp_target_port *target, 832 - struct srp_request *req, int result) 824 + struct srp_request *req, struct scsi_device *sdev, 825 + int result) 833 826 { 834 - struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); 827 + struct scsi_cmnd *scmnd = srp_claim_req(target, req, sdev, NULL); 835 828 836 829 if (scmnd) { 837 830 srp_free_req(target, req, scmnd, 0); ··· 844 835 static void srp_terminate_io(struct srp_rport *rport) 845 836 { 846 837 struct srp_target_port *target = rport->lld_data; 838 + struct Scsi_Host *shost = target->scsi_host; 839 + struct scsi_device *sdev; 847 840 int i; 841 + 842 + /* 843 + * Invoking srp_terminate_io() while srp_queuecommand() is running 844 + * is not safe. Hence the warning statement below. 
845 + */ 846 + shost_for_each_device(sdev, shost) 847 + WARN_ON_ONCE(sdev->request_queue->request_fn_active); 848 848 849 849 for (i = 0; i < target->req_ring_size; ++i) { 850 850 struct srp_request *req = &target->req_ring[i]; 851 - srp_finish_req(target, req, DID_TRANSPORT_FAILFAST << 16); 851 + srp_finish_req(target, req, NULL, DID_TRANSPORT_FAILFAST << 16); 852 852 } 853 853 } 854 854 ··· 894 876 895 877 for (i = 0; i < target->req_ring_size; ++i) { 896 878 struct srp_request *req = &target->req_ring[i]; 897 - srp_finish_req(target, req, DID_RESET << 16); 879 + srp_finish_req(target, req, NULL, DID_RESET << 16); 898 880 } 899 881 900 882 INIT_LIST_HEAD(&target->free_tx); ··· 1302 1284 complete(&target->tsk_mgmt_done); 1303 1285 } else { 1304 1286 req = &target->req_ring[rsp->tag]; 1305 - scmnd = srp_claim_req(target, req, NULL); 1287 + scmnd = srp_claim_req(target, req, NULL, NULL); 1306 1288 if (!scmnd) { 1307 1289 shost_printk(KERN_ERR, target->scsi_host, 1308 1290 "Null scmnd for RSP w/tag %016llx\n", ··· 1822 1804 shost_printk(KERN_WARNING, shost, 1823 1805 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n"); 1824 1806 else 1825 - shost_printk(KERN_WARNING, shost, 1826 - PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason); 1807 + shost_printk(KERN_WARNING, shost, PFX 1808 + "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n", 1809 + target->path.sgid.raw, 1810 + target->orig_dgid, reason); 1827 1811 } else 1828 1812 shost_printk(KERN_WARNING, shost, 1829 1813 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED," ··· 1883 1863 case IB_CM_TIMEWAIT_EXIT: 1884 1864 shost_printk(KERN_ERR, target->scsi_host, 1885 1865 PFX "connection closed\n"); 1866 + comp = 1; 1886 1867 1887 1868 target->status = 0; 1888 1869 break; ··· 2020 1999 2021 2000 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); 2022 2001 2023 - if (!req || !srp_claim_req(target, req, scmnd)) 2002 + if (!req || !srp_claim_req(target, req, NULL, scmnd)) 2024 2003 return SUCCESS; 2025 2004 if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, 2026 2005 SRP_TSK_ABORT_TASK) == 0) ··· 2051 2030 2052 2031 for (i = 0; i < target->req_ring_size; ++i) { 2053 2032 struct srp_request *req = &target->req_ring[i]; 2054 - if (req->scmnd && req->scmnd->device == scmnd->device) 2055 - srp_finish_req(target, req, DID_RESET << 16); 2033 + srp_finish_req(target, req, scmnd->device, DID_RESET << 16); 2056 2034 } 2057 2035 2058 2036 return SUCCESS; ··· 2632 2612 target->tl_retry_count = 7; 2633 2613 target->queue_size = SRP_DEFAULT_QUEUE_SIZE; 2634 2614 2615 + mutex_lock(&host->add_target_mutex); 2616 + 2635 2617 ret = srp_parse_options(buf, target); 2636 2618 if (ret) 2637 2619 goto err; ··· 2671 2649 if (ret) 2672 2650 goto err_free_mem; 2673 2651 2674 - ib_query_gid(ibdev, host->port, 0, &target->path.sgid); 2675 - 2676 - shost_printk(KERN_DEBUG, target->scsi_host, PFX 2677 - "new target: id_ext %016llx ioc_guid %016llx pkey %04x " 2678 - "service_id %016llx dgid %pI6\n", 2679 - (unsigned long long) be64_to_cpu(target->id_ext), 2680 - (unsigned long long) be64_to_cpu(target->ioc_guid), 2681 - be16_to_cpu(target->path.pkey), 2682 - (unsigned long long) be64_to_cpu(target->service_id), 2683 - target->path.dgid.raw); 2652 + ret = ib_query_gid(ibdev, host->port, 0, &target->path.sgid); 2653 + if (ret) 2654 + goto err_free_mem; 2684 2655 2685 2656 ret = srp_create_target_ib(target); 2686 2657 if (ret) ··· 2694 2679 if (ret) 2695 2680 goto err_disconnect; 2696 2681 2697 - return count; 2682 + shost_printk(KERN_DEBUG, 
target->scsi_host, PFX 2683 + "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n", 2684 + be64_to_cpu(target->id_ext), 2685 + be64_to_cpu(target->ioc_guid), 2686 + be16_to_cpu(target->path.pkey), 2687 + be64_to_cpu(target->service_id), 2688 + target->path.sgid.raw, target->path.dgid.raw); 2689 + 2690 + ret = count; 2691 + 2692 + out: 2693 + mutex_unlock(&host->add_target_mutex); 2694 + return ret; 2698 2695 2699 2696 err_disconnect: 2700 2697 srp_disconnect_target(target); ··· 2722 2695 2723 2696 err: 2724 2697 scsi_host_put(target_host); 2725 - 2726 - return ret; 2698 + goto out; 2727 2699 } 2728 2700 2729 2701 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target); ··· 2758 2732 INIT_LIST_HEAD(&host->target_list); 2759 2733 spin_lock_init(&host->target_lock); 2760 2734 init_completion(&host->released); 2735 + mutex_init(&host->add_target_mutex); 2761 2736 host->srp_dev = device; 2762 2737 host->port = port; 2763 2738
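The reworked srp_claim_req() above is the single point where ownership of a SCSI command leaves the request ring: under target->lock it hands req->scmnd to exactly one caller, and a NULL device or command filter means "match any". srp_terminate_io() claims everything, srp_reset_device() filters by device, and srp_abort() filters by the exact command. A user-space sketch of the claim-under-lock pattern, with stand-in types and a pthread mutex in place of the target lock:

#include <pthread.h>
#include <stddef.h>

struct cmd { int id; void *device; };
struct request { struct cmd *scmnd; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Take ownership of req->scmnd under the lock, but only when the
 * optional filters match: a NULL dev or cmd filter means "any".
 * Exactly one caller can win; everyone else sees NULL.
 */
static struct cmd *claim_req(struct request *req,
                             void *dev /* optional device filter */,
                             struct cmd *cmd /* optional command filter */)
{
    struct cmd *claimed = NULL;

    pthread_mutex_lock(&lock);
    if (req->scmnd &&
        (!dev || req->scmnd->device == dev) &&
        (!cmd || req->scmnd == cmd)) {
        claimed = req->scmnd;
        req->scmnd = NULL; /* ownership transferred to the caller */
    }
    pthread_mutex_unlock(&lock);

    return claimed;
}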
+1
drivers/infiniband/ulp/srp/ib_srp.h
··· 105 105 spinlock_t target_lock; 106 106 struct completion released; 107 107 struct list_head list; 108 + struct mutex add_target_mutex; 108 109 }; 109 110 110 111 struct srp_request {
+6
drivers/net/ethernet/emulex/benet/be_roce.c
··· 35 35 36 36 if (!ocrdma_drv) 37 37 return; 38 + 39 + if (ocrdma_drv->be_abi_version != BE_ROCE_ABI_VERSION) { 40 + dev_warn(&pdev->dev, "Cannot initialize RoCE due to ocrdma ABI mismatch\n"); 41 + return; 42 + } 43 + 38 44 if (pdev->device == OC_DEVICE_ID5) { 39 45 /* only msix is supported on these devices */ 40 46 if (!msix_enabled(adapter))
+3
drivers/net/ethernet/emulex/benet/be_roce.h
··· 21 21 #include <linux/pci.h> 22 22 #include <linux/netdevice.h> 23 23 24 + #define BE_ROCE_ABI_VERSION 1 25 + 24 26 struct ocrdma_dev; 25 27 26 28 enum be_interrupt_mode { ··· 54 52 /* ocrdma driver register's the callback functions with nic driver. */ 55 53 struct ocrdma_driver { 56 54 unsigned char name[32]; 55 + u32 be_abi_version; 57 56 struct ocrdma_dev *(*add) (struct be_dev_info *dev_info); 58 57 void (*remove) (struct ocrdma_dev *); 59 58 void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
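For the ABI handshake that be_roce.c now enforces, the RoCE provider has to publish the matching version number when it registers; otherwise the NIC driver logs the mismatch and leaves RoCE uninitialized. A sketch of the provider side, assuming ocrdma_add()/ocrdma_remove() as callback names and be_roce_register_driver() as the registration entry point, none of which appear in the hunks above:

#include "be_roce.h"

/* Assumed provider callbacks; the names are illustrative. */
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info);
static void ocrdma_remove(struct ocrdma_dev *dev);

static struct ocrdma_driver roce_drv = {
        .name           = "ocrdma",
        .be_abi_version = BE_ROCE_ABI_VERSION, /* must match the NIC side */
        .add            = ocrdma_add,
        .remove         = ocrdma_remove,
};

/* Registration itself would go through the NIC driver's exported hook,
 * e.g. be_roce_register_driver(&roce_drv) in the provider's module init. */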
+32
drivers/scsi/libiscsi.c
··· 395 395 if (rc) 396 396 return rc; 397 397 } 398 + 399 + if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) 400 + task->protected = true; 401 + 398 402 if (sc->sc_data_direction == DMA_TO_DEVICE) { 399 403 unsigned out_len = scsi_out(sc)->length; 400 404 struct iscsi_r2t_info *r2t = &task->unsol_r2t; ··· 826 822 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; 827 823 828 824 sc->result = (DID_OK << 16) | rhdr->cmd_status; 825 + 826 + if (task->protected) { 827 + sector_t sector; 828 + u8 ascq; 829 + 830 + /** 831 + * Transports that didn't implement check_protection 832 + * callback but still published T10-PI support to scsi-mid 833 + * deserve this BUG_ON. 834 + **/ 835 + BUG_ON(!session->tt->check_protection); 836 + 837 + ascq = session->tt->check_protection(task, &sector); 838 + if (ascq) { 839 + sc->result = DRIVER_SENSE << 24 | 840 + SAM_STAT_CHECK_CONDITION; 841 + scsi_build_sense_buffer(1, sc->sense_buffer, 842 + ILLEGAL_REQUEST, 0x10, ascq); 843 + sc->sense_buffer[7] = 0xc; /* Additional sense length */ 844 + sc->sense_buffer[8] = 0; /* Information desc type */ 845 + sc->sense_buffer[9] = 0xa; /* Additional desc length */ 846 + sc->sense_buffer[10] = 0x80; /* Validity bit */ 847 + 848 + put_unaligned_be64(sector, &sc->sense_buffer[12]); 849 + goto out; 850 + } 851 + } 829 852 830 853 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) { 831 854 sc->result = DID_ERROR << 16; ··· 1598 1567 task->have_checked_conn = false; 1599 1568 task->last_timeout = jiffies; 1600 1569 task->last_xfer = jiffies; 1570 + task->protected = false; 1601 1571 INIT_LIST_HEAD(&task->running); 1602 1572 return task; 1603 1573 }
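The open-coded sense bytes in the hunk above produce descriptor-format sense data: response code 0x72 with sense key ILLEGAL REQUEST, ASC 0x10 plus the check-specific ASCQ, followed by a 12-byte Information descriptor whose VALID bit and big-endian payload carry the failing LBA back to the initiator. A self-contained sketch laying out the same bytes, with scsi_build_sense_buffer() replaced by direct assignments so every offset is visible:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Build descriptor-format sense data for a protection-information error,
 * matching the offsets assigned in the completion path above.
 */
static void build_pi_sense(uint8_t *buf, uint8_t ascq, uint64_t bad_lba)
{
    memset(buf, 0, 32);
    buf[0] = 0x72;   /* descriptor format, current error */
    buf[1] = 0x05;   /* sense key: ILLEGAL REQUEST */
    buf[2] = 0x10;   /* ASC: logical block guard/app tag/ref tag error */
    buf[3] = ascq;   /* 0x1 guard, 0x2 app tag, 0x3 ref tag */
    buf[7] = 0x0c;   /* additional sense length */
    buf[8] = 0x00;   /* descriptor type: Information */
    buf[9] = 0x0a;   /* additional descriptor length */
    buf[10] = 0x80;  /* VALID bit */
    for (int i = 0; i < 8; i++)  /* big-endian 64-bit LBA at bytes 12..19 */
        buf[12 + i] = (uint8_t)(bad_lba >> (56 - 8 * i));
}

int main(void)
{
    uint8_t sense[32];

    build_pi_sense(sense, 0x1, 105); /* hypothetical guard error at LBA 105 */
    for (int i = 0; i < 20; i++)
        printf("%02x ", sense[i]);
    printf("\n");
    return 0;
}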
+1
drivers/scsi/scsi_transport_srp.c
··· 810 810 811 811 /** 812 812 * srp_stop_rport_timers - stop the transport layer recovery timers 813 + * @rport: SRP remote port for which to stop the timers. 813 814 * 814 815 * Must be called after srp_remove_host() and scsi_remove_host(). The caller 815 816 * must hold a reference on the rport (rport->dev) and on the SCSI host
-1
include/rdma/ib_cm.h
··· 601 601 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 602 602 struct ib_cm_sidr_rep_param *param); 603 603 604 - int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac); 605 604 #endif /* IB_CM_H */
+6 -8
include/rdma/ib_verbs.h
··· 1412 1412 void (*unmap_sg)(struct ib_device *dev, 1413 1413 struct scatterlist *sg, int nents, 1414 1414 enum dma_data_direction direction); 1415 - u64 (*dma_address)(struct ib_device *dev, 1416 - struct scatterlist *sg); 1417 - unsigned int (*dma_len)(struct ib_device *dev, 1418 - struct scatterlist *sg); 1419 1415 void (*sync_single_for_cpu)(struct ib_device *dev, 1420 1416 u64 dma_handle, 1421 1417 size_t size, ··· 2236 2240 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 2237 2241 * @dev: The device for which the DMA addresses were created 2238 2242 * @sg: The scatter/gather entry 2243 + * 2244 + * Note: this function is obsolete. To do: change all occurrences of 2245 + * ib_sg_dma_address() into sg_dma_address(). 2239 2246 */ 2240 2247 static inline u64 ib_sg_dma_address(struct ib_device *dev, 2241 2248 struct scatterlist *sg) 2242 2249 { 2243 - if (dev->dma_ops) 2244 - return dev->dma_ops->dma_address(dev, sg); 2245 2250 return sg_dma_address(sg); 2246 2251 } 2247 2252 ··· 2250 2253 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 2251 2254 * @dev: The device for which the DMA addresses were created 2252 2255 * @sg: The scatter/gather entry 2256 + * 2257 + * Note: this function is obsolete. To do: change all occurrences of 2258 + * ib_sg_dma_len() into sg_dma_len(). 2253 2259 */ 2254 2260 static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 2255 2261 struct scatterlist *sg) 2256 2262 { 2257 - if (dev->dma_ops) 2258 - return dev->dma_ops->dma_len(dev, sg); 2259 2263 return sg_dma_len(sg); 2260 2264 } 2261 2265
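With the dma_address and dma_len hooks gone from the DMA ops vector, ib_sg_dma_address() and ib_sg_dma_len() reduce to the generic scatterlist accessors, which is why their comments now mark them obsolete. An illustrative consumer loop, not taken from the patch, showing that the two spellings are now interchangeable; sgl, nents and dev are assumed to come from an earlier ib_dma_map_sg() call:

/* Sketch: walking a mapped scatterlist after this change. */
struct scatterlist *sg;
int i;

for_each_sg(sgl, sg, nents, i) {
        u64 addr = ib_sg_dma_address(dev, sg);     /* == sg_dma_address(sg) */
        unsigned int len = ib_sg_dma_len(dev, sg); /* == sg_dma_len(sg) */
        /* ... build an ib_sge or fast-reg page list from addr and len ... */
}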
+4
include/scsi/libiscsi.h
··· 133 133 unsigned long last_xfer; 134 134 unsigned long last_timeout; 135 135 bool have_checked_conn; 136 + 137 + /* T10 protection information */ 138 + bool protected; 139 + 136 140 /* state set/tested under session->lock */ 137 141 int state; 138 142 atomic_t refcount;
+1
include/scsi/scsi_transport_iscsi.h
··· 167 167 struct iscsi_bus_flash_conn *fnode_conn); 168 168 int (*logout_flashnode_sid) (struct iscsi_cls_session *cls_sess); 169 169 int (*get_host_stats) (struct Scsi_Host *shost, char *buf, int len); 170 + u8 (*check_protection)(struct iscsi_task *task, sector_t *sector); 170 171 }; 171 172 172 173 /*
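A transport that offloads T10-PI publishes the new check_protection hook so the libiscsi completion path shown earlier can fetch the verification verdict and the failing sector. A sketch of the iser-side wiring, assuming a wrapper named iscsi_iser_check_protection() that routes to iser_check_task_pi_status() from the verbs changes above; the wrapper itself is not part of the hunks shown:

static u8 iscsi_iser_check_protection(struct iscsi_task *task,
                                      sector_t *sector)
{
        struct iscsi_iser_task *iser_task = task->dd_data;

        /* Ask the signature MR of the data direction actually used. */
        if (iser_task->dir[ISER_DIR_IN])
                return iser_check_task_pi_status(iser_task, ISER_DIR_IN,
                                                 sector);
        else
                return iser_check_task_pi_status(iser_task, ISER_DIR_OUT,
                                                 sector);
}

static struct iscsi_transport iscsi_iser_transport = {
        /* ... other ops ... */
        .check_protection       = iscsi_iser_check_protection,
};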
-1
include/scsi/scsi_transport_srp.h
··· 41 41 * @mutex: Protects against concurrent rport reconnect / 42 42 * fast_io_fail / dev_loss_tmo activity. 43 43 * @state: rport state. 44 - * @deleted: Whether or not srp_rport_del() has already been invoked. 45 44 * @reconnect_delay: Reconnect delay in seconds. 46 45 * @failed_reconnects: Number of failed reconnect attempts. 47 46 * @reconnect_work: Work structure used for scheduling reconnect attempts.