Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull more infiniband changes from Roland Dreier:
"Second batch of InfiniBand/RDMA changes for 3.8:
- cxgb4 changes to fix lookup engine hash collisions
- mlx4 changes to make flow steering usable
- fix to IPoIB to avoid pinning dst reference for too long"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
RDMA/cxgb4: Fix bug for active and passive LE hash collision path
RDMA/cxgb4: Fix LE hash collision bug for passive open connection
RDMA/cxgb4: Fix LE hash collision bug for active open connection
mlx4_core: Allow choosing flow steering mode
mlx4_core: Adjustments to Flow Steering activation logic for SR-IOV
mlx4_core: Fix error flow in the flow steering wrapper
mlx4_core: Add QPN enforcement for flow steering rules set by VFs
cxgb4: Add LE hash collision bug fix path in LLD driver
cxgb4: Add T4 filter support
IPoIB: Call skb_dst_drop() once skb is enqueued for sending
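The IPoIB change in the last entry is small: once a packet has been posted to the send queue, the skb's socket and routing state are released immediately instead of being held until TX completion, so the dst (and the neighbour it references) is no longer pinned for as long as the skb sits on a deep TX ring. A minimal sketch of the pattern the ipoib_cm.c and ipoib_ib.c hunks below apply, with the surrounding post-send bookkeeping elided:

	/* the send WR has been posted and tx_head advanced at this point */
	skb_orphan(skb);        /* detach the skb from its socket accounting */
	skb_dst_drop(skb);      /* release the dst entry right away instead of
	                         * holding it until the TX completion fires */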

Changed files: +2234 -214

drivers/infiniband/hw/cxgb4/cm.c  +682 -109
··· 38 38 #include <linux/inetdevice.h> 39 39 #include <linux/ip.h> 40 40 #include <linux/tcp.h> 41 + #include <linux/if_vlan.h> 41 42 42 43 #include <net/neighbour.h> 43 44 #include <net/netevent.h> 44 45 #include <net/route.h> 46 + #include <net/tcp.h> 45 47 46 48 #include "iw_cxgb4.h" 47 49 ··· 62 60 "dead", 63 61 NULL, 64 62 }; 63 + 64 + static int nocong; 65 + module_param(nocong, int, 0644); 66 + MODULE_PARM_DESC(nocong, "Turn of congestion control (default=0)"); 67 + 68 + static int enable_ecn; 69 + module_param(enable_ecn, int, 0644); 70 + MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)"); 65 71 66 72 static int dack_mode = 1; 67 73 module_param(dack_mode, int, 0644); ··· 275 265 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid); 276 266 dst_release(ep->dst); 277 267 cxgb4_l2t_release(ep->l2t); 268 + remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid); 278 269 } 279 270 kfree(ep); 280 271 } ··· 452 441 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 453 442 } 454 443 444 + #define VLAN_NONE 0xfff 445 + #define FILTER_SEL_VLAN_NONE 0xffff 446 + #define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ 447 + #define FILTER_SEL_WIDTH_VIN_P_FC \ 448 + (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ 449 + #define FILTER_SEL_WIDTH_TAG_P_FC \ 450 + (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ 451 + #define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) 452 + 453 + static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst, 454 + struct l2t_entry *l2t) 455 + { 456 + unsigned int ntuple = 0; 457 + u32 viid; 458 + 459 + switch (dev->rdev.lldi.filt_mode) { 460 + 461 + /* default filter mode */ 462 + case HW_TPL_FR_MT_PR_IV_P_FC: 463 + if (l2t->vlan == VLAN_NONE) 464 + ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; 465 + else { 466 + ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; 467 + ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC; 468 + } 469 + ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << 470 + FILTER_SEL_WIDTH_VLD_TAG_P_FC; 471 + break; 472 + case HW_TPL_FR_MT_PR_OV_P_FC: { 473 + viid = cxgb4_port_viid(l2t->neigh->dev); 474 + 475 + ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; 476 + ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; 477 + ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; 478 + ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << 479 + FILTER_SEL_WIDTH_VLD_TAG_P_FC; 480 + break; 481 + } 482 + default: 483 + break; 484 + } 485 + return ntuple; 486 + } 487 + 455 488 static int send_connect(struct c4iw_ep *ep) 456 489 { 457 490 struct cpl_act_open_req *req; ··· 518 463 519 464 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 520 465 wscale = compute_wscale(rcv_win); 521 - opt0 = KEEP_ALIVE(1) | 466 + opt0 = (nocong ? 
NO_CONG(1) : 0) | 467 + KEEP_ALIVE(1) | 522 468 DELACK(1) | 523 469 WND_SCALE(wscale) | 524 470 MSS_IDX(mtu_idx) | ··· 530 474 ULP_MODE(ULP_MODE_TCPDDP) | 531 475 RCV_BUFSIZ(rcv_win>>10); 532 476 opt2 = RX_CHANNEL(0) | 477 + CCTRL_ECN(enable_ecn) | 533 478 RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 534 479 if (enable_tcp_timestamps) 535 480 opt2 |= TSTAMPS_EN(1); ··· 549 492 req->local_ip = ep->com.local_addr.sin_addr.s_addr; 550 493 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; 551 494 req->opt0 = cpu_to_be64(opt0); 552 - req->params = 0; 495 + req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t)); 553 496 req->opt2 = cpu_to_be32(opt2); 497 + set_bit(ACT_OPEN_REQ, &ep->com.history); 554 498 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 555 499 } 556 500 ··· 828 770 /* setup the hwtid for this connection */ 829 771 ep->hwtid = tid; 830 772 cxgb4_insert_tid(t, ep, tid); 773 + insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid); 831 774 832 775 ep->snd_seq = be32_to_cpu(req->snd_isn); 833 776 ep->rcv_seq = be32_to_cpu(req->rcv_isn); ··· 836 777 set_emss(ep, ntohs(req->tcp_opt)); 837 778 838 779 /* dealloc the atid */ 780 + remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 839 781 cxgb4_free_atid(t, atid); 782 + set_bit(ACT_ESTAB, &ep->com.history); 840 783 841 784 /* start MPA negotiation */ 842 785 send_flowc(ep, NULL); ··· 864 803 ep->com.cm_id->rem_ref(ep->com.cm_id); 865 804 ep->com.cm_id = NULL; 866 805 ep->com.qp = NULL; 806 + set_bit(CLOSE_UPCALL, &ep->com.history); 867 807 } 868 808 } 869 809 ··· 873 811 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 874 812 close_complete_upcall(ep); 875 813 state_set(&ep->com, ABORTING); 814 + set_bit(ABORT_CONN, &ep->com.history); 876 815 return send_abort(ep, skb, gfp); 877 816 } 878 817 ··· 888 825 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 889 826 ep, ep->com.cm_id, ep->hwtid); 890 827 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 828 + set_bit(DISCONN_UPCALL, &ep->com.history); 891 829 } 892 830 } 893 831 ··· 907 843 ep->com.cm_id->rem_ref(ep->com.cm_id); 908 844 ep->com.cm_id = NULL; 909 845 ep->com.qp = NULL; 846 + set_bit(ABORT_UPCALL, &ep->com.history); 910 847 } 911 848 } 912 849 ··· 940 875 941 876 PDBG("%s ep %p tid %u status %d\n", __func__, ep, 942 877 ep->hwtid, status); 878 + set_bit(CONN_RPL_UPCALL, &ep->com.history); 943 879 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 944 880 945 881 if (status < 0) { ··· 981 915 ep->parent_ep->com.cm_id, 982 916 &event); 983 917 } 918 + set_bit(CONNREQ_UPCALL, &ep->com.history); 984 919 c4iw_put_ep(&ep->parent_ep->com); 985 920 ep->parent_ep = NULL; 986 921 } ··· 998 931 if (ep->com.cm_id) { 999 932 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1000 933 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 934 + set_bit(ESTAB_UPCALL, &ep->com.history); 1001 935 } 1002 936 } 1003 937 ··· 1384 1316 unsigned int dlen = ntohs(hdr->len); 1385 1317 unsigned int tid = GET_TID(hdr); 1386 1318 struct tid_info *t = dev->rdev.lldi.tids; 1319 + __u8 status = hdr->status; 1387 1320 1388 1321 ep = lookup_tid(t, tid); 1389 1322 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); ··· 1407 1338 case MPA_REP_SENT: 1408 1339 break; 1409 1340 default: 1410 - printk(KERN_ERR MOD "%s Unexpected streaming data." 1411 - " ep %p state %d tid %u\n", 1412 - __func__, ep, state_read(&ep->com), ep->hwtid); 1341 + pr_err("%s Unexpected streaming data." 
\ 1342 + " ep %p state %d tid %u status %d\n", 1343 + __func__, ep, state_read(&ep->com), ep->hwtid, status); 1413 1344 1414 1345 /* 1415 1346 * The ep will timeout and inform the ULP of the failure. ··· 1452 1383 return 0; 1453 1384 } 1454 1385 1386 + static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) 1387 + { 1388 + struct sk_buff *skb; 1389 + struct fw_ofld_connection_wr *req; 1390 + unsigned int mtu_idx; 1391 + int wscale; 1392 + 1393 + skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1394 + req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 1395 + memset(req, 0, sizeof(*req)); 1396 + req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR)); 1397 + req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 1398 + req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, 1399 + ep->l2t)); 1400 + req->le.lport = ep->com.local_addr.sin_port; 1401 + req->le.pport = ep->com.remote_addr.sin_port; 1402 + req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr; 1403 + req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr; 1404 + req->tcb.t_state_to_astid = 1405 + htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) | 1406 + V_FW_OFLD_CONNECTION_WR_ASTID(atid)); 1407 + req->tcb.cplrxdataack_cplpassacceptrpl = 1408 + htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK); 1409 + req->tcb.tx_max = jiffies; 1410 + req->tcb.rcv_adv = htons(1); 1411 + cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1412 + wscale = compute_wscale(rcv_win); 1413 + req->tcb.opt0 = TCAM_BYPASS(1) | 1414 + (nocong ? NO_CONG(1) : 0) | 1415 + KEEP_ALIVE(1) | 1416 + DELACK(1) | 1417 + WND_SCALE(wscale) | 1418 + MSS_IDX(mtu_idx) | 1419 + L2T_IDX(ep->l2t->idx) | 1420 + TX_CHAN(ep->tx_chan) | 1421 + SMAC_SEL(ep->smac_idx) | 1422 + DSCP(ep->tos) | 1423 + ULP_MODE(ULP_MODE_TCPDDP) | 1424 + RCV_BUFSIZ(rcv_win >> 10); 1425 + req->tcb.opt2 = PACE(1) | 1426 + TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 1427 + RX_CHANNEL(0) | 1428 + CCTRL_ECN(enable_ecn) | 1429 + RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid); 1430 + if (enable_tcp_timestamps) 1431 + req->tcb.opt2 |= TSTAMPS_EN(1); 1432 + if (enable_tcp_sack) 1433 + req->tcb.opt2 |= SACK_EN(1); 1434 + if (wscale && enable_tcp_window_scaling) 1435 + req->tcb.opt2 |= WND_SCALE_EN(1); 1436 + req->tcb.opt0 = cpu_to_be64(req->tcb.opt0); 1437 + req->tcb.opt2 = cpu_to_be32(req->tcb.opt2); 1438 + set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 1439 + set_bit(ACT_OFLD_CONN, &ep->com.history); 1440 + c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1441 + } 1442 + 1455 1443 /* 1456 1444 * Return whether a failed active open has allocated a TID 1457 1445 */ ··· 1516 1390 { 1517 1391 return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && 1518 1392 status != CPL_ERR_ARP_MISS; 1393 + } 1394 + 1395 + #define ACT_OPEN_RETRY_COUNT 2 1396 + 1397 + static int c4iw_reconnect(struct c4iw_ep *ep) 1398 + { 1399 + int err = 0; 1400 + struct rtable *rt; 1401 + struct port_info *pi; 1402 + struct net_device *pdev; 1403 + int step; 1404 + struct neighbour *neigh; 1405 + 1406 + PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); 1407 + init_timer(&ep->timer); 1408 + 1409 + /* 1410 + * Allocate an active TID to initiate a TCP connection. 
1411 + */ 1412 + ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 1413 + if (ep->atid == -1) { 1414 + pr_err("%s - cannot alloc atid.\n", __func__); 1415 + err = -ENOMEM; 1416 + goto fail2; 1417 + } 1418 + insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 1419 + 1420 + /* find a route */ 1421 + rt = find_route(ep->com.dev, 1422 + ep->com.cm_id->local_addr.sin_addr.s_addr, 1423 + ep->com.cm_id->remote_addr.sin_addr.s_addr, 1424 + ep->com.cm_id->local_addr.sin_port, 1425 + ep->com.cm_id->remote_addr.sin_port, 0); 1426 + if (!rt) { 1427 + pr_err("%s - cannot find route.\n", __func__); 1428 + err = -EHOSTUNREACH; 1429 + goto fail3; 1430 + } 1431 + ep->dst = &rt->dst; 1432 + 1433 + neigh = dst_neigh_lookup(ep->dst, 1434 + &ep->com.cm_id->remote_addr.sin_addr.s_addr); 1435 + /* get a l2t entry */ 1436 + if (neigh->dev->flags & IFF_LOOPBACK) { 1437 + PDBG("%s LOOPBACK\n", __func__); 1438 + pdev = ip_dev_find(&init_net, 1439 + ep->com.cm_id->remote_addr.sin_addr.s_addr); 1440 + ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1441 + neigh, pdev, 0); 1442 + pi = (struct port_info *)netdev_priv(pdev); 1443 + ep->mtu = pdev->mtu; 1444 + ep->tx_chan = cxgb4_port_chan(pdev); 1445 + ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1; 1446 + dev_put(pdev); 1447 + } else { 1448 + ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t, 1449 + neigh, neigh->dev, 0); 1450 + pi = (struct port_info *)netdev_priv(neigh->dev); 1451 + ep->mtu = dst_mtu(ep->dst); 1452 + ep->tx_chan = cxgb4_port_chan(neigh->dev); 1453 + ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 1454 + 0x7F) << 1; 1455 + } 1456 + 1457 + step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan; 1458 + ep->txq_idx = pi->port_id * step; 1459 + ep->ctrlq_idx = pi->port_id; 1460 + step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan; 1461 + ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step]; 1462 + 1463 + if (!ep->l2t) { 1464 + pr_err("%s - cannot alloc l2e.\n", __func__); 1465 + err = -ENOMEM; 1466 + goto fail4; 1467 + } 1468 + 1469 + PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 1470 + __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 1471 + ep->l2t->idx); 1472 + 1473 + state_set(&ep->com, CONNECTING); 1474 + ep->tos = 0; 1475 + 1476 + /* send connect request to rnic */ 1477 + err = send_connect(ep); 1478 + if (!err) 1479 + goto out; 1480 + 1481 + cxgb4_l2t_release(ep->l2t); 1482 + fail4: 1483 + dst_release(ep->dst); 1484 + fail3: 1485 + remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 1486 + cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 1487 + fail2: 1488 + /* 1489 + * remember to send notification to upper layer. 1490 + * We are in here so the upper layer is not aware that this is 1491 + * re-connect attempt and so, upper layer is still waiting for 1492 + * response of 1st connect request. 1493 + */ 1494 + connect_reply_upcall(ep, -ECONNRESET); 1495 + c4iw_put_ep(&ep->com); 1496 + out: 1497 + return err; 1519 1498 } 1520 1499 1521 1500 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) ··· 1643 1412 return 0; 1644 1413 } 1645 1414 1415 + set_bit(ACT_OPEN_RPL, &ep->com.history); 1416 + 1646 1417 /* 1647 1418 * Log interesting failures. 
1648 1419 */ 1649 1420 switch (status) { 1650 1421 case CPL_ERR_CONN_RESET: 1651 1422 case CPL_ERR_CONN_TIMEDOUT: 1423 + break; 1424 + case CPL_ERR_TCAM_FULL: 1425 + if (dev->rdev.lldi.enable_fw_ofld_conn) { 1426 + mutex_lock(&dev->rdev.stats.lock); 1427 + dev->rdev.stats.tcam_full++; 1428 + mutex_unlock(&dev->rdev.stats.lock); 1429 + send_fw_act_open_req(ep, 1430 + GET_TID_TID(GET_AOPEN_ATID( 1431 + ntohl(rpl->atid_status)))); 1432 + return 0; 1433 + } 1434 + break; 1435 + case CPL_ERR_CONN_EXIST: 1436 + if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 1437 + set_bit(ACT_RETRY_INUSE, &ep->com.history); 1438 + remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 1439 + atid); 1440 + cxgb4_free_atid(t, atid); 1441 + dst_release(ep->dst); 1442 + cxgb4_l2t_release(ep->l2t); 1443 + c4iw_reconnect(ep); 1444 + return 0; 1445 + } 1652 1446 break; 1653 1447 default: 1654 1448 printk(KERN_INFO MOD "Active open failure - " ··· 1692 1436 if (status && act_open_has_tid(status)) 1693 1437 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 1694 1438 1439 + remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 1695 1440 cxgb4_free_atid(t, atid); 1696 1441 dst_release(ep->dst); 1697 1442 cxgb4_l2t_release(ep->l2t); ··· 1709 1452 struct c4iw_listen_ep *ep = lookup_stid(t, stid); 1710 1453 1711 1454 if (!ep) { 1712 - printk(KERN_ERR MOD "stid %d lookup failure!\n", stid); 1713 - return 0; 1455 + PDBG("%s stid %d lookup failure!\n", __func__, stid); 1456 + goto out; 1714 1457 } 1715 1458 PDBG("%s ep %p status %d error %d\n", __func__, ep, 1716 1459 rpl->status, status2errno(rpl->status)); 1717 1460 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 1718 1461 1462 + out: 1719 1463 return 0; 1720 1464 } 1721 1465 ··· 1768 1510 skb_get(skb); 1769 1511 cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx); 1770 1512 wscale = compute_wscale(rcv_win); 1771 - opt0 = KEEP_ALIVE(1) | 1513 + opt0 = (nocong ? 
NO_CONG(1) : 0) | 1514 + KEEP_ALIVE(1) | 1772 1515 DELACK(1) | 1773 1516 WND_SCALE(wscale) | 1774 1517 MSS_IDX(mtu_idx) | 1775 1518 L2T_IDX(ep->l2t->idx) | 1776 1519 TX_CHAN(ep->tx_chan) | 1777 1520 SMAC_SEL(ep->smac_idx) | 1778 - DSCP(ep->tos) | 1521 + DSCP(ep->tos >> 2) | 1779 1522 ULP_MODE(ULP_MODE_TCPDDP) | 1780 1523 RCV_BUFSIZ(rcv_win>>10); 1781 1524 opt2 = RX_CHANNEL(0) | ··· 1788 1529 opt2 |= SACK_EN(1); 1789 1530 if (wscale && enable_tcp_window_scaling) 1790 1531 opt2 |= WND_SCALE_EN(1); 1532 + if (enable_ecn) { 1533 + const struct tcphdr *tcph; 1534 + u32 hlen = ntohl(req->hdr_len); 1535 + 1536 + tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) + 1537 + G_IP_HDR_LEN(hlen); 1538 + if (tcph->ece && tcph->cwr) 1539 + opt2 |= CCTRL_ECN(1); 1540 + } 1791 1541 1792 1542 rpl = cplhdr(skb); 1793 1543 INIT_TP_WR(rpl, ep->hwtid); ··· 1913 1645 1914 1646 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 1915 1647 { 1916 - struct c4iw_ep *child_ep, *parent_ep; 1648 + struct c4iw_ep *child_ep = NULL, *parent_ep; 1917 1649 struct cpl_pass_accept_req *req = cplhdr(skb); 1918 1650 unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid)); 1919 1651 struct tid_info *t = dev->rdev.lldi.tids; 1920 1652 unsigned int hwtid = GET_TID(req); 1921 1653 struct dst_entry *dst; 1922 1654 struct rtable *rt; 1923 - __be32 local_ip, peer_ip; 1655 + __be32 local_ip, peer_ip = 0; 1924 1656 __be16 local_port, peer_port; 1925 1657 int err; 1658 + u16 peer_mss = ntohs(req->tcpopt.mss); 1926 1659 1927 1660 parent_ep = lookup_stid(t, stid); 1928 - PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid); 1929 - 1661 + if (!parent_ep) { 1662 + PDBG("%s connect request on invalid stid %d\n", __func__, stid); 1663 + goto reject; 1664 + } 1930 1665 get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port); 1666 + 1667 + PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \ 1668 + "rport %d peer_mss %d\n", __func__, parent_ep, hwtid, 1669 + ntohl(local_ip), ntohl(peer_ip), ntohs(local_port), 1670 + ntohs(peer_port), peer_mss); 1931 1671 1932 1672 if (state_read(&parent_ep->com) != LISTEN) { 1933 1673 printk(KERN_ERR "%s - listening ep not in LISTEN\n", ··· 1970 1694 goto reject; 1971 1695 } 1972 1696 1697 + if (peer_mss && child_ep->mtu > (peer_mss + 40)) 1698 + child_ep->mtu = peer_mss + 40; 1699 + 1973 1700 state_set(&child_ep->com, CONNECTING); 1974 1701 child_ep->com.dev = dev; 1975 1702 child_ep->com.cm_id = NULL; ··· 1994 1715 init_timer(&child_ep->timer); 1995 1716 cxgb4_insert_tid(t, child_ep, hwtid); 1996 1717 accept_cr(child_ep, peer_ip, skb, req); 1718 + set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 1997 1719 goto out; 1998 1720 reject: 1999 1721 reject_cr(dev, hwtid, peer_ip, skb); ··· 2014 1734 ep->snd_seq = be32_to_cpu(req->snd_isn); 2015 1735 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2016 1736 1737 + PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, 1738 + ntohs(req->tcp_opt)); 1739 + 2017 1740 set_emss(ep, ntohs(req->tcp_opt)); 1741 + insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid); 2018 1742 2019 1743 dst_confirm(ep->dst); 2020 1744 state_set(&ep->com, MPA_REQ_WAIT); 2021 1745 start_ep_timer(ep); 2022 1746 send_flowc(ep, skb); 1747 + set_bit(PASS_ESTAB, &ep->com.history); 2023 1748 2024 1749 return 0; 2025 1750 } ··· 2044 1759 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2045 1760 dst_confirm(ep->dst); 2046 1761 1762 + set_bit(PEER_CLOSE, &ep->com.history); 2047 1763 mutex_lock(&ep->com.mutex); 2048 1764 switch (ep->com.state) { 2049 1765 
case MPA_REQ_WAIT: ··· 2124 1838 status == CPL_ERR_PERSIST_NEG_ADVICE; 2125 1839 } 2126 1840 2127 - static int c4iw_reconnect(struct c4iw_ep *ep) 2128 - { 2129 - struct rtable *rt; 2130 - int err = 0; 2131 - 2132 - PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id); 2133 - init_timer(&ep->timer); 2134 - 2135 - /* 2136 - * Allocate an active TID to initiate a TCP connection. 2137 - */ 2138 - ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 2139 - if (ep->atid == -1) { 2140 - printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 2141 - err = -ENOMEM; 2142 - goto fail2; 2143 - } 2144 - 2145 - /* find a route */ 2146 - rt = find_route(ep->com.dev, 2147 - ep->com.cm_id->local_addr.sin_addr.s_addr, 2148 - ep->com.cm_id->remote_addr.sin_addr.s_addr, 2149 - ep->com.cm_id->local_addr.sin_port, 2150 - ep->com.cm_id->remote_addr.sin_port, 0); 2151 - if (!rt) { 2152 - printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 2153 - err = -EHOSTUNREACH; 2154 - goto fail3; 2155 - } 2156 - ep->dst = &rt->dst; 2157 - 2158 - err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr, 2159 - ep->dst, ep->com.dev, false); 2160 - if (err) { 2161 - printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 2162 - goto fail4; 2163 - } 2164 - 2165 - PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 2166 - __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 2167 - ep->l2t->idx); 2168 - 2169 - state_set(&ep->com, CONNECTING); 2170 - ep->tos = 0; 2171 - 2172 - /* send connect request to rnic */ 2173 - err = send_connect(ep); 2174 - if (!err) 2175 - goto out; 2176 - 2177 - cxgb4_l2t_release(ep->l2t); 2178 - fail4: 2179 - dst_release(ep->dst); 2180 - fail3: 2181 - cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2182 - fail2: 2183 - /* 2184 - * remember to send notification to upper layer. 2185 - * We are in here so the upper layer is not aware that this is 2186 - * re-connect attempt and so, upper layer is still waiting for 2187 - * response of 1st connect request. 2188 - */ 2189 - connect_reply_upcall(ep, -ECONNRESET); 2190 - c4iw_put_ep(&ep->com); 2191 - out: 2192 - return err; 2193 - } 2194 - 2195 1841 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2196 1842 { 2197 1843 struct cpl_abort_req_rss *req = cplhdr(skb); ··· 2144 1926 } 2145 1927 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2146 1928 ep->com.state); 1929 + set_bit(PEER_ABORT, &ep->com.history); 2147 1930 2148 1931 /* 2149 1932 * Wake up any threads in rdma_init() or rdma_fini(). 
··· 2359 2140 c4iw_put_ep(&ep->com); 2360 2141 return -ECONNRESET; 2361 2142 } 2143 + set_bit(ULP_REJECT, &ep->com.history); 2362 2144 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 2363 2145 if (mpa_rev == 0) 2364 2146 abort_connection(ep, NULL, GFP_KERNEL); ··· 2389 2169 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 2390 2170 BUG_ON(!qp); 2391 2171 2172 + set_bit(ULP_ACCEPT, &ep->com.history); 2392 2173 if ((conn_param->ord > c4iw_max_read_depth) || 2393 2174 (conn_param->ird > c4iw_max_read_depth)) { 2394 2175 abort_connection(ep, NULL, GFP_KERNEL); ··· 2513 2292 err = -ENOMEM; 2514 2293 goto fail2; 2515 2294 } 2295 + insert_handle(dev, &dev->atid_idr, ep, ep->atid); 2516 2296 2517 2297 PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__, 2518 2298 ntohl(cm_id->local_addr.sin_addr.s_addr), ··· 2559 2337 fail4: 2560 2338 dst_release(ep->dst); 2561 2339 fail3: 2340 + remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 2562 2341 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2563 2342 fail2: 2564 2343 cm_id->rem_ref(cm_id); ··· 2573 2350 int err = 0; 2574 2351 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2575 2352 struct c4iw_listen_ep *ep; 2576 - 2577 2353 2578 2354 might_sleep(); 2579 2355 ··· 2592 2370 /* 2593 2371 * Allocate a server TID. 2594 2372 */ 2595 - ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); 2373 + if (dev->rdev.lldi.enable_fw_ofld_conn) 2374 + ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep); 2375 + else 2376 + ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep); 2377 + 2596 2378 if (ep->stid == -1) { 2597 2379 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 2598 2380 err = -ENOMEM; 2599 2381 goto fail2; 2600 2382 } 2601 - 2383 + insert_handle(dev, &dev->stid_idr, ep, ep->stid); 2602 2384 state_set(&ep->com, LISTEN); 2603 - c4iw_init_wr_wait(&ep->com.wr_wait); 2604 - err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid, 2605 - ep->com.local_addr.sin_addr.s_addr, 2606 - ep->com.local_addr.sin_port, 2607 - ep->com.dev->rdev.lldi.rxq_ids[0]); 2608 - if (err) 2609 - goto fail3; 2610 - 2611 - /* wait for pass_open_rpl */ 2612 - err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, 2613 - __func__); 2385 + if (dev->rdev.lldi.enable_fw_ofld_conn) { 2386 + do { 2387 + err = cxgb4_create_server_filter( 2388 + ep->com.dev->rdev.lldi.ports[0], ep->stid, 2389 + ep->com.local_addr.sin_addr.s_addr, 2390 + ep->com.local_addr.sin_port, 2391 + 0, 2392 + ep->com.dev->rdev.lldi.rxq_ids[0], 2393 + 0, 2394 + 0); 2395 + if (err == -EBUSY) { 2396 + set_current_state(TASK_UNINTERRUPTIBLE); 2397 + schedule_timeout(usecs_to_jiffies(100)); 2398 + } 2399 + } while (err == -EBUSY); 2400 + } else { 2401 + c4iw_init_wr_wait(&ep->com.wr_wait); 2402 + err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 2403 + ep->stid, ep->com.local_addr.sin_addr.s_addr, 2404 + ep->com.local_addr.sin_port, 2405 + 0, 2406 + ep->com.dev->rdev.lldi.rxq_ids[0]); 2407 + if (!err) 2408 + err = c4iw_wait_for_reply(&ep->com.dev->rdev, 2409 + &ep->com.wr_wait, 2410 + 0, 0, __func__); 2411 + } 2614 2412 if (!err) { 2615 2413 cm_id->provider_data = ep; 2616 2414 goto out; 2617 2415 } 2618 - fail3: 2416 + pr_err("%s cxgb4_create_server/filter failed err %d " \ 2417 + "stid %d laddr %08x lport %d\n", \ 2418 + __func__, err, ep->stid, 2419 + ntohl(ep->com.local_addr.sin_addr.s_addr), 2420 + ntohs(ep->com.local_addr.sin_port)); 2619 2421 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2620 
2422 fail2: 2621 2423 cm_id->rem_ref(cm_id); ··· 2658 2412 2659 2413 might_sleep(); 2660 2414 state_set(&ep->com, DEAD); 2661 - c4iw_init_wr_wait(&ep->com.wr_wait); 2662 - err = listen_stop(ep); 2663 - if (err) 2664 - goto done; 2665 - err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0, 2666 - __func__); 2415 + if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) { 2416 + err = cxgb4_remove_server_filter( 2417 + ep->com.dev->rdev.lldi.ports[0], ep->stid, 2418 + ep->com.dev->rdev.lldi.rxq_ids[0], 0); 2419 + } else { 2420 + c4iw_init_wr_wait(&ep->com.wr_wait); 2421 + err = listen_stop(ep); 2422 + if (err) 2423 + goto done; 2424 + err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 2425 + 0, 0, __func__); 2426 + } 2427 + remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 2667 2428 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET); 2668 2429 done: 2669 2430 cm_id->rem_ref(cm_id); ··· 2734 2481 2735 2482 if (close) { 2736 2483 if (abrupt) { 2484 + set_bit(EP_DISC_ABORT, &ep->com.history); 2737 2485 close_complete_upcall(ep); 2738 2486 ret = send_abort(ep, NULL, gfp); 2739 - } else 2487 + } else { 2488 + set_bit(EP_DISC_CLOSE, &ep->com.history); 2740 2489 ret = send_halfclose(ep, gfp); 2490 + } 2741 2491 if (ret) 2742 2492 fatal = 1; 2743 2493 } ··· 2750 2494 return ret; 2751 2495 } 2752 2496 2753 - static int async_event(struct c4iw_dev *dev, struct sk_buff *skb) 2497 + static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 2498 + struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 2499 + { 2500 + struct c4iw_ep *ep; 2501 + int atid = be32_to_cpu(req->tid); 2502 + 2503 + ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid); 2504 + if (!ep) 2505 + return; 2506 + 2507 + switch (req->retval) { 2508 + case FW_ENOMEM: 2509 + set_bit(ACT_RETRY_NOMEM, &ep->com.history); 2510 + if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2511 + send_fw_act_open_req(ep, atid); 2512 + return; 2513 + } 2514 + case FW_EADDRINUSE: 2515 + set_bit(ACT_RETRY_INUSE, &ep->com.history); 2516 + if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2517 + send_fw_act_open_req(ep, atid); 2518 + return; 2519 + } 2520 + break; 2521 + default: 2522 + pr_info("%s unexpected ofld conn wr retval %d\n", 2523 + __func__, req->retval); 2524 + break; 2525 + } 2526 + pr_err("active ofld_connect_wr failure %d atid %d\n", 2527 + req->retval, atid); 2528 + mutex_lock(&dev->rdev.stats.lock); 2529 + dev->rdev.stats.act_ofld_conn_fails++; 2530 + mutex_unlock(&dev->rdev.stats.lock); 2531 + connect_reply_upcall(ep, status2errno(req->retval)); 2532 + state_set(&ep->com, DEAD); 2533 + remove_handle(dev, &dev->atid_idr, atid); 2534 + cxgb4_free_atid(dev->rdev.lldi.tids, atid); 2535 + dst_release(ep->dst); 2536 + cxgb4_l2t_release(ep->l2t); 2537 + c4iw_put_ep(&ep->com); 2538 + } 2539 + 2540 + static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 2541 + struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 2542 + { 2543 + struct sk_buff *rpl_skb; 2544 + struct cpl_pass_accept_req *cpl; 2545 + int ret; 2546 + 2547 + rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie); 2548 + BUG_ON(!rpl_skb); 2549 + if (req->retval) { 2550 + PDBG("%s passive open failure %d\n", __func__, req->retval); 2551 + mutex_lock(&dev->rdev.stats.lock); 2552 + dev->rdev.stats.pas_ofld_conn_fails++; 2553 + mutex_unlock(&dev->rdev.stats.lock); 2554 + kfree_skb(rpl_skb); 2555 + } else { 2556 + cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 2557 + OPCODE_TID(cpl) = 
htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 2558 + htonl(req->tid))); 2559 + ret = pass_accept_req(dev, rpl_skb); 2560 + if (!ret) 2561 + kfree_skb(rpl_skb); 2562 + } 2563 + return; 2564 + } 2565 + 2566 + static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 2754 2567 { 2755 2568 struct cpl_fw6_msg *rpl = cplhdr(skb); 2756 - c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 2569 + struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 2570 + 2571 + switch (rpl->type) { 2572 + case FW6_TYPE_CQE: 2573 + c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 2574 + break; 2575 + case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 2576 + req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 2577 + switch (req->t_state) { 2578 + case TCP_SYN_SENT: 2579 + active_ofld_conn_reply(dev, skb, req); 2580 + break; 2581 + case TCP_SYN_RECV: 2582 + passive_ofld_conn_reply(dev, skb, req); 2583 + break; 2584 + default: 2585 + pr_err("%s unexpected ofld conn wr state %d\n", 2586 + __func__, req->t_state); 2587 + break; 2588 + } 2589 + break; 2590 + } 2591 + return 0; 2592 + } 2593 + 2594 + static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 2595 + { 2596 + u32 l2info; 2597 + u16 vlantag, len, hdr_len; 2598 + u8 intf; 2599 + struct cpl_rx_pkt *cpl = cplhdr(skb); 2600 + struct cpl_pass_accept_req *req; 2601 + struct tcp_options_received tmp_opt; 2602 + 2603 + /* Store values from cpl_rx_pkt in temporary location. */ 2604 + vlantag = cpl->vlan; 2605 + len = cpl->len; 2606 + l2info = cpl->l2info; 2607 + hdr_len = cpl->hdr_len; 2608 + intf = cpl->iff; 2609 + 2610 + __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 2611 + 2612 + /* 2613 + * We need to parse the TCP options from SYN packet. 2614 + * to generate cpl_pass_accept_req. 2615 + */ 2616 + memset(&tmp_opt, 0, sizeof(tmp_opt)); 2617 + tcp_clear_options(&tmp_opt); 2618 + tcp_parse_options(skb, &tmp_opt, 0, 0, NULL); 2619 + 2620 + req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 2621 + memset(req, 0, sizeof(*req)); 2622 + req->l2info = cpu_to_be16(V_SYN_INTF(intf) | 2623 + V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) | 2624 + F_SYN_XACT_MATCH); 2625 + req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) | 2626 + V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) | 2627 + V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) | 2628 + V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info)))); 2629 + req->vlan = vlantag; 2630 + req->len = len; 2631 + req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | 2632 + PASS_OPEN_TOS(tos)); 2633 + req->tcpopt.mss = htons(tmp_opt.mss_clamp); 2634 + if (tmp_opt.wscale_ok) 2635 + req->tcpopt.wsf = tmp_opt.snd_wscale; 2636 + req->tcpopt.tstamp = tmp_opt.saw_tstamp; 2637 + if (tmp_opt.sack_ok) 2638 + req->tcpopt.sack = 1; 2639 + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0)); 2640 + return; 2641 + } 2642 + 2643 + static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, 2644 + __be32 laddr, __be16 lport, 2645 + __be32 raddr, __be16 rport, 2646 + u32 rcv_isn, u32 filter, u16 window, 2647 + u32 rss_qid, u8 port_id) 2648 + { 2649 + struct sk_buff *req_skb; 2650 + struct fw_ofld_connection_wr *req; 2651 + struct cpl_pass_accept_req *cpl = cplhdr(skb); 2652 + 2653 + req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL); 2654 + req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req)); 2655 + memset(req, 0, sizeof(*req)); 2656 + req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1)); 2657 + req->len16_pkd = 
htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16))); 2658 + req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL); 2659 + req->le.filter = filter; 2660 + req->le.lport = lport; 2661 + req->le.pport = rport; 2662 + req->le.u.ipv4.lip = laddr; 2663 + req->le.u.ipv4.pip = raddr; 2664 + req->tcb.rcv_nxt = htonl(rcv_isn + 1); 2665 + req->tcb.rcv_adv = htons(window); 2666 + req->tcb.t_state_to_astid = 2667 + htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) | 2668 + V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) | 2669 + V_FW_OFLD_CONNECTION_WR_ASTID( 2670 + GET_PASS_OPEN_TID(ntohl(cpl->tos_stid)))); 2671 + 2672 + /* 2673 + * We store the qid in opt2 which will be used by the firmware 2674 + * to send us the wr response. 2675 + */ 2676 + req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid)); 2677 + 2678 + /* 2679 + * We initialize the MSS index in TCB to 0xF. 2680 + * So that when driver sends cpl_pass_accept_rpl 2681 + * TCB picks up the correct value. If this was 0 2682 + * TP will ignore any value > 0 for MSS index. 2683 + */ 2684 + req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF)); 2685 + req->cookie = cpu_to_be64((u64)skb); 2686 + 2687 + set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); 2688 + cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); 2689 + } 2690 + 2691 + /* 2692 + * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt 2693 + * messages when a filter is being used instead of server to 2694 + * redirect a syn packet. When packets hit filter they are redirected 2695 + * to the offload queue and driver tries to establish the connection 2696 + * using firmware work request. 2697 + */ 2698 + static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) 2699 + { 2700 + int stid; 2701 + unsigned int filter; 2702 + struct ethhdr *eh = NULL; 2703 + struct vlan_ethhdr *vlan_eh = NULL; 2704 + struct iphdr *iph; 2705 + struct tcphdr *tcph; 2706 + struct rss_header *rss = (void *)skb->data; 2707 + struct cpl_rx_pkt *cpl = (void *)skb->data; 2708 + struct cpl_pass_accept_req *req = (void *)(rss + 1); 2709 + struct l2t_entry *e; 2710 + struct dst_entry *dst; 2711 + struct rtable *rt; 2712 + struct c4iw_ep *lep; 2713 + u16 window; 2714 + struct port_info *pi; 2715 + struct net_device *pdev; 2716 + u16 rss_qid; 2717 + int step; 2718 + u32 tx_chan; 2719 + struct neighbour *neigh; 2720 + 2721 + /* Drop all non-SYN packets */ 2722 + if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN))) 2723 + goto reject; 2724 + 2725 + /* 2726 + * Drop all packets which did not hit the filter. 2727 + * Unlikely to happen. 2728 + */ 2729 + if (!(rss->filter_hit && rss->filter_tid)) 2730 + goto reject; 2731 + 2732 + /* 2733 + * Calculate the server tid from filter hit index from cpl_rx_pkt. 
2734 + */ 2735 + stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base 2736 + + dev->rdev.lldi.tids->nstids; 2737 + 2738 + lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid); 2739 + if (!lep) { 2740 + PDBG("%s connect request on invalid stid %d\n", __func__, stid); 2741 + goto reject; 2742 + } 2743 + 2744 + if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) { 2745 + eh = (struct ethhdr *)(req + 1); 2746 + iph = (struct iphdr *)(eh + 1); 2747 + } else { 2748 + vlan_eh = (struct vlan_ethhdr *)(req + 1); 2749 + iph = (struct iphdr *)(vlan_eh + 1); 2750 + skb->vlan_tci = ntohs(cpl->vlan); 2751 + } 2752 + 2753 + if (iph->version != 0x4) 2754 + goto reject; 2755 + 2756 + tcph = (struct tcphdr *)(iph + 1); 2757 + skb_set_network_header(skb, (void *)iph - (void *)rss); 2758 + skb_set_transport_header(skb, (void *)tcph - (void *)rss); 2759 + skb_get(skb); 2760 + 2761 + PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__, 2762 + ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr), 2763 + ntohs(tcph->source), iph->tos); 2764 + 2765 + rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source, 2766 + iph->tos); 2767 + if (!rt) { 2768 + pr_err("%s - failed to find dst entry!\n", 2769 + __func__); 2770 + goto reject; 2771 + } 2772 + dst = &rt->dst; 2773 + neigh = dst_neigh_lookup_skb(dst, skb); 2774 + 2775 + if (neigh->dev->flags & IFF_LOOPBACK) { 2776 + pdev = ip_dev_find(&init_net, iph->daddr); 2777 + e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 2778 + pdev, 0); 2779 + pi = (struct port_info *)netdev_priv(pdev); 2780 + tx_chan = cxgb4_port_chan(pdev); 2781 + dev_put(pdev); 2782 + } else { 2783 + e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, 2784 + neigh->dev, 0); 2785 + pi = (struct port_info *)netdev_priv(neigh->dev); 2786 + tx_chan = cxgb4_port_chan(neigh->dev); 2787 + } 2788 + if (!e) { 2789 + pr_err("%s - failed to allocate l2t entry!\n", 2790 + __func__); 2791 + goto free_dst; 2792 + } 2793 + 2794 + step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan; 2795 + rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step]; 2796 + window = htons(tcph->window); 2797 + 2798 + /* Calcuate filter portion for LE region. */ 2799 + filter = cpu_to_be32(select_ntuple(dev, dst, e)); 2800 + 2801 + /* 2802 + * Synthesize the cpl_pass_accept_req. We have everything except the 2803 + * TID. Once firmware sends a reply with TID we update the TID field 2804 + * in cpl and pass it through the regular cpl_pass_accept_req path. 
2805 + */ 2806 + build_cpl_pass_accept_req(skb, stid, iph->tos); 2807 + send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr, 2808 + tcph->source, ntohl(tcph->seq), filter, window, 2809 + rss_qid, pi->port_id); 2810 + cxgb4_l2t_release(e); 2811 + free_dst: 2812 + dst_release(dst); 2813 + reject: 2757 2814 return 0; 2758 2815 } 2759 2816 ··· 3089 2520 [CPL_CLOSE_CON_RPL] = close_con_rpl, 3090 2521 [CPL_RDMA_TERMINATE] = terminate, 3091 2522 [CPL_FW4_ACK] = fw4_ack, 3092 - [CPL_FW6_MSG] = async_event 2523 + [CPL_FW6_MSG] = deferred_fw6_msg, 2524 + [CPL_RX_PKT] = rx_pkt 3093 2525 }; 3094 2526 3095 2527 static void process_timeout(struct c4iw_ep *ep) ··· 3101 2531 mutex_lock(&ep->com.mutex); 3102 2532 PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid, 3103 2533 ep->com.state); 2534 + set_bit(TIMEDOUT, &ep->com.history); 3104 2535 switch (ep->com.state) { 3105 2536 case MPA_REQ_SENT: 3106 2537 __state_set(&ep->com, ABORTING); ··· 3222 2651 PDBG("%s type %u\n", __func__, rpl->type); 3223 2652 3224 2653 switch (rpl->type) { 3225 - case 1: 2654 + case FW6_TYPE_WR_RPL: 3226 2655 ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); 3227 2656 wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; 3228 2657 PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); ··· 3230 2659 c4iw_wake_up(wr_waitp, ret ? -ret : 0); 3231 2660 kfree_skb(skb); 3232 2661 break; 3233 - case 2: 2662 + case FW6_TYPE_CQE: 2663 + case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3234 2664 sched(dev, skb); 3235 2665 break; 3236 2666 default: ··· 3294 2722 [CPL_RDMA_TERMINATE] = sched, 3295 2723 [CPL_FW4_ACK] = sched, 3296 2724 [CPL_SET_TCB_RPL] = set_tcb_rpl, 3297 - [CPL_FW6_MSG] = fw6_msg 2725 + [CPL_FW6_MSG] = fw6_msg, 2726 + [CPL_RX_PKT] = sched 3298 2727 }; 3299 2728 3300 2729 int __init c4iw_cm_init(void)
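In the passive LE-collision path above, build_cpl_pass_accept_req() has to recover the MSS, window scale, SACK-permitted and timestamp options from the raw SYN carried in the CPL_RX_PKT (the driver leans on the kernel's tcp_parse_options() for this). For readers less familiar with that step, here is a standalone userspace sketch of the same option walk using the standard TCP option kind/length encodings; the struct, helper name and sample option bytes are illustrative only:

#include <stdio.h>
#include <stdint.h>

struct syn_opts {
	uint16_t mss;
	uint8_t wscale, wscale_ok, sack_ok, tstamp_ok;
};

static void parse_syn_options(const uint8_t *opt, int len, struct syn_opts *o)
{
	while (len > 0) {
		uint8_t kind = opt[0];

		if (kind == 0)                  /* end of option list */
			break;
		if (kind == 1) {                /* NOP has no length byte */
			opt++;
			len--;
			continue;
		}
		if (len < 2 || opt[1] < 2 || opt[1] > len)
			break;                  /* malformed option */
		switch (kind) {
		case 2:                         /* maximum segment size */
			if (opt[1] == 4)
				o->mss = (opt[2] << 8) | opt[3];
			break;
		case 3:                         /* window scale */
			if (opt[1] == 3) {
				o->wscale = opt[2];
				o->wscale_ok = 1;
			}
			break;
		case 4:                         /* SACK permitted */
			o->sack_ok = 1;
			break;
		case 8:                         /* timestamps */
			o->tstamp_ok = 1;
			break;
		}
		len -= opt[1];
		opt += opt[1];
	}
}

int main(void)
{
	/* MSS 1460, NOP, wscale 7, SACK-permitted, end-of-list padding */
	const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 3, 3, 7, 4, 2, 0, 0 };
	struct syn_opts o = { 0 };

	parse_syn_options(opts, sizeof(opts), &o);
	printf("mss %u wscale %u (ok %u) sack %u ts %u\n",
	       o.mss, o.wscale, o.wscale_ok, o.sack_ok, o.tstamp_ok);
	return 0;
}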

drivers/infiniband/hw/cxgb4/device.c  +204 -6
··· 279 279 seq_printf(seq, " DB State: %s Transitions %llu\n", 280 280 db_state_str[dev->db_state], 281 281 dev->rdev.stats.db_state_transitions); 282 + seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full); 283 + seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n", 284 + dev->rdev.stats.act_ofld_conn_fails); 285 + seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", 286 + dev->rdev.stats.pas_ofld_conn_fails); 282 287 return 0; 283 288 } 284 289 ··· 314 309 dev->rdev.stats.db_empty = 0; 315 310 dev->rdev.stats.db_drop = 0; 316 311 dev->rdev.stats.db_state_transitions = 0; 312 + dev->rdev.stats.tcam_full = 0; 313 + dev->rdev.stats.act_ofld_conn_fails = 0; 314 + dev->rdev.stats.pas_ofld_conn_fails = 0; 317 315 mutex_unlock(&dev->rdev.stats.lock); 318 316 return count; 319 317 } ··· 328 320 .read = seq_read, 329 321 .llseek = seq_lseek, 330 322 .write = stats_clear, 323 + }; 324 + 325 + static int dump_ep(int id, void *p, void *data) 326 + { 327 + struct c4iw_ep *ep = p; 328 + struct c4iw_debugfs_data *epd = data; 329 + int space; 330 + int cc; 331 + 332 + space = epd->bufsize - epd->pos - 1; 333 + if (space == 0) 334 + return 1; 335 + 336 + cc = snprintf(epd->buf + epd->pos, space, 337 + "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx " 338 + "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n", 339 + ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state, 340 + ep->com.flags, ep->com.history, ep->hwtid, ep->atid, 341 + &ep->com.local_addr.sin_addr.s_addr, 342 + ntohs(ep->com.local_addr.sin_port), 343 + &ep->com.remote_addr.sin_addr.s_addr, 344 + ntohs(ep->com.remote_addr.sin_port)); 345 + if (cc < space) 346 + epd->pos += cc; 347 + return 0; 348 + } 349 + 350 + static int dump_listen_ep(int id, void *p, void *data) 351 + { 352 + struct c4iw_listen_ep *ep = p; 353 + struct c4iw_debugfs_data *epd = data; 354 + int space; 355 + int cc; 356 + 357 + space = epd->bufsize - epd->pos - 1; 358 + if (space == 0) 359 + return 1; 360 + 361 + cc = snprintf(epd->buf + epd->pos, space, 362 + "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d " 363 + "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state, 364 + ep->com.flags, ep->stid, ep->backlog, 365 + &ep->com.local_addr.sin_addr.s_addr, 366 + ntohs(ep->com.local_addr.sin_port)); 367 + if (cc < space) 368 + epd->pos += cc; 369 + return 0; 370 + } 371 + 372 + static int ep_release(struct inode *inode, struct file *file) 373 + { 374 + struct c4iw_debugfs_data *epd = file->private_data; 375 + if (!epd) { 376 + pr_info("%s null qpd?\n", __func__); 377 + return 0; 378 + } 379 + vfree(epd->buf); 380 + kfree(epd); 381 + return 0; 382 + } 383 + 384 + static int ep_open(struct inode *inode, struct file *file) 385 + { 386 + struct c4iw_debugfs_data *epd; 387 + int ret = 0; 388 + int count = 1; 389 + 390 + epd = kmalloc(sizeof(*epd), GFP_KERNEL); 391 + if (!epd) { 392 + ret = -ENOMEM; 393 + goto out; 394 + } 395 + epd->devp = inode->i_private; 396 + epd->pos = 0; 397 + 398 + spin_lock_irq(&epd->devp->lock); 399 + idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count); 400 + idr_for_each(&epd->devp->atid_idr, count_idrs, &count); 401 + idr_for_each(&epd->devp->stid_idr, count_idrs, &count); 402 + spin_unlock_irq(&epd->devp->lock); 403 + 404 + epd->bufsize = count * 160; 405 + epd->buf = vmalloc(epd->bufsize); 406 + if (!epd->buf) { 407 + ret = -ENOMEM; 408 + goto err1; 409 + } 410 + 411 + spin_lock_irq(&epd->devp->lock); 412 + idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd); 413 + idr_for_each(&epd->devp->atid_idr, dump_ep, epd); 414 + 
idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd); 415 + spin_unlock_irq(&epd->devp->lock); 416 + 417 + file->private_data = epd; 418 + goto out; 419 + err1: 420 + kfree(epd); 421 + out: 422 + return ret; 423 + } 424 + 425 + static const struct file_operations ep_debugfs_fops = { 426 + .owner = THIS_MODULE, 427 + .open = ep_open, 428 + .release = ep_release, 429 + .read = debugfs_read, 331 430 }; 332 431 333 432 static int setup_debugfs(struct c4iw_dev *devp) ··· 456 341 457 342 de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root, 458 343 (void *)devp, &stats_debugfs_fops); 344 + if (de && de->d_inode) 345 + de->d_inode->i_size = 4096; 346 + 347 + de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root, 348 + (void *)devp, &ep_debugfs_fops); 459 349 if (de && de->d_inode) 460 350 de->d_inode->i_size = 4096; 461 351 ··· 595 475 idr_destroy(&ctx->dev->cqidr); 596 476 idr_destroy(&ctx->dev->qpidr); 597 477 idr_destroy(&ctx->dev->mmidr); 478 + idr_destroy(&ctx->dev->hwtid_idr); 479 + idr_destroy(&ctx->dev->stid_idr); 480 + idr_destroy(&ctx->dev->atid_idr); 598 481 iounmap(ctx->dev->rdev.oc_mw_kva); 599 482 ib_dealloc_device(&ctx->dev->ibdev); 600 483 ctx->dev = NULL; ··· 655 532 idr_init(&devp->cqidr); 656 533 idr_init(&devp->qpidr); 657 534 idr_init(&devp->mmidr); 535 + idr_init(&devp->hwtid_idr); 536 + idr_init(&devp->stid_idr); 537 + idr_init(&devp->atid_idr); 658 538 spin_lock_init(&devp->lock); 659 539 mutex_init(&devp->rdev.stats.lock); 660 540 mutex_init(&devp->db_mutex); ··· 703 577 return ctx; 704 578 } 705 579 580 + static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, 581 + const __be64 *rsp, 582 + u32 pktshift) 583 + { 584 + struct sk_buff *skb; 585 + 586 + /* 587 + * Allocate space for cpl_pass_accept_req which will be synthesized by 588 + * driver. Once the driver synthesizes the request the skb will go 589 + * through the regular cpl_pass_accept_req processing. 590 + * The math here assumes sizeof cpl_pass_accept_req >= sizeof 591 + * cpl_rx_pkt. 
592 + */ 593 + skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) + 594 + sizeof(struct rss_header) - pktshift, GFP_ATOMIC); 595 + if (unlikely(!skb)) 596 + return NULL; 597 + 598 + __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) + 599 + sizeof(struct rss_header) - pktshift); 600 + 601 + /* 602 + * This skb will contain: 603 + * rss_header from the rspq descriptor (1 flit) 604 + * cpl_rx_pkt struct from the rspq descriptor (2 flits) 605 + * space for the difference between the size of an 606 + * rx_pkt and pass_accept_req cpl (1 flit) 607 + * the packet data from the gl 608 + */ 609 + skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) + 610 + sizeof(struct rss_header)); 611 + skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) + 612 + sizeof(struct cpl_pass_accept_req), 613 + gl->va + pktshift, 614 + gl->tot_len - pktshift); 615 + return skb; 616 + } 617 + 618 + static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl, 619 + const __be64 *rsp) 620 + { 621 + unsigned int opcode = *(u8 *)rsp; 622 + struct sk_buff *skb; 623 + 624 + if (opcode != CPL_RX_PKT) 625 + goto out; 626 + 627 + skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift); 628 + if (skb == NULL) 629 + goto out; 630 + 631 + if (c4iw_handlers[opcode] == NULL) { 632 + pr_info("%s no handler opcode 0x%x...\n", __func__, 633 + opcode); 634 + kfree_skb(skb); 635 + goto out; 636 + } 637 + c4iw_handlers[opcode](dev, skb); 638 + return 1; 639 + out: 640 + return 0; 641 + } 642 + 706 643 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, 707 644 const struct pkt_gl *gl) 708 645 { 709 646 struct uld_ctx *ctx = handle; 710 647 struct c4iw_dev *dev = ctx->dev; 711 648 struct sk_buff *skb; 712 - const struct cpl_act_establish *rpl; 713 - unsigned int opcode; 649 + u8 opcode; 714 650 715 651 if (gl == NULL) { 716 652 /* omit RSS and rsp_ctrl at end of descriptor */ ··· 789 601 u32 qid = be32_to_cpu(rc->pldbuflen_qid); 790 602 c4iw_ev_handler(dev, qid); 791 603 return 0; 604 + } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) { 605 + if (recv_rx_pkt(dev, gl, rsp)) 606 + return 0; 607 + 608 + pr_info("%s: unexpected FL contents at %p, " \ 609 + "RSS %#llx, FL %#llx, len %u\n", 610 + pci_name(ctx->lldi.pdev), gl->va, 611 + (unsigned long long)be64_to_cpu(*rsp), 612 + (unsigned long long)be64_to_cpu(*(u64 *)gl->va), 613 + gl->tot_len); 614 + 615 + return 0; 792 616 } else { 793 617 skb = cxgb4_pktgl_to_skb(gl, 128, 128); 794 618 if (unlikely(!skb)) 795 619 goto nomem; 796 620 } 797 621 798 - rpl = cplhdr(skb); 799 - opcode = rpl->ot.opcode; 800 - 622 + opcode = *(u8 *)rsp; 801 623 if (c4iw_handlers[opcode]) 802 624 c4iw_handlers[opcode](dev, skb); 803 625 else 804 - printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__, 626 + pr_info("%s no handler opcode 0x%x...\n", __func__, 805 627 opcode); 806 628 807 629 return 0;

drivers/infiniband/hw/cxgb4/iw_cxgb4.h  +33
··· 130 130 u64 db_empty; 131 131 u64 db_drop; 132 132 u64 db_state_transitions; 133 + u64 tcam_full; 134 + u64 act_ofld_conn_fails; 135 + u64 pas_ofld_conn_fails; 133 136 }; 134 137 135 138 struct c4iw_rdev { ··· 226 223 struct dentry *debugfs_root; 227 224 enum db_state db_state; 228 225 int qpcnt; 226 + struct idr hwtid_idr; 227 + struct idr atid_idr; 228 + struct idr stid_idr; 229 229 }; 230 230 231 231 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) ··· 718 712 CLOSE_SENT = 3, 719 713 }; 720 714 715 + enum c4iw_ep_history { 716 + ACT_OPEN_REQ = 0, 717 + ACT_OFLD_CONN = 1, 718 + ACT_OPEN_RPL = 2, 719 + ACT_ESTAB = 3, 720 + PASS_ACCEPT_REQ = 4, 721 + PASS_ESTAB = 5, 722 + ABORT_UPCALL = 6, 723 + ESTAB_UPCALL = 7, 724 + CLOSE_UPCALL = 8, 725 + ULP_ACCEPT = 9, 726 + ULP_REJECT = 10, 727 + TIMEDOUT = 11, 728 + PEER_ABORT = 12, 729 + PEER_CLOSE = 13, 730 + CONNREQ_UPCALL = 14, 731 + ABORT_CONN = 15, 732 + DISCONN_UPCALL = 16, 733 + EP_DISC_CLOSE = 17, 734 + EP_DISC_ABORT = 18, 735 + CONN_RPL_UPCALL = 19, 736 + ACT_RETRY_NOMEM = 20, 737 + ACT_RETRY_INUSE = 21 738 + }; 739 + 721 740 struct c4iw_ep_common { 722 741 struct iw_cm_id *cm_id; 723 742 struct c4iw_qp *qp; ··· 754 723 struct sockaddr_in remote_addr; 755 724 struct c4iw_wr_wait wr_wait; 756 725 unsigned long flags; 726 + unsigned long history; 757 727 }; 758 728 759 729 struct c4iw_listen_ep { ··· 792 760 u8 tos; 793 761 u8 retry_with_mpa_v1; 794 762 u8 tried_with_mpa_v1; 763 + unsigned int retry_count; 795 764 }; 796 765 797 766 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
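The new com.history field added here is purely a debugging aid: cm.c sets one bit per connection life-cycle event (the set_bit(..., &ep->com.history) calls in the hunks above) and the debugfs "eps" dump in device.c prints the accumulated mask, so the path a stuck endpoint took can be reconstructed after the fact. A small userspace sketch of recording and decoding such an event mask; only a subset of the names is shown and the decoder is illustrative, but the bit positions follow enum c4iw_ep_history:

#include <stdio.h>

enum ep_history {
	ACT_OPEN_REQ = 0,
	ACT_OFLD_CONN = 1,
	ACT_OPEN_RPL = 2,
	ACT_ESTAB = 3,
	TIMEDOUT = 11,
};

static const char *ev_name[] = {
	[ACT_OPEN_REQ]  = "ACT_OPEN_REQ",
	[ACT_OFLD_CONN] = "ACT_OFLD_CONN",
	[ACT_OPEN_RPL]  = "ACT_OPEN_RPL",
	[ACT_ESTAB]     = "ACT_ESTAB",
	[TIMEDOUT]      = "TIMEDOUT",
};

int main(void)
{
	unsigned long history = 0;
	unsigned int i;

	/* what set_bit(EVENT, &ep->com.history) accumulates over time */
	history |= 1UL << ACT_OPEN_REQ;
	history |= 1UL << ACT_OPEN_RPL;
	history |= 1UL << TIMEDOUT;

	/* decode the value printed as "history 0x%lx" by dump_ep() */
	printf("history 0x%lx:", history);
	for (i = 0; i < sizeof(ev_name) / sizeof(ev_name[0]); i++)
		if (ev_name[i] && (history & (1UL << i)))
			printf(" %s", ev_name[i]);
	printf("\n");
	return 0;
}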

drivers/infiniband/ulp/ipoib/ipoib_cm.c  +3
··· 752 752 dev->trans_start = jiffies; 753 753 ++tx->tx_head; 754 754 755 + skb_orphan(skb); 756 + skb_dst_drop(skb); 757 + 755 758 if (++priv->tx_outstanding == ipoib_sendq_size) { 756 759 ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", 757 760 tx->qp->qp_num);

drivers/infiniband/ulp/ipoib/ipoib_ib.c  +2 -1
··· 615 615 616 616 address->last_send = priv->tx_head; 617 617 ++priv->tx_head; 618 - skb_orphan(skb); 619 618 619 + skb_orphan(skb); 620 + skb_dst_drop(skb); 620 621 } 621 622 622 623 if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))

drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  +136
··· 35 35 #ifndef __CXGB4_H__ 36 36 #define __CXGB4_H__ 37 37 38 + #include "t4_hw.h" 39 + 38 40 #include <linux/bitops.h> 39 41 #include <linux/cache.h> 40 42 #include <linux/interrupt.h> ··· 214 212 struct tp_params { 215 213 unsigned int ntxchan; /* # of Tx channels */ 216 214 unsigned int tre; /* log2 of core clocks per TP tick */ 215 + unsigned short tx_modq_map; /* TX modulation scheduler queue to */ 216 + /* channel map */ 217 217 218 218 uint32_t dack_re; /* DACK timer resolution */ 219 219 unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */ ··· 530 526 struct net_device *port[MAX_NPORTS]; 531 527 u8 chan_map[NCHAN]; /* channel -> port map */ 532 528 529 + u32 filter_mode; 533 530 unsigned int l2t_start; 534 531 unsigned int l2t_end; 535 532 struct l2t_data *l2t; ··· 548 543 struct dentry *debugfs_root; 549 544 550 545 spinlock_t stats_lock; 546 + }; 547 + 548 + /* Defined bit width of user definable filter tuples 549 + */ 550 + #define ETHTYPE_BITWIDTH 16 551 + #define FRAG_BITWIDTH 1 552 + #define MACIDX_BITWIDTH 9 553 + #define FCOE_BITWIDTH 1 554 + #define IPORT_BITWIDTH 3 555 + #define MATCHTYPE_BITWIDTH 3 556 + #define PROTO_BITWIDTH 8 557 + #define TOS_BITWIDTH 8 558 + #define PF_BITWIDTH 8 559 + #define VF_BITWIDTH 8 560 + #define IVLAN_BITWIDTH 16 561 + #define OVLAN_BITWIDTH 16 562 + 563 + /* Filter matching rules. These consist of a set of ingress packet field 564 + * (value, mask) tuples. The associated ingress packet field matches the 565 + * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field 566 + * rule can be constructed by specifying a tuple of (0, 0).) A filter rule 567 + * matches an ingress packet when all of the individual individual field 568 + * matching rules are true. 569 + * 570 + * Partial field masks are always valid, however, while it may be easy to 571 + * understand their meanings for some fields (e.g. IP address to match a 572 + * subnet), for others making sensible partial masks is less intuitive (e.g. 573 + * MPS match type) ... 574 + * 575 + * Most of the following data structures are modeled on T4 capabilities. 576 + * Drivers for earlier chips use the subsets which make sense for those chips. 577 + * We really need to come up with a hardware-independent mechanism to 578 + * represent hardware filter capabilities ... 579 + */ 580 + struct ch_filter_tuple { 581 + /* Compressed header matching field rules. The TP_VLAN_PRI_MAP 582 + * register selects which of these fields will participate in the 583 + * filter match rules -- up to a maximum of 36 bits. Because 584 + * TP_VLAN_PRI_MAP is a global register, all filters must use the same 585 + * set of fields. 
586 + */ 587 + uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */ 588 + uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */ 589 + uint32_t ivlan_vld:1; /* inner VLAN valid */ 590 + uint32_t ovlan_vld:1; /* outer VLAN valid */ 591 + uint32_t pfvf_vld:1; /* PF/VF valid */ 592 + uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */ 593 + uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */ 594 + uint32_t iport:IPORT_BITWIDTH; /* ingress port */ 595 + uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */ 596 + uint32_t proto:PROTO_BITWIDTH; /* protocol type */ 597 + uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */ 598 + uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */ 599 + uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */ 600 + uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */ 601 + uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */ 602 + 603 + /* Uncompressed header matching field rules. These are always 604 + * available for field rules. 605 + */ 606 + uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */ 607 + uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */ 608 + uint16_t lport; /* local port */ 609 + uint16_t fport; /* foreign port */ 610 + }; 611 + 612 + /* A filter ioctl command. 613 + */ 614 + struct ch_filter_specification { 615 + /* Administrative fields for filter. 616 + */ 617 + uint32_t hitcnts:1; /* count filter hits in TCB */ 618 + uint32_t prio:1; /* filter has priority over active/server */ 619 + 620 + /* Fundamental filter typing. This is the one element of filter 621 + * matching that doesn't exist as a (value, mask) tuple. 622 + */ 623 + uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */ 624 + 625 + /* Packet dispatch information. Ingress packets which match the 626 + * filter rules will be dropped, passed to the host or switched back 627 + * out as egress packets. 628 + */ 629 + uint32_t action:2; /* drop, pass, switch */ 630 + 631 + uint32_t rpttid:1; /* report TID in RSS hash field */ 632 + 633 + uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */ 634 + uint32_t iq:10; /* ingress queue */ 635 + 636 + uint32_t maskhash:1; /* dirsteer=0: store RSS hash in TCB */ 637 + uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */ 638 + /* 1 => TCB contains IQ ID */ 639 + 640 + /* Switch proxy/rewrite fields. An ingress packet which matches a 641 + * filter with "switch" set will be looped back out as an egress 642 + * packet -- potentially with some Ethernet header rewriting. 643 + */ 644 + uint32_t eport:2; /* egress port to switch packet out */ 645 + uint32_t newdmac:1; /* rewrite destination MAC address */ 646 + uint32_t newsmac:1; /* rewrite source MAC address */ 647 + uint32_t newvlan:2; /* rewrite VLAN Tag */ 648 + uint8_t dmac[ETH_ALEN]; /* new destination MAC address */ 649 + uint8_t smac[ETH_ALEN]; /* new source MAC address */ 650 + uint16_t vlan; /* VLAN Tag to insert */ 651 + 652 + /* Filter rule value/mask pairs. 
653 + */ 654 + struct ch_filter_tuple val; 655 + struct ch_filter_tuple mask; 656 + }; 657 + 658 + enum { 659 + FILTER_PASS = 0, /* default */ 660 + FILTER_DROP, 661 + FILTER_SWITCH 662 + }; 663 + 664 + enum { 665 + VLAN_NOCHANGE = 0, /* default */ 666 + VLAN_REMOVE, 667 + VLAN_INSERT, 668 + VLAN_REWRITE 551 669 }; 552 670 553 671 static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) ··· 829 701 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 830 702 unsigned int data_reg, const u32 *vals, 831 703 unsigned int nregs, unsigned int start_idx); 704 + void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 705 + unsigned int data_reg, u32 *vals, unsigned int nregs, 706 + unsigned int start_idx); 707 + 708 + struct fw_filter_wr; 709 + 832 710 void t4_intr_enable(struct adapter *adapter); 833 711 void t4_intr_disable(struct adapter *adapter); 834 712 int t4_slow_intr_handler(struct adapter *adapter); ··· 870 736 struct tp_tcp_stats *v6); 871 737 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 872 738 const unsigned short *alpha, const unsigned short *beta); 739 + 740 + void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); 873 741 874 742 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 875 743 const u8 *addr);
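To make the new filter interface a little more concrete, here is an illustrative initialization (not taken from the patch) of a ch_filter_specification asking for IPv4 TCP traffic to local port 80 to be passed to the host, steered to ingress queue 5, with hit counting enabled. It assumes the definitions above are in scope and says nothing about how the specification reaches the driver, which goes through the cxgbtool ioctl path mentioned in the comments:

	struct ch_filter_specification fs = {
		.hitcnts    = 1,            /* count filter hits in the TCB */
		.type       = 0,            /* 0 => IPv4 */
		.action     = FILTER_PASS,  /* pass matching packets to the host */
		.dirsteer   = 1,            /* steer to a specific ingress queue */
		.iq         = 5,
		.val.proto  = IPPROTO_TCP,
		.mask.proto = 0xff,         /* match the whole protocol field */
		.val.lport  = 80,
		.mask.lport = 0xffff,       /* match the whole local port */
	};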
+448 -11
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 175 175 MIN_FL_ENTRIES = 16 176 176 }; 177 177 178 + /* Host shadow copy of ingress filter entry. This is in host native format 179 + * and doesn't match the ordering or bit order, etc. of the hardware of the 180 + * firmware command. The use of bit-field structure elements is purely to 181 + * remind ourselves of the field size limitations and save memory in the case 182 + * where the filter table is large. 183 + */ 184 + struct filter_entry { 185 + /* Administrative fields for filter. 186 + */ 187 + u32 valid:1; /* filter allocated and valid */ 188 + u32 locked:1; /* filter is administratively locked */ 189 + 190 + u32 pending:1; /* filter action is pending firmware reply */ 191 + u32 smtidx:8; /* Source MAC Table index for smac */ 192 + struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ 193 + 194 + /* The filter itself. Most of this is a straight copy of information 195 + * provided by the extended ioctl(). Some fields are translated to 196 + * internal forms -- for instance the Ingress Queue ID passed in from 197 + * the ioctl() is translated into the Absolute Ingress Queue ID. 198 + */ 199 + struct ch_filter_specification fs; 200 + }; 201 + 178 202 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 179 203 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ 180 204 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) ··· 348 324 }; 349 325 350 326 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; 327 + 328 + module_param(tp_vlan_pri_map, uint, 0644); 329 + MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration"); 351 330 352 331 static struct dentry *cxgb4_debugfs_root; 353 332 ··· 533 506 return ret; 534 507 } 535 508 536 - /* 537 - * Response queue handler for the FW event queue. 509 + /* Clear a filter and release any of its resources that we own. This also 510 + * clears the filter's "pending" status. 511 + */ 512 + static void clear_filter(struct adapter *adap, struct filter_entry *f) 513 + { 514 + /* If the new or old filter have loopback rewriteing rules then we'll 515 + * need to free any existing Layer Two Table (L2T) entries of the old 516 + * filter rule. The firmware will handle freeing up any Source MAC 517 + * Table (SMT) entries used for rewriting Source MAC Addresses in 518 + * loopback rules. 519 + */ 520 + if (f->l2t) 521 + cxgb4_l2t_release(f->l2t); 522 + 523 + /* The zeroing of the filter rule below clears the filter valid, 524 + * pending, locked flags, l2t pointer, etc. so it's all we need for 525 + * this operation. 526 + */ 527 + memset(f, 0, sizeof(*f)); 528 + } 529 + 530 + /* Handle a filter write/deletion reply. 531 + */ 532 + static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) 533 + { 534 + unsigned int idx = GET_TID(rpl); 535 + unsigned int nidx = idx - adap->tids.ftid_base; 536 + unsigned int ret; 537 + struct filter_entry *f; 538 + 539 + if (idx >= adap->tids.ftid_base && nidx < 540 + (adap->tids.nftids + adap->tids.nsftids)) { 541 + idx = nidx; 542 + ret = GET_TCB_COOKIE(rpl->cookie); 543 + f = &adap->tids.ftid_tab[idx]; 544 + 545 + if (ret == FW_FILTER_WR_FLT_DELETED) { 546 + /* Clear the filter when we get confirmation from the 547 + * hardware that the filter has been deleted. 
548 + */ 549 + clear_filter(adap, f); 550 + } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) { 551 + dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n", 552 + idx); 553 + clear_filter(adap, f); 554 + } else if (ret == FW_FILTER_WR_FLT_ADDED) { 555 + f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff; 556 + f->pending = 0; /* asynchronous setup completed */ 557 + f->valid = 1; 558 + } else { 559 + /* Something went wrong. Issue a warning about the 560 + * problem and clear everything out. 561 + */ 562 + dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n", 563 + idx, ret); 564 + clear_filter(adap, f); 565 + } 566 + } 567 + } 568 + 569 + /* Response queue handler for the FW event queue. 538 570 */ 539 571 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, 540 572 const struct pkt_gl *gl) ··· 628 542 const struct cpl_l2t_write_rpl *p = (void *)rsp; 629 543 630 544 do_l2t_write_rpl(q->adap, p); 545 + } else if (opcode == CPL_SET_TCB_RPL) { 546 + const struct cpl_set_tcb_rpl *p = (void *)rsp; 547 + 548 + filter_rpl(q->adap, p); 631 549 } else 632 550 dev_err(q->adap->pdev_dev, 633 551 "unexpected CPL %#x on FW event queue\n", opcode); ··· 1071 981 vfree(addr); 1072 982 else 1073 983 kfree(addr); 984 + } 985 + 986 + /* Send a Work Request to write the filter at a specified index. We construct 987 + * a Firmware Filter Work Request to have the work done and put the indicated 988 + * filter into "pending" mode which will prevent any further actions against 989 + * it till we get a reply from the firmware on the completion status of the 990 + * request. 991 + */ 992 + static int set_filter_wr(struct adapter *adapter, int fidx) 993 + { 994 + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; 995 + struct sk_buff *skb; 996 + struct fw_filter_wr *fwr; 997 + unsigned int ftid; 998 + 999 + /* If the new filter requires loopback Destination MAC and/or VLAN 1000 + * rewriting then we need to allocate a Layer 2 Table (L2T) entry for 1001 + * the filter. 1002 + */ 1003 + if (f->fs.newdmac || f->fs.newvlan) { 1004 + /* allocate L2T entry for new filter */ 1005 + f->l2t = t4_l2t_alloc_switching(adapter->l2t); 1006 + if (f->l2t == NULL) 1007 + return -EAGAIN; 1008 + if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, 1009 + f->fs.eport, f->fs.dmac)) { 1010 + cxgb4_l2t_release(f->l2t); 1011 + f->l2t = NULL; 1012 + return -ENOMEM; 1013 + } 1014 + } 1015 + 1016 + ftid = adapter->tids.ftid_base + fidx; 1017 + 1018 + skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL); 1019 + fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); 1020 + memset(fwr, 0, sizeof(*fwr)); 1021 + 1022 + /* It would be nice to put most of the following in t4_hw.c but most 1023 + * of the work is translating the cxgbtool ch_filter_specification 1024 + * into the Work Request and the definition of that structure is 1025 + * currently in cxgbtool.h which isn't appropriate to pull into the 1026 + * common code. We may eventually try to come up with a more neutral 1027 + * filter specification structure but for now it's easiest to simply 1028 + * put this fairly direct code in line ... 
1029 + */ 1030 + fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); 1031 + fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16)); 1032 + fwr->tid_to_iq = 1033 + htonl(V_FW_FILTER_WR_TID(ftid) | 1034 + V_FW_FILTER_WR_RQTYPE(f->fs.type) | 1035 + V_FW_FILTER_WR_NOREPLY(0) | 1036 + V_FW_FILTER_WR_IQ(f->fs.iq)); 1037 + fwr->del_filter_to_l2tix = 1038 + htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 1039 + V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 1040 + V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 1041 + V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 1042 + V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 1043 + V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 1044 + V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 1045 + V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 1046 + V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 1047 + f->fs.newvlan == VLAN_REWRITE) | 1048 + V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 1049 + f->fs.newvlan == VLAN_REWRITE) | 1050 + V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 1051 + V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 1052 + V_FW_FILTER_WR_PRIO(f->fs.prio) | 1053 + V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); 1054 + fwr->ethtype = htons(f->fs.val.ethtype); 1055 + fwr->ethtypem = htons(f->fs.mask.ethtype); 1056 + fwr->frag_to_ovlan_vldm = 1057 + (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 1058 + V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 1059 + V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | 1060 + V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | 1061 + V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | 1062 + V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); 1063 + fwr->smac_sel = 0; 1064 + fwr->rx_chan_rx_rpl_iq = 1065 + htons(V_FW_FILTER_WR_RX_CHAN(0) | 1066 + V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id)); 1067 + fwr->maci_to_matchtypem = 1068 + htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 1069 + V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 1070 + V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 1071 + V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 1072 + V_FW_FILTER_WR_PORT(f->fs.val.iport) | 1073 + V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 1074 + V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 1075 + V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 1076 + fwr->ptcl = f->fs.val.proto; 1077 + fwr->ptclm = f->fs.mask.proto; 1078 + fwr->ttyp = f->fs.val.tos; 1079 + fwr->ttypm = f->fs.mask.tos; 1080 + fwr->ivlan = htons(f->fs.val.ivlan); 1081 + fwr->ivlanm = htons(f->fs.mask.ivlan); 1082 + fwr->ovlan = htons(f->fs.val.ovlan); 1083 + fwr->ovlanm = htons(f->fs.mask.ovlan); 1084 + memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip)); 1085 + memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm)); 1086 + memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip)); 1087 + memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm)); 1088 + fwr->lp = htons(f->fs.val.lport); 1089 + fwr->lpm = htons(f->fs.mask.lport); 1090 + fwr->fp = htons(f->fs.val.fport); 1091 + fwr->fpm = htons(f->fs.mask.fport); 1092 + if (f->fs.newsmac) 1093 + memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma)); 1094 + 1095 + /* Mark the filter as "pending" and ship off the Filter Work Request. 1096 + * When we get the Work Request Reply we'll clear the pending status. 1097 + */ 1098 + f->pending = 1; 1099 + set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); 1100 + t4_ofld_send(adapter, skb); 1101 + return 0; 1102 + } 1103 + 1104 + /* Delete the filter at a specified index. 
1105 + */ 1106 + static int del_filter_wr(struct adapter *adapter, int fidx) 1107 + { 1108 + struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; 1109 + struct sk_buff *skb; 1110 + struct fw_filter_wr *fwr; 1111 + unsigned int len, ftid; 1112 + 1113 + len = sizeof(*fwr); 1114 + ftid = adapter->tids.ftid_base + fidx; 1115 + 1116 + skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); 1117 + fwr = (struct fw_filter_wr *)__skb_put(skb, len); 1118 + t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id); 1119 + 1120 + /* Mark the filter as "pending" and ship off the Filter Work Request. 1121 + * When we get the Work Request Reply we'll clear the pending status. 1122 + */ 1123 + f->pending = 1; 1124 + t4_mgmt_tx(adapter, skb); 1125 + return 0; 1074 1126 } 1075 1127 1076 1128 static inline int is_offload(const struct adapter *adap) ··· 2427 2195 if (t->afree) { 2428 2196 union aopen_entry *p = t->afree; 2429 2197 2430 - atid = p - t->atid_tab; 2198 + atid = (p - t->atid_tab) + t->atid_base; 2431 2199 t->afree = p->next; 2432 2200 p->data = data; 2433 2201 t->atids_in_use++; ··· 2442 2210 */ 2443 2211 void cxgb4_free_atid(struct tid_info *t, unsigned int atid) 2444 2212 { 2445 - union aopen_entry *p = &t->atid_tab[atid]; 2213 + union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; 2446 2214 2447 2215 spin_lock_bh(&t->atid_lock); 2448 2216 p->next = t->afree; ··· 2481 2249 } 2482 2250 EXPORT_SYMBOL(cxgb4_alloc_stid); 2483 2251 2484 - /* 2485 - * Release a server TID. 2252 + /* Allocate a server filter TID and set it to the supplied value. 2253 + */ 2254 + int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) 2255 + { 2256 + int stid; 2257 + 2258 + spin_lock_bh(&t->stid_lock); 2259 + if (family == PF_INET) { 2260 + stid = find_next_zero_bit(t->stid_bmap, 2261 + t->nstids + t->nsftids, t->nstids); 2262 + if (stid < (t->nstids + t->nsftids)) 2263 + __set_bit(stid, t->stid_bmap); 2264 + else 2265 + stid = -1; 2266 + } else { 2267 + stid = -1; 2268 + } 2269 + if (stid >= 0) { 2270 + t->stid_tab[stid].data = data; 2271 + stid += t->stid_base; 2272 + t->stids_in_use++; 2273 + } 2274 + spin_unlock_bh(&t->stid_lock); 2275 + return stid; 2276 + } 2277 + EXPORT_SYMBOL(cxgb4_alloc_sftid); 2278 + 2279 + /* Release a server TID. 
2486 2280 */ 2487 2281 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) 2488 2282 { ··· 2620 2362 static int tid_init(struct tid_info *t) 2621 2363 { 2622 2364 size_t size; 2365 + unsigned int stid_bmap_size; 2623 2366 unsigned int natids = t->natids; 2624 2367 2625 - size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) + 2368 + stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); 2369 + size = t->ntids * sizeof(*t->tid_tab) + 2370 + natids * sizeof(*t->atid_tab) + 2626 2371 t->nstids * sizeof(*t->stid_tab) + 2627 - BITS_TO_LONGS(t->nstids) * sizeof(long); 2372 + t->nsftids * sizeof(*t->stid_tab) + 2373 + stid_bmap_size * sizeof(long) + 2374 + t->nftids * sizeof(*t->ftid_tab) + 2375 + t->nsftids * sizeof(*t->ftid_tab); 2376 + 2628 2377 t->tid_tab = t4_alloc_mem(size); 2629 2378 if (!t->tid_tab) 2630 2379 return -ENOMEM; 2631 2380 2632 2381 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; 2633 2382 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; 2634 - t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids]; 2383 + t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; 2384 + t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; 2635 2385 spin_lock_init(&t->stid_lock); 2636 2386 spin_lock_init(&t->atid_lock); 2637 2387 ··· 2654 2388 t->atid_tab[natids - 1].next = &t->atid_tab[natids]; 2655 2389 t->afree = t->atid_tab; 2656 2390 } 2657 - bitmap_zero(t->stid_bmap, t->nstids); 2391 + bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); 2658 2392 return 0; 2659 2393 } 2660 2394 ··· 2670 2404 * Returns <0 on error and one of the %NET_XMIT_* values on success. 2671 2405 */ 2672 2406 int cxgb4_create_server(const struct net_device *dev, unsigned int stid, 2673 - __be32 sip, __be16 sport, unsigned int queue) 2407 + __be32 sip, __be16 sport, __be16 vlan, 2408 + unsigned int queue) 2674 2409 { 2675 2410 unsigned int chan; 2676 2411 struct sk_buff *skb; ··· 3017 2750 { 3018 2751 void *handle; 3019 2752 struct cxgb4_lld_info lli; 2753 + unsigned short i; 3020 2754 3021 2755 lli.pdev = adap->pdev; 3022 2756 lli.l2t = adap->l2t; ··· 3044 2776 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 3045 2777 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> 3046 2778 (adap->fn * 4)); 2779 + lli.filt_mode = adap->filter_mode; 2780 + /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 2781 + for (i = 0; i < NCHAN; i++) 2782 + lli.tx_modq[i] = i; 3047 2783 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); 3048 2784 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); 3049 2785 lli.fw_vers = adap->params.fw_vers; 3050 2786 lli.dbfifo_int_thresh = dbfifo_int_thresh; 2787 + lli.sge_pktshift = adap->sge.pktshift; 2788 + lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; 3051 2789 3052 2790 handle = ulds[uld].add(&lli); 3053 2791 if (IS_ERR(handle)) { ··· 3272 2998 netif_carrier_off(dev); 3273 2999 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); 3274 3000 } 3001 + 3002 + /* Return an error number if the indicated filter isn't writable ... 3003 + */ 3004 + static int writable_filter(struct filter_entry *f) 3005 + { 3006 + if (f->locked) 3007 + return -EPERM; 3008 + if (f->pending) 3009 + return -EBUSY; 3010 + 3011 + return 0; 3012 + } 3013 + 3014 + /* Delete the filter at the specified index (if valid). The checks for all 3015 + * the common problems with doing this like the filter being locked, currently 3016 + * pending in another operation, etc. 
3017 + */ 3018 + static int delete_filter(struct adapter *adapter, unsigned int fidx) 3019 + { 3020 + struct filter_entry *f; 3021 + int ret; 3022 + 3023 + if (fidx >= adapter->tids.nftids + adapter->tids.nsftids) 3024 + return -EINVAL; 3025 + 3026 + f = &adapter->tids.ftid_tab[fidx]; 3027 + ret = writable_filter(f); 3028 + if (ret) 3029 + return ret; 3030 + if (f->valid) 3031 + return del_filter_wr(adapter, fidx); 3032 + 3033 + return 0; 3034 + } 3035 + 3036 + int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, 3037 + __be32 sip, __be16 sport, __be16 vlan, 3038 + unsigned int queue, unsigned char port, unsigned char mask) 3039 + { 3040 + int ret; 3041 + struct filter_entry *f; 3042 + struct adapter *adap; 3043 + int i; 3044 + u8 *val; 3045 + 3046 + adap = netdev2adap(dev); 3047 + 3048 + /* Adjust stid to correct filter index */ 3049 + stid -= adap->tids.nstids; 3050 + stid += adap->tids.nftids; 3051 + 3052 + /* Check to make sure the filter requested is writable ... 3053 + */ 3054 + f = &adap->tids.ftid_tab[stid]; 3055 + ret = writable_filter(f); 3056 + if (ret) 3057 + return ret; 3058 + 3059 + /* Clear out any old resources being used by the filter before 3060 + * we start constructing the new filter. 3061 + */ 3062 + if (f->valid) 3063 + clear_filter(adap, f); 3064 + 3065 + /* Clear out filter specifications */ 3066 + memset(&f->fs, 0, sizeof(struct ch_filter_specification)); 3067 + f->fs.val.lport = cpu_to_be16(sport); 3068 + f->fs.mask.lport = ~0; 3069 + val = (u8 *)&sip; 3070 + if ((val[0] | val[1] | val[2] | val[3]) != 0) { 3071 + for (i = 0; i < 4; i++) { 3072 + f->fs.val.lip[i] = val[i]; 3073 + f->fs.mask.lip[i] = ~0; 3074 + } 3075 + if (adap->filter_mode & F_PORT) { 3076 + f->fs.val.iport = port; 3077 + f->fs.mask.iport = mask; 3078 + } 3079 + } 3080 + 3081 + f->fs.dirsteer = 1; 3082 + f->fs.iq = queue; 3083 + /* Mark filter as locked */ 3084 + f->locked = 1; 3085 + f->fs.rpttid = 1; 3086 + 3087 + ret = set_filter_wr(adap, stid); 3088 + if (ret) { 3089 + clear_filter(adap, f); 3090 + return ret; 3091 + } 3092 + 3093 + return 0; 3094 + } 3095 + EXPORT_SYMBOL(cxgb4_create_server_filter); 3096 + 3097 + int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, 3098 + unsigned int queue, bool ipv6) 3099 + { 3100 + int ret; 3101 + struct filter_entry *f; 3102 + struct adapter *adap; 3103 + 3104 + adap = netdev2adap(dev); 3105 + 3106 + /* Adjust stid to correct filter index */ 3107 + stid -= adap->tids.nstids; 3108 + stid += adap->tids.nftids; 3109 + 3110 + f = &adap->tids.ftid_tab[stid]; 3111 + /* Unlock the filter */ 3112 + f->locked = 0; 3113 + 3114 + ret = delete_filter(adap, stid); 3115 + if (ret) 3116 + return ret; 3117 + 3118 + return 0; 3119 + } 3120 + EXPORT_SYMBOL(cxgb4_remove_server_filter); 3275 3121 3276 3122 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, 3277 3123 struct rtnl_link_stats64 *ns) ··· 3638 3244 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); 3639 3245 v = t4_read_reg(adap, TP_PIO_DATA); 3640 3246 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); 3247 + 3248 + /* first 4 Tx modulation queues point to consecutive Tx channels */ 3249 + adap->params.tp.tx_modq_map = 0xE4; 3250 + t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP, 3251 + V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map)); 3252 + 3253 + /* associate each Tx modulation queue with consecutive Tx channels */ 3254 + v = 0x84218421; 3255 + t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 3256 + &v, 1, A_TP_TX_SCHED_HDR); 
3257 + t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 3258 + &v, 1, A_TP_TX_SCHED_FIFO); 3259 + t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 3260 + &v, 1, A_TP_TX_SCHED_PCMD); 3261 + 3262 + #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */ 3263 + if (is_offload(adap)) { 3264 + t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 3265 + V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 3266 + V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 3267 + V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 3268 + V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); 3269 + t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT, 3270 + V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 3271 + V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 3272 + V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) | 3273 + V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT)); 3274 + } 3641 3275 3642 3276 /* get basic stuff going */ 3643 3277 return t4_early_init(adap, adap->fn); ··· 4457 4035 for (j = 0; j < NCHAN; j++) 4458 4036 adap->params.tp.tx_modq[j] = j; 4459 4037 4038 + t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA, 4039 + &adap->filter_mode, 1, 4040 + TP_VLAN_PRI_MAP); 4041 + 4460 4042 adap->flags |= FW_OK; 4461 4043 return 0; 4462 4044 ··· 5086 4660 5087 4661 if (adapter->debugfs_root) 5088 4662 debugfs_remove_recursive(adapter->debugfs_root); 4663 + 4664 + /* If we allocated filters, free up state associated with any 4665 + * valid filters ... 4666 + */ 4667 + if (adapter->tids.ftid_tab) { 4668 + struct filter_entry *f = &adapter->tids.ftid_tab[0]; 4669 + for (i = 0; i < (adapter->tids.nftids + 4670 + adapter->tids.nsftids); i++, f++) 4671 + if (f->valid) 4672 + clear_filter(adapter, f); 4673 + } 5089 4674 5090 4675 if (adapter->flags & FULL_INIT_DONE) 5091 4676 cxgb_down(adapter);
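The new cxgb4_create_server_filter()/cxgb4_remove_server_filter() entry points translate a server-filter stid into an index into ftid_tab before touching the filter. A small arithmetic sketch of that remapping, with stid_base ignored for simplicity and the table sizes invented for illustration:

#include <stdio.h>

/* A server-filter stid allocated by cxgb4_alloc_sftid() lies in
 * [nstids, nstids + nsftids); subtracting nstids and adding nftids lands it
 * in the tail of ftid_tab, after the nftids ordinary filter entries. */
static unsigned int sftid_to_fidx(unsigned int stid,
				  unsigned int nstids, unsigned int nftids)
{
	return stid - nstids + nftids;
}

int main(void)
{
	unsigned int nstids = 2048, nftids = 496, nsftids = 4;	/* made-up sizes */
	unsigned int stid;

	for (stid = nstids; stid < nstids + nsftids; stid++)
		printf("stid %u -> ftid_tab[%u]\n", stid,
		       sftid_to_fidx(stid, nstids, nftids));
	return 0;
}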
+20 -3
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
··· 38 38 #include <linux/cache.h> 39 39 #include <linux/spinlock.h> 40 40 #include <linux/skbuff.h> 41 + #include <linux/inetdevice.h> 41 42 #include <linux/atomic.h> 42 43 43 44 /* CPL message priority levels */ ··· 98 97 99 98 union aopen_entry *atid_tab; 100 99 unsigned int natids; 100 + unsigned int atid_base; 101 101 102 + struct filter_entry *ftid_tab; 102 103 unsigned int nftids; 103 104 unsigned int ftid_base; 104 105 unsigned int aftid_base; ··· 132 129 static inline void *lookup_stid(const struct tid_info *t, unsigned int stid) 133 130 { 134 131 stid -= t->stid_base; 135 - return stid < t->nstids ? t->stid_tab[stid].data : NULL; 132 + return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL; 136 133 } 137 134 138 135 static inline void cxgb4_insert_tid(struct tid_info *t, void *data, ··· 144 141 145 142 int cxgb4_alloc_atid(struct tid_info *t, void *data); 146 143 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data); 144 + int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data); 147 145 void cxgb4_free_atid(struct tid_info *t, unsigned int atid); 148 146 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family); 149 147 void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid); ··· 152 148 struct in6_addr; 153 149 154 150 int cxgb4_create_server(const struct net_device *dev, unsigned int stid, 155 - __be32 sip, __be16 sport, unsigned int queue); 156 - 151 + __be32 sip, __be16 sport, __be16 vlan, 152 + unsigned int queue); 153 + int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, 154 + __be32 sip, __be16 sport, __be16 vlan, 155 + unsigned int queue, 156 + unsigned char port, unsigned char mask); 157 + int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid, 158 + unsigned int queue, bool ipv6); 157 159 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) 158 160 { 159 161 skb_set_queue_mapping(skb, (queue << 1) | prio); ··· 231 221 unsigned int iscsi_iolen; /* iSCSI max I/O length */ 232 222 unsigned short udb_density; /* # of user DB/page */ 233 223 unsigned short ucq_density; /* # of user CQs/page */ 224 + unsigned short filt_mode; /* filter optional components */ 225 + unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ 226 + /* scheduler queue */ 234 227 void __iomem *gts_reg; /* address of GTS register */ 235 228 void __iomem *db_reg; /* address of kernel doorbell */ 236 229 int dbfifo_int_thresh; /* doorbell fifo int threshold */ 230 + unsigned int sge_pktshift; /* Padding between CPL and */ 231 + /* packet data */ 232 + bool enable_fw_ofld_conn; /* Enable connection through fw */ 233 + /* WR */ 237 234 }; 238 235 239 236 struct cxgb4_uld_info {
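Work requests built by the filter code are steered with set_wr_txq(), shown in context above, which packs the CPL priority into bit 0 of the skb queue mapping and the Tx queue above it. A toy encode/decode of that layout (the queue and priority values are arbitrary examples):

#include <stdio.h>

/* Mirror of the (queue << 1) | prio packing used by set_wr_txq(). */
static unsigned int encode_wr_txq(unsigned int prio, unsigned int queue)
{
	return (queue << 1) | prio;
}

int main(void)
{
	unsigned int m = encode_wr_txq(1, 3);	/* illustrative: priority 1, queue 3 */

	printf("mapping=%u -> queue=%u prio=%u\n", m, m >> 1, m & 1);
	return 0;
}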
+32
drivers/net/ethernet/chelsio/cxgb4/l2t.c
··· 484 484 handle_failed_resolution(adap, arpq); 485 485 } 486 486 487 + /* Allocate an L2T entry for use by a switching rule. Such need to be 488 + * explicitly freed and while busy they are not on any hash chain, so normal 489 + * address resolution updates do not see them. 490 + */ 491 + struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d) 492 + { 493 + struct l2t_entry *e; 494 + 495 + write_lock_bh(&d->lock); 496 + e = alloc_l2e(d); 497 + if (e) { 498 + spin_lock(&e->lock); /* avoid race with t4_l2t_free */ 499 + e->state = L2T_STATE_SWITCHING; 500 + atomic_set(&e->refcnt, 1); 501 + spin_unlock(&e->lock); 502 + } 503 + write_unlock_bh(&d->lock); 504 + return e; 505 + } 506 + 507 + /* Sets/updates the contents of a switching L2T entry that has been allocated 508 + * with an earlier call to @t4_l2t_alloc_switching. 509 + */ 510 + int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, 511 + u8 port, u8 *eth_addr) 512 + { 513 + e->vlan = vlan; 514 + e->lport = port; 515 + memcpy(e->dmac, eth_addr, ETH_ALEN); 516 + return write_l2e(adap, e, 0); 517 + } 518 + 487 519 struct l2t_data *t4_init_l2t(void) 488 520 { 489 521 int i;
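The new l2t.c helpers split switching-entry setup into two steps: allocate an entry in a dedicated switching state, then program the VLAN, egress port and destination MAC that looped-back traffic should carry. The stand-in below mirrors only that t4_l2t_alloc_switching()/t4_l2t_set_switching() split; it is not the driver's real data structure and omits the locking and hardware write:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ETH_ALEN 6

enum { TOY_STATE_UNUSED, TOY_STATE_SWITCHING };

struct toy_l2t_entry {
	int	 state;			/* TOY_STATE_SWITCHING once allocated */
	uint16_t vlan;
	uint8_t	 lport;
	uint8_t	 dmac[ETH_ALEN];
};

static void toy_set_switching(struct toy_l2t_entry *e, uint16_t vlan,
			      uint8_t port, const uint8_t *eth_addr)
{
	e->vlan = vlan;
	e->lport = port;
	memcpy(e->dmac, eth_addr, ETH_ALEN);
}

int main(void)
{
	struct toy_l2t_entry e = { .state = TOY_STATE_SWITCHING };
	uint8_t mac[ETH_ALEN] = { 0x00, 0x07, 0x43, 0x12, 0x34, 0x56 };

	toy_set_switching(&e, 100, 0, mac);	/* VLAN 100 out of port 0 */
	printf("state=%d vlan=%u port=%u\n", e.state, e.vlan, e.lport);
	return 0;
}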
+3
drivers/net/ethernet/chelsio/cxgb4/l2t.h
··· 100 100 unsigned int priority); 101 101 102 102 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh); 103 + struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d); 104 + int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan, 105 + u8 port, u8 *eth_addr); 103 106 struct l2t_data *t4_init_l2t(void); 104 107 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl); 105 108
+21 -1
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 109 109 * Reads registers that are accessed indirectly through an address/data 110 110 * register pair. 111 111 */ 112 - static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 112 + void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 113 113 unsigned int data_reg, u32 *vals, 114 114 unsigned int nregs, unsigned int start_idx) 115 115 { ··· 2266 2266 2267 2267 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN); 2268 2268 return 0; 2269 + } 2270 + 2271 + /* t4_mk_filtdelwr - create a delete filter WR 2272 + * @ftid: the filter ID 2273 + * @wr: the filter work request to populate 2274 + * @qid: ingress queue to receive the delete notification 2275 + * 2276 + * Creates a filter work request to delete the supplied filter. If @qid is 2277 + * negative the delete notification is suppressed. 2278 + */ 2279 + void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid) 2280 + { 2281 + memset(wr, 0, sizeof(*wr)); 2282 + wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); 2283 + wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16)); 2284 + wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) | 2285 + V_FW_FILTER_WR_NOREPLY(qid < 0)); 2286 + wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER); 2287 + if (qid >= 0) 2288 + wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid)); 2269 2289 } 2270 2290 2271 2291 #define INIT_CMD(var, cmd, rd_wr) do { \
+66
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
··· 193 193 __be64 wr_lo; 194 194 }; 195 195 196 + /* wr_hi fields */ 197 + #define S_WR_OP 24 198 + #define V_WR_OP(x) ((__u64)(x) << S_WR_OP) 199 + 196 200 #define WR_HDR struct work_request_hdr wr 201 + 202 + /* option 0 fields */ 203 + #define S_MSS_IDX 60 204 + #define M_MSS_IDX 0xF 205 + #define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX) 206 + #define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX) 207 + 208 + /* option 2 fields */ 209 + #define S_RSS_QUEUE 0 210 + #define M_RSS_QUEUE 0x3FF 211 + #define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE) 212 + #define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE) 197 213 198 214 struct cpl_pass_open_req { 199 215 WR_HDR; ··· 220 204 __be32 peer_ip; 221 205 __be64 opt0; 222 206 #define TX_CHAN(x) ((x) << 2) 207 + #define NO_CONG(x) ((x) << 4) 223 208 #define DELACK(x) ((x) << 5) 224 209 #define ULP_MODE(x) ((x) << 8) 225 210 #define RCV_BUFSIZ(x) ((x) << 12) 226 211 #define DSCP(x) ((x) << 22) 227 212 #define SMAC_SEL(x) ((u64)(x) << 28) 228 213 #define L2T_IDX(x) ((u64)(x) << 36) 214 + #define TCAM_BYPASS(x) ((u64)(x) << 48) 229 215 #define NAGLE(x) ((u64)(x) << 49) 230 216 #define WND_SCALE(x) ((u64)(x) << 50) 231 217 #define KEEP_ALIVE(x) ((u64)(x) << 54) ··· 265 247 #define RSS_QUEUE_VALID (1 << 10) 266 248 #define RX_COALESCE_VALID(x) ((x) << 11) 267 249 #define RX_COALESCE(x) ((x) << 12) 250 + #define PACE(x) ((x) << 16) 268 251 #define TX_QUEUE(x) ((x) << 23) 269 252 #define RX_CHANNEL(x) ((x) << 26) 253 + #define CCTRL_ECN(x) ((x) << 27) 270 254 #define WND_SCALE_EN(x) ((x) << 28) 271 255 #define TSTAMPS_EN(x) ((x) << 29) 272 256 #define SACK_EN(x) ((x) << 30) ··· 312 292 union opcode_tid ot; 313 293 __be32 rsvd; 314 294 __be32 tos_stid; 295 + #define PASS_OPEN_TID(x) ((x) << 0) 296 + #define PASS_OPEN_TOS(x) ((x) << 24) 297 + #define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF) 315 298 #define GET_POPEN_TID(x) ((x) & 0xffffff) 316 299 #define GET_POPEN_TOS(x) (((x) >> 24) & 0xff) 317 300 __be16 mac_idx; ··· 355 332 __be16 word_cookie; 356 333 #define TCB_WORD(x) ((x) << 0) 357 334 #define TCB_COOKIE(x) ((x) << 5) 335 + #define GET_TCB_COOKIE(x) (((x) >> 5) & 7) 358 336 __be64 mask; 359 337 __be64 val; 360 338 }; ··· 560 536 __be16 err_vec; 561 537 }; 562 538 539 + /* rx_pkt.l2info fields */ 540 + #define S_RX_ETHHDR_LEN 0 541 + #define M_RX_ETHHDR_LEN 0x1F 542 + #define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN) 543 + #define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN) 544 + 545 + #define S_RX_MACIDX 8 546 + #define M_RX_MACIDX 0x1FF 547 + #define V_RX_MACIDX(x) ((x) << S_RX_MACIDX) 548 + #define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX) 549 + 550 + #define S_RXF_SYN 21 551 + #define V_RXF_SYN(x) ((x) << S_RXF_SYN) 552 + #define F_RXF_SYN V_RXF_SYN(1U) 553 + 554 + #define S_RX_CHAN 28 555 + #define M_RX_CHAN 0xF 556 + #define V_RX_CHAN(x) ((x) << S_RX_CHAN) 557 + #define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN) 558 + 559 + /* rx_pkt.hdr_len fields */ 560 + #define S_RX_TCPHDR_LEN 0 561 + #define M_RX_TCPHDR_LEN 0x3F 562 + #define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN) 563 + #define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN) 564 + 565 + #define S_RX_IPHDR_LEN 6 566 + #define M_RX_IPHDR_LEN 0x3FF 567 + #define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN) 568 + #define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN) 569 + 563 570 struct cpl_trace_pkt { 564 571 u8 opcode; 565 572 u8 intf; ··· 689 634 /* cpl_fw6_msg.type values */ 690 635 enum { 691 636 FW6_TYPE_CMD_RPL = 0, 
637 + FW6_TYPE_WR_RPL = 1, 638 + FW6_TYPE_CQE = 2, 639 + FW6_TYPE_OFLD_CONNECTION_WR_RPL = 3, 640 + }; 641 + 642 + struct cpl_fw6_msg_ofld_connection_wr_rpl { 643 + __u64 cookie; 644 + __be32 tid; /* or atid in case of active failure */ 645 + __u8 t_state; 646 + __u8 retval; 647 + __u8 rsvd[2]; 692 648 }; 693 649 694 650 enum {
+37
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
··· 1064 1064 #define ADDRESS(x) ((x) << ADDRESS_SHIFT) 1065 1065 1066 1066 #define XGMAC_PORT_INT_CAUSE 0x10dc 1067 + 1068 + #define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28 1069 + 1070 + #define A_TP_TX_MOD_CHANNEL_WEIGHT 0x7e34 1071 + 1072 + #define S_TX_MOD_QUEUE_REQ_MAP 0 1073 + #define M_TX_MOD_QUEUE_REQ_MAP 0xffffU 1074 + #define V_TX_MOD_QUEUE_REQ_MAP(x) ((x) << S_TX_MOD_QUEUE_REQ_MAP) 1075 + 1076 + #define A_TP_TX_MOD_QUEUE_WEIGHT0 0x7e30 1077 + 1078 + #define S_TX_MODQ_WEIGHT3 24 1079 + #define M_TX_MODQ_WEIGHT3 0xffU 1080 + #define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3) 1081 + 1082 + #define S_TX_MODQ_WEIGHT2 16 1083 + #define M_TX_MODQ_WEIGHT2 0xffU 1084 + #define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2) 1085 + 1086 + #define S_TX_MODQ_WEIGHT1 8 1087 + #define M_TX_MODQ_WEIGHT1 0xffU 1088 + #define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1) 1089 + 1090 + #define S_TX_MODQ_WEIGHT0 0 1091 + #define M_TX_MODQ_WEIGHT0 0xffU 1092 + #define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0) 1093 + 1094 + #define A_TP_TX_SCHED_HDR 0x23 1095 + 1096 + #define A_TP_TX_SCHED_FIFO 0x24 1097 + 1098 + #define A_TP_TX_SCHED_PCMD 0x25 1099 + 1100 + #define S_PORT 1 1101 + #define V_PORT(x) ((x) << S_PORT) 1102 + #define F_PORT V_PORT(1U) 1103 + 1067 1104 #endif /* __T4_REGS_H */
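cxgb4_main.c (above) programs A_TP_TX_MOD_QUEUE_WEIGHT0 with the same 16 KB weight for all four modulation queues. Using local copies of the shift macros just added here, the composed register value looks like this:

#include <stdint.h>
#include <stdio.h>

/* Local copies of the TX_MODQ_WEIGHT shift/pack macros from the diff. */
#define S_TX_MODQ_WEIGHT0 0
#define S_TX_MODQ_WEIGHT1 8
#define S_TX_MODQ_WEIGHT2 16
#define S_TX_MODQ_WEIGHT3 24
#define V_TX_MODQ_WEIGHT0(x) ((x) << S_TX_MODQ_WEIGHT0)
#define V_TX_MODQ_WEIGHT1(x) ((x) << S_TX_MODQ_WEIGHT1)
#define V_TX_MODQ_WEIGHT2(x) ((x) << S_TX_MODQ_WEIGHT2)
#define V_TX_MODQ_WEIGHT3(x) ((x) << S_TX_MODQ_WEIGHT3)

int main(void)
{
	/* 16 KB default weight for every modulation queue, as in the diff. */
	uint32_t v = V_TX_MODQ_WEIGHT0(16) | V_TX_MODQ_WEIGHT1(16) |
		     V_TX_MODQ_WEIGHT2(16) | V_TX_MODQ_WEIGHT3(16);

	printf("weight register = 0x%08x\n", v);	/* 0x10101010 */
	return 0;
}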
+418
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
··· 35 35 #ifndef _T4FW_INTERFACE_H_ 36 36 #define _T4FW_INTERFACE_H_ 37 37 38 + enum fw_retval { 39 + FW_SUCCESS = 0, /* completed sucessfully */ 40 + FW_EPERM = 1, /* operation not permitted */ 41 + FW_ENOENT = 2, /* no such file or directory */ 42 + FW_EIO = 5, /* input/output error; hw bad */ 43 + FW_ENOEXEC = 8, /* exec format error; inv microcode */ 44 + FW_EAGAIN = 11, /* try again */ 45 + FW_ENOMEM = 12, /* out of memory */ 46 + FW_EFAULT = 14, /* bad address; fw bad */ 47 + FW_EBUSY = 16, /* resource busy */ 48 + FW_EEXIST = 17, /* file exists */ 49 + FW_EINVAL = 22, /* invalid argument */ 50 + FW_ENOSPC = 28, /* no space left on device */ 51 + FW_ENOSYS = 38, /* functionality not implemented */ 52 + FW_EPROTO = 71, /* protocol error */ 53 + FW_EADDRINUSE = 98, /* address already in use */ 54 + FW_EADDRNOTAVAIL = 99, /* cannot assigned requested address */ 55 + FW_ENETDOWN = 100, /* network is down */ 56 + FW_ENETUNREACH = 101, /* network is unreachable */ 57 + FW_ENOBUFS = 105, /* no buffer space available */ 58 + FW_ETIMEDOUT = 110, /* timeout */ 59 + FW_EINPROGRESS = 115, /* fw internal */ 60 + FW_SCSI_ABORT_REQUESTED = 128, /* */ 61 + FW_SCSI_ABORT_TIMEDOUT = 129, /* */ 62 + FW_SCSI_ABORTED = 130, /* */ 63 + FW_SCSI_CLOSE_REQUESTED = 131, /* */ 64 + FW_ERR_LINK_DOWN = 132, /* */ 65 + FW_RDEV_NOT_READY = 133, /* */ 66 + FW_ERR_RDEV_LOST = 134, /* */ 67 + FW_ERR_RDEV_LOGO = 135, /* */ 68 + FW_FCOE_NO_XCHG = 136, /* */ 69 + FW_SCSI_RSP_ERR = 137, /* */ 70 + FW_ERR_RDEV_IMPL_LOGO = 138, /* */ 71 + FW_SCSI_UNDER_FLOW_ERR = 139, /* */ 72 + FW_SCSI_OVER_FLOW_ERR = 140, /* */ 73 + FW_SCSI_DDP_ERR = 141, /* DDP error*/ 74 + FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */ 75 + }; 76 + 38 77 #define FW_T4VF_SGE_BASE_ADDR 0x0000 39 78 #define FW_T4VF_MPS_BASE_ADDR 0x0100 40 79 #define FW_T4VF_PL_BASE_ADDR 0x0200 ··· 85 46 FW_ULPTX_WR = 0x04, 86 47 FW_TP_WR = 0x05, 87 48 FW_ETH_TX_PKT_WR = 0x08, 49 + FW_OFLD_CONNECTION_WR = 0x2f, 88 50 FW_FLOWC_WR = 0x0a, 89 51 FW_OFLD_TX_DATA_WR = 0x0b, 90 52 FW_CMD_WR = 0x10, ··· 121 81 #define FW_WR_LEN16(x) ((x) << 0) 122 82 123 83 #define HW_TPL_FR_MT_PR_IV_P_FC 0X32B 84 + #define HW_TPL_FR_MT_PR_OV_P_FC 0X327 85 + 86 + /* filter wr reply code in cookie in CPL_SET_TCB_RPL */ 87 + enum fw_filter_wr_cookie { 88 + FW_FILTER_WR_SUCCESS, 89 + FW_FILTER_WR_FLT_ADDED, 90 + FW_FILTER_WR_FLT_DELETED, 91 + FW_FILTER_WR_SMT_TBL_FULL, 92 + FW_FILTER_WR_EINVAL, 93 + }; 94 + 95 + struct fw_filter_wr { 96 + __be32 op_pkd; 97 + __be32 len16_pkd; 98 + __be64 r3; 99 + __be32 tid_to_iq; 100 + __be32 del_filter_to_l2tix; 101 + __be16 ethtype; 102 + __be16 ethtypem; 103 + __u8 frag_to_ovlan_vldm; 104 + __u8 smac_sel; 105 + __be16 rx_chan_rx_rpl_iq; 106 + __be32 maci_to_matchtypem; 107 + __u8 ptcl; 108 + __u8 ptclm; 109 + __u8 ttyp; 110 + __u8 ttypm; 111 + __be16 ivlan; 112 + __be16 ivlanm; 113 + __be16 ovlan; 114 + __be16 ovlanm; 115 + __u8 lip[16]; 116 + __u8 lipm[16]; 117 + __u8 fip[16]; 118 + __u8 fipm[16]; 119 + __be16 lp; 120 + __be16 lpm; 121 + __be16 fp; 122 + __be16 fpm; 123 + __be16 r7; 124 + __u8 sma[6]; 125 + }; 126 + 127 + #define S_FW_FILTER_WR_TID 12 128 + #define M_FW_FILTER_WR_TID 0xfffff 129 + #define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID) 130 + #define G_FW_FILTER_WR_TID(x) \ 131 + (((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID) 132 + 133 + #define S_FW_FILTER_WR_RQTYPE 11 134 + #define M_FW_FILTER_WR_RQTYPE 0x1 135 + #define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE) 136 + #define G_FW_FILTER_WR_RQTYPE(x) \ 137 + (((x) 
>> S_FW_FILTER_WR_RQTYPE) & M_FW_FILTER_WR_RQTYPE) 138 + #define F_FW_FILTER_WR_RQTYPE V_FW_FILTER_WR_RQTYPE(1U) 139 + 140 + #define S_FW_FILTER_WR_NOREPLY 10 141 + #define M_FW_FILTER_WR_NOREPLY 0x1 142 + #define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY) 143 + #define G_FW_FILTER_WR_NOREPLY(x) \ 144 + (((x) >> S_FW_FILTER_WR_NOREPLY) & M_FW_FILTER_WR_NOREPLY) 145 + #define F_FW_FILTER_WR_NOREPLY V_FW_FILTER_WR_NOREPLY(1U) 146 + 147 + #define S_FW_FILTER_WR_IQ 0 148 + #define M_FW_FILTER_WR_IQ 0x3ff 149 + #define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ) 150 + #define G_FW_FILTER_WR_IQ(x) \ 151 + (((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ) 152 + 153 + #define S_FW_FILTER_WR_DEL_FILTER 31 154 + #define M_FW_FILTER_WR_DEL_FILTER 0x1 155 + #define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER) 156 + #define G_FW_FILTER_WR_DEL_FILTER(x) \ 157 + (((x) >> S_FW_FILTER_WR_DEL_FILTER) & M_FW_FILTER_WR_DEL_FILTER) 158 + #define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U) 159 + 160 + #define S_FW_FILTER_WR_RPTTID 25 161 + #define M_FW_FILTER_WR_RPTTID 0x1 162 + #define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID) 163 + #define G_FW_FILTER_WR_RPTTID(x) \ 164 + (((x) >> S_FW_FILTER_WR_RPTTID) & M_FW_FILTER_WR_RPTTID) 165 + #define F_FW_FILTER_WR_RPTTID V_FW_FILTER_WR_RPTTID(1U) 166 + 167 + #define S_FW_FILTER_WR_DROP 24 168 + #define M_FW_FILTER_WR_DROP 0x1 169 + #define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP) 170 + #define G_FW_FILTER_WR_DROP(x) \ 171 + (((x) >> S_FW_FILTER_WR_DROP) & M_FW_FILTER_WR_DROP) 172 + #define F_FW_FILTER_WR_DROP V_FW_FILTER_WR_DROP(1U) 173 + 174 + #define S_FW_FILTER_WR_DIRSTEER 23 175 + #define M_FW_FILTER_WR_DIRSTEER 0x1 176 + #define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER) 177 + #define G_FW_FILTER_WR_DIRSTEER(x) \ 178 + (((x) >> S_FW_FILTER_WR_DIRSTEER) & M_FW_FILTER_WR_DIRSTEER) 179 + #define F_FW_FILTER_WR_DIRSTEER V_FW_FILTER_WR_DIRSTEER(1U) 180 + 181 + #define S_FW_FILTER_WR_MASKHASH 22 182 + #define M_FW_FILTER_WR_MASKHASH 0x1 183 + #define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH) 184 + #define G_FW_FILTER_WR_MASKHASH(x) \ 185 + (((x) >> S_FW_FILTER_WR_MASKHASH) & M_FW_FILTER_WR_MASKHASH) 186 + #define F_FW_FILTER_WR_MASKHASH V_FW_FILTER_WR_MASKHASH(1U) 187 + 188 + #define S_FW_FILTER_WR_DIRSTEERHASH 21 189 + #define M_FW_FILTER_WR_DIRSTEERHASH 0x1 190 + #define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH) 191 + #define G_FW_FILTER_WR_DIRSTEERHASH(x) \ 192 + (((x) >> S_FW_FILTER_WR_DIRSTEERHASH) & M_FW_FILTER_WR_DIRSTEERHASH) 193 + #define F_FW_FILTER_WR_DIRSTEERHASH V_FW_FILTER_WR_DIRSTEERHASH(1U) 194 + 195 + #define S_FW_FILTER_WR_LPBK 20 196 + #define M_FW_FILTER_WR_LPBK 0x1 197 + #define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK) 198 + #define G_FW_FILTER_WR_LPBK(x) \ 199 + (((x) >> S_FW_FILTER_WR_LPBK) & M_FW_FILTER_WR_LPBK) 200 + #define F_FW_FILTER_WR_LPBK V_FW_FILTER_WR_LPBK(1U) 201 + 202 + #define S_FW_FILTER_WR_DMAC 19 203 + #define M_FW_FILTER_WR_DMAC 0x1 204 + #define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC) 205 + #define G_FW_FILTER_WR_DMAC(x) \ 206 + (((x) >> S_FW_FILTER_WR_DMAC) & M_FW_FILTER_WR_DMAC) 207 + #define F_FW_FILTER_WR_DMAC V_FW_FILTER_WR_DMAC(1U) 208 + 209 + #define S_FW_FILTER_WR_SMAC 18 210 + #define M_FW_FILTER_WR_SMAC 0x1 211 + #define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC) 212 + #define G_FW_FILTER_WR_SMAC(x) \ 213 + (((x) >> S_FW_FILTER_WR_SMAC) & 
M_FW_FILTER_WR_SMAC) 214 + #define F_FW_FILTER_WR_SMAC V_FW_FILTER_WR_SMAC(1U) 215 + 216 + #define S_FW_FILTER_WR_INSVLAN 17 217 + #define M_FW_FILTER_WR_INSVLAN 0x1 218 + #define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN) 219 + #define G_FW_FILTER_WR_INSVLAN(x) \ 220 + (((x) >> S_FW_FILTER_WR_INSVLAN) & M_FW_FILTER_WR_INSVLAN) 221 + #define F_FW_FILTER_WR_INSVLAN V_FW_FILTER_WR_INSVLAN(1U) 222 + 223 + #define S_FW_FILTER_WR_RMVLAN 16 224 + #define M_FW_FILTER_WR_RMVLAN 0x1 225 + #define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN) 226 + #define G_FW_FILTER_WR_RMVLAN(x) \ 227 + (((x) >> S_FW_FILTER_WR_RMVLAN) & M_FW_FILTER_WR_RMVLAN) 228 + #define F_FW_FILTER_WR_RMVLAN V_FW_FILTER_WR_RMVLAN(1U) 229 + 230 + #define S_FW_FILTER_WR_HITCNTS 15 231 + #define M_FW_FILTER_WR_HITCNTS 0x1 232 + #define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS) 233 + #define G_FW_FILTER_WR_HITCNTS(x) \ 234 + (((x) >> S_FW_FILTER_WR_HITCNTS) & M_FW_FILTER_WR_HITCNTS) 235 + #define F_FW_FILTER_WR_HITCNTS V_FW_FILTER_WR_HITCNTS(1U) 236 + 237 + #define S_FW_FILTER_WR_TXCHAN 13 238 + #define M_FW_FILTER_WR_TXCHAN 0x3 239 + #define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN) 240 + #define G_FW_FILTER_WR_TXCHAN(x) \ 241 + (((x) >> S_FW_FILTER_WR_TXCHAN) & M_FW_FILTER_WR_TXCHAN) 242 + 243 + #define S_FW_FILTER_WR_PRIO 12 244 + #define M_FW_FILTER_WR_PRIO 0x1 245 + #define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO) 246 + #define G_FW_FILTER_WR_PRIO(x) \ 247 + (((x) >> S_FW_FILTER_WR_PRIO) & M_FW_FILTER_WR_PRIO) 248 + #define F_FW_FILTER_WR_PRIO V_FW_FILTER_WR_PRIO(1U) 249 + 250 + #define S_FW_FILTER_WR_L2TIX 0 251 + #define M_FW_FILTER_WR_L2TIX 0xfff 252 + #define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX) 253 + #define G_FW_FILTER_WR_L2TIX(x) \ 254 + (((x) >> S_FW_FILTER_WR_L2TIX) & M_FW_FILTER_WR_L2TIX) 255 + 256 + #define S_FW_FILTER_WR_FRAG 7 257 + #define M_FW_FILTER_WR_FRAG 0x1 258 + #define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG) 259 + #define G_FW_FILTER_WR_FRAG(x) \ 260 + (((x) >> S_FW_FILTER_WR_FRAG) & M_FW_FILTER_WR_FRAG) 261 + #define F_FW_FILTER_WR_FRAG V_FW_FILTER_WR_FRAG(1U) 262 + 263 + #define S_FW_FILTER_WR_FRAGM 6 264 + #define M_FW_FILTER_WR_FRAGM 0x1 265 + #define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM) 266 + #define G_FW_FILTER_WR_FRAGM(x) \ 267 + (((x) >> S_FW_FILTER_WR_FRAGM) & M_FW_FILTER_WR_FRAGM) 268 + #define F_FW_FILTER_WR_FRAGM V_FW_FILTER_WR_FRAGM(1U) 269 + 270 + #define S_FW_FILTER_WR_IVLAN_VLD 5 271 + #define M_FW_FILTER_WR_IVLAN_VLD 0x1 272 + #define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD) 273 + #define G_FW_FILTER_WR_IVLAN_VLD(x) \ 274 + (((x) >> S_FW_FILTER_WR_IVLAN_VLD) & M_FW_FILTER_WR_IVLAN_VLD) 275 + #define F_FW_FILTER_WR_IVLAN_VLD V_FW_FILTER_WR_IVLAN_VLD(1U) 276 + 277 + #define S_FW_FILTER_WR_OVLAN_VLD 4 278 + #define M_FW_FILTER_WR_OVLAN_VLD 0x1 279 + #define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD) 280 + #define G_FW_FILTER_WR_OVLAN_VLD(x) \ 281 + (((x) >> S_FW_FILTER_WR_OVLAN_VLD) & M_FW_FILTER_WR_OVLAN_VLD) 282 + #define F_FW_FILTER_WR_OVLAN_VLD V_FW_FILTER_WR_OVLAN_VLD(1U) 283 + 284 + #define S_FW_FILTER_WR_IVLAN_VLDM 3 285 + #define M_FW_FILTER_WR_IVLAN_VLDM 0x1 286 + #define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM) 287 + #define G_FW_FILTER_WR_IVLAN_VLDM(x) \ 288 + (((x) >> S_FW_FILTER_WR_IVLAN_VLDM) & M_FW_FILTER_WR_IVLAN_VLDM) 289 + #define F_FW_FILTER_WR_IVLAN_VLDM V_FW_FILTER_WR_IVLAN_VLDM(1U) 290 + 291 + 
#define S_FW_FILTER_WR_OVLAN_VLDM 2 292 + #define M_FW_FILTER_WR_OVLAN_VLDM 0x1 293 + #define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM) 294 + #define G_FW_FILTER_WR_OVLAN_VLDM(x) \ 295 + (((x) >> S_FW_FILTER_WR_OVLAN_VLDM) & M_FW_FILTER_WR_OVLAN_VLDM) 296 + #define F_FW_FILTER_WR_OVLAN_VLDM V_FW_FILTER_WR_OVLAN_VLDM(1U) 297 + 298 + #define S_FW_FILTER_WR_RX_CHAN 15 299 + #define M_FW_FILTER_WR_RX_CHAN 0x1 300 + #define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN) 301 + #define G_FW_FILTER_WR_RX_CHAN(x) \ 302 + (((x) >> S_FW_FILTER_WR_RX_CHAN) & M_FW_FILTER_WR_RX_CHAN) 303 + #define F_FW_FILTER_WR_RX_CHAN V_FW_FILTER_WR_RX_CHAN(1U) 304 + 305 + #define S_FW_FILTER_WR_RX_RPL_IQ 0 306 + #define M_FW_FILTER_WR_RX_RPL_IQ 0x3ff 307 + #define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ) 308 + #define G_FW_FILTER_WR_RX_RPL_IQ(x) \ 309 + (((x) >> S_FW_FILTER_WR_RX_RPL_IQ) & M_FW_FILTER_WR_RX_RPL_IQ) 310 + 311 + #define S_FW_FILTER_WR_MACI 23 312 + #define M_FW_FILTER_WR_MACI 0x1ff 313 + #define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI) 314 + #define G_FW_FILTER_WR_MACI(x) \ 315 + (((x) >> S_FW_FILTER_WR_MACI) & M_FW_FILTER_WR_MACI) 316 + 317 + #define S_FW_FILTER_WR_MACIM 14 318 + #define M_FW_FILTER_WR_MACIM 0x1ff 319 + #define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM) 320 + #define G_FW_FILTER_WR_MACIM(x) \ 321 + (((x) >> S_FW_FILTER_WR_MACIM) & M_FW_FILTER_WR_MACIM) 322 + 323 + #define S_FW_FILTER_WR_FCOE 13 324 + #define M_FW_FILTER_WR_FCOE 0x1 325 + #define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE) 326 + #define G_FW_FILTER_WR_FCOE(x) \ 327 + (((x) >> S_FW_FILTER_WR_FCOE) & M_FW_FILTER_WR_FCOE) 328 + #define F_FW_FILTER_WR_FCOE V_FW_FILTER_WR_FCOE(1U) 329 + 330 + #define S_FW_FILTER_WR_FCOEM 12 331 + #define M_FW_FILTER_WR_FCOEM 0x1 332 + #define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM) 333 + #define G_FW_FILTER_WR_FCOEM(x) \ 334 + (((x) >> S_FW_FILTER_WR_FCOEM) & M_FW_FILTER_WR_FCOEM) 335 + #define F_FW_FILTER_WR_FCOEM V_FW_FILTER_WR_FCOEM(1U) 336 + 337 + #define S_FW_FILTER_WR_PORT 9 338 + #define M_FW_FILTER_WR_PORT 0x7 339 + #define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT) 340 + #define G_FW_FILTER_WR_PORT(x) \ 341 + (((x) >> S_FW_FILTER_WR_PORT) & M_FW_FILTER_WR_PORT) 342 + 343 + #define S_FW_FILTER_WR_PORTM 6 344 + #define M_FW_FILTER_WR_PORTM 0x7 345 + #define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM) 346 + #define G_FW_FILTER_WR_PORTM(x) \ 347 + (((x) >> S_FW_FILTER_WR_PORTM) & M_FW_FILTER_WR_PORTM) 348 + 349 + #define S_FW_FILTER_WR_MATCHTYPE 3 350 + #define M_FW_FILTER_WR_MATCHTYPE 0x7 351 + #define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE) 352 + #define G_FW_FILTER_WR_MATCHTYPE(x) \ 353 + (((x) >> S_FW_FILTER_WR_MATCHTYPE) & M_FW_FILTER_WR_MATCHTYPE) 354 + 355 + #define S_FW_FILTER_WR_MATCHTYPEM 0 356 + #define M_FW_FILTER_WR_MATCHTYPEM 0x7 357 + #define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM) 358 + #define G_FW_FILTER_WR_MATCHTYPEM(x) \ 359 + (((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM) 124 360 125 361 struct fw_ulptx_wr { 126 362 __be32 op_to_compl; ··· 415 99 __be32 equiq_to_len16; 416 100 __be64 r3; 417 101 }; 102 + 103 + struct fw_ofld_connection_wr { 104 + __be32 op_compl; 105 + __be32 len16_pkd; 106 + __u64 cookie; 107 + __be64 r2; 108 + __be64 r3; 109 + struct fw_ofld_connection_le { 110 + __be32 version_cpl; 111 + __be32 filter; 112 + __be32 r1; 113 + __be16 lport; 114 + __be16 pport; 115 + 
union fw_ofld_connection_leip { 116 + struct fw_ofld_connection_le_ipv4 { 117 + __be32 pip; 118 + __be32 lip; 119 + __be64 r0; 120 + __be64 r1; 121 + __be64 r2; 122 + } ipv4; 123 + struct fw_ofld_connection_le_ipv6 { 124 + __be64 pip_hi; 125 + __be64 pip_lo; 126 + __be64 lip_hi; 127 + __be64 lip_lo; 128 + } ipv6; 129 + } u; 130 + } le; 131 + struct fw_ofld_connection_tcb { 132 + __be32 t_state_to_astid; 133 + __be16 cplrxdataack_cplpassacceptrpl; 134 + __be16 rcv_adv; 135 + __be32 rcv_nxt; 136 + __be32 tx_max; 137 + __be64 opt0; 138 + __be32 opt2; 139 + __be32 r1; 140 + __be64 r2; 141 + __be64 r3; 142 + } tcb; 143 + }; 144 + 145 + #define S_FW_OFLD_CONNECTION_WR_VERSION 31 146 + #define M_FW_OFLD_CONNECTION_WR_VERSION 0x1 147 + #define V_FW_OFLD_CONNECTION_WR_VERSION(x) \ 148 + ((x) << S_FW_OFLD_CONNECTION_WR_VERSION) 149 + #define G_FW_OFLD_CONNECTION_WR_VERSION(x) \ 150 + (((x) >> S_FW_OFLD_CONNECTION_WR_VERSION) & \ 151 + M_FW_OFLD_CONNECTION_WR_VERSION) 152 + #define F_FW_OFLD_CONNECTION_WR_VERSION \ 153 + V_FW_OFLD_CONNECTION_WR_VERSION(1U) 154 + 155 + #define S_FW_OFLD_CONNECTION_WR_CPL 30 156 + #define M_FW_OFLD_CONNECTION_WR_CPL 0x1 157 + #define V_FW_OFLD_CONNECTION_WR_CPL(x) ((x) << S_FW_OFLD_CONNECTION_WR_CPL) 158 + #define G_FW_OFLD_CONNECTION_WR_CPL(x) \ 159 + (((x) >> S_FW_OFLD_CONNECTION_WR_CPL) & M_FW_OFLD_CONNECTION_WR_CPL) 160 + #define F_FW_OFLD_CONNECTION_WR_CPL V_FW_OFLD_CONNECTION_WR_CPL(1U) 161 + 162 + #define S_FW_OFLD_CONNECTION_WR_T_STATE 28 163 + #define M_FW_OFLD_CONNECTION_WR_T_STATE 0xf 164 + #define V_FW_OFLD_CONNECTION_WR_T_STATE(x) \ 165 + ((x) << S_FW_OFLD_CONNECTION_WR_T_STATE) 166 + #define G_FW_OFLD_CONNECTION_WR_T_STATE(x) \ 167 + (((x) >> S_FW_OFLD_CONNECTION_WR_T_STATE) & \ 168 + M_FW_OFLD_CONNECTION_WR_T_STATE) 169 + 170 + #define S_FW_OFLD_CONNECTION_WR_RCV_SCALE 24 171 + #define M_FW_OFLD_CONNECTION_WR_RCV_SCALE 0xf 172 + #define V_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ 173 + ((x) << S_FW_OFLD_CONNECTION_WR_RCV_SCALE) 174 + #define G_FW_OFLD_CONNECTION_WR_RCV_SCALE(x) \ 175 + (((x) >> S_FW_OFLD_CONNECTION_WR_RCV_SCALE) & \ 176 + M_FW_OFLD_CONNECTION_WR_RCV_SCALE) 177 + 178 + #define S_FW_OFLD_CONNECTION_WR_ASTID 0 179 + #define M_FW_OFLD_CONNECTION_WR_ASTID 0xffffff 180 + #define V_FW_OFLD_CONNECTION_WR_ASTID(x) \ 181 + ((x) << S_FW_OFLD_CONNECTION_WR_ASTID) 182 + #define G_FW_OFLD_CONNECTION_WR_ASTID(x) \ 183 + (((x) >> S_FW_OFLD_CONNECTION_WR_ASTID) & M_FW_OFLD_CONNECTION_WR_ASTID) 184 + 185 + #define S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 15 186 + #define M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK 0x1 187 + #define V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ 188 + ((x) << S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) 189 + #define G_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(x) \ 190 + (((x) >> S_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) & \ 191 + M_FW_OFLD_CONNECTION_WR_CPLRXDATAACK) 192 + #define F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK \ 193 + V_FW_OFLD_CONNECTION_WR_CPLRXDATAACK(1U) 194 + 195 + #define S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 14 196 + #define M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL 0x1 197 + #define V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ 198 + ((x) << S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) 199 + #define G_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(x) \ 200 + (((x) >> S_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) & \ 201 + M_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL) 202 + #define F_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL \ 203 + V_FW_OFLD_CONNECTION_WR_CPLPASSACCEPTRPL(1U) 418 204 419 205 enum fw_flowc_mnem { 420 206 FW_FLOWC_MNEM_PFNVFN, /* PFN [15:8] VFN [7:0] 
*/
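The FW_FILTER_WR field accessors added above follow Chelsio's usual S_/M_/V_/G_ naming: shift, mask, pack, extract. A stand-alone round trip through two of the tid_to_iq fields, with the macro definitions copied from the diff:

#include <stdint.h>
#include <stdio.h>

#define S_FW_FILTER_WR_TID	12
#define M_FW_FILTER_WR_TID	0xfffff
#define V_FW_FILTER_WR_TID(x)	((x) << S_FW_FILTER_WR_TID)
#define G_FW_FILTER_WR_TID(x)	(((x) >> S_FW_FILTER_WR_TID) & M_FW_FILTER_WR_TID)

#define S_FW_FILTER_WR_IQ	0
#define M_FW_FILTER_WR_IQ	0x3ff
#define V_FW_FILTER_WR_IQ(x)	((x) << S_FW_FILTER_WR_IQ)
#define G_FW_FILTER_WR_IQ(x)	(((x) >> S_FW_FILTER_WR_IQ) & M_FW_FILTER_WR_IQ)

int main(void)
{
	/* Pack a filter TID and reply ingress queue, then pull them back out. */
	uint32_t tid_to_iq = V_FW_FILTER_WR_TID(0x123) | V_FW_FILTER_WR_IQ(7);

	printf("tid=0x%x iq=%u\n",
	       G_FW_FILTER_WR_TID(tid_to_iq), G_FW_FILTER_WR_IQ(tid_to_iq));
	return 0;
}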
+12 -3
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 1338 1338 { 1339 1339 struct mlx4_cmd_mailbox *mailbox; 1340 1340 __be32 *outbox; 1341 + u32 dword_field; 1341 1342 int err; 1342 1343 u8 byte_field; 1343 1344 ··· 1373 1372 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 1374 1373 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 1375 1374 1375 + MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET); 1376 + if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) { 1377 + param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 1378 + } else { 1379 + MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET); 1380 + if (byte_field & 0x8) 1381 + param->steering_mode = MLX4_STEERING_MODE_B0; 1382 + else 1383 + param->steering_mode = MLX4_STEERING_MODE_A0; 1384 + } 1376 1385 /* steering attributes */ 1377 - if (dev->caps.steering_mode == 1378 - MLX4_STEERING_MODE_DEVICE_MANAGED) { 1379 - 1386 + if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 1380 1387 MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); 1381 1388 MLX4_GET(param->log_mc_entry_sz, outbox, 1382 1389 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+1
drivers/net/ethernet/mellanox/mlx4/fw.h
··· 172 172 u8 log_uar_sz; 173 173 u8 uar_page_sz; /* log pg sz in 4k chunks */ 174 174 u8 fs_hash_enable_bits; 175 + u8 steering_mode; /* for QUERY_HCA */ 175 176 u64 dev_cap_enabled; 176 177 }; 177 178
+89 -26
drivers/net/ethernet/mellanox/mlx4/main.c
··· 85 85 module_param(probe_vf, int, 0644); 86 86 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); 87 87 88 - int mlx4_log_num_mgm_entry_size = 10; 88 + int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 89 89 module_param_named(log_num_mgm_entry_size, 90 90 mlx4_log_num_mgm_entry_size, int, 0444); 91 91 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 92 92 " of qp per mcg, for example:" 93 - " 10 gives 248.range: 9<=" 93 + " 10 gives 248.range: 7 <=" 94 94 " log_num_mgm_entry_size <= 12." 95 - " Not in use with device managed" 96 - " flow steering"); 95 + " To activate device managed" 96 + " flow steering when available, set to -1"); 97 97 98 98 static bool enable_64b_cqe_eqe; 99 99 module_param(enable_64b_cqe_eqe, bool, 0444); ··· 280 280 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 281 281 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 282 282 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 283 - 284 - if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { 285 - dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 286 - dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 287 - dev->caps.fs_log_max_ucast_qp_range_size = 288 - dev_cap->fs_log_max_ucast_qp_range_size; 289 - } else { 290 - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 291 - dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) { 292 - dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 293 - } else { 294 - dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 295 - 296 - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 297 - dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 298 - mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags " 299 - "set to use B0 steering. Falling back to A0 steering mode.\n"); 300 - } 301 - dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 302 - } 303 - mlx4_dbg(dev, "Steering mode is: %s\n", 304 - mlx4_steering_mode_str(dev->caps.steering_mode)); 305 283 306 284 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */ 307 285 if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT) ··· 471 493 } 472 494 EXPORT_SYMBOL(mlx4_is_slave_active); 473 495 496 + static void slave_adjust_steering_mode(struct mlx4_dev *dev, 497 + struct mlx4_dev_cap *dev_cap, 498 + struct mlx4_init_hca_param *hca_param) 499 + { 500 + dev->caps.steering_mode = hca_param->steering_mode; 501 + if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { 502 + dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 503 + dev->caps.fs_log_max_ucast_qp_range_size = 504 + dev_cap->fs_log_max_ucast_qp_range_size; 505 + } else 506 + dev->caps.num_qp_per_mgm = 507 + 4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2); 508 + 509 + mlx4_dbg(dev, "Steering mode is: %s\n", 510 + mlx4_steering_mode_str(dev->caps.steering_mode)); 511 + } 512 + 474 513 static int mlx4_slave_cap(struct mlx4_dev *dev) 475 514 { 476 515 int err; ··· 629 634 } else { 630 635 dev->caps.cqe_size = 32; 631 636 } 637 + 638 + slave_adjust_steering_mode(dev, &dev_cap, &hca_param); 632 639 633 640 return 0; 634 641 ··· 1318 1321 } 1319 1322 } 1320 1323 1324 + static int choose_log_fs_mgm_entry_size(int qp_per_entry) 1325 + { 1326 + int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 1327 + 1328 + for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 1329 + i++) { 1330 + if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 1331 + break; 1332 + } 1333 + 1334 + return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? 
i : -1; 1335 + } 1336 + 1337 + static void choose_steering_mode(struct mlx4_dev *dev, 1338 + struct mlx4_dev_cap *dev_cap) 1339 + { 1340 + if (mlx4_log_num_mgm_entry_size == -1 && 1341 + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1342 + (!mlx4_is_mfunc(dev) || 1343 + (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) && 1344 + choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1345 + MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1346 + dev->oper_log_mgm_entry_size = 1347 + choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 1348 + dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 1349 + dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 1350 + dev->caps.fs_log_max_ucast_qp_range_size = 1351 + dev_cap->fs_log_max_ucast_qp_range_size; 1352 + } else { 1353 + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 1354 + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1355 + dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 1356 + else { 1357 + dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 1358 + 1359 + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 1360 + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 1361 + mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags " 1362 + "set to use B0 steering. Falling back to A0 steering mode.\n"); 1363 + } 1364 + dev->oper_log_mgm_entry_size = 1365 + mlx4_log_num_mgm_entry_size > 0 ? 1366 + mlx4_log_num_mgm_entry_size : 1367 + MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 1368 + dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 1369 + } 1370 + mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, " 1371 + "modparam log_num_mgm_entry_size = %d\n", 1372 + mlx4_steering_mode_str(dev->caps.steering_mode), 1373 + dev->oper_log_mgm_entry_size, 1374 + mlx4_log_num_mgm_entry_size); 1375 + } 1376 + 1321 1377 static int mlx4_init_hca(struct mlx4_dev *dev) 1322 1378 { 1323 1379 struct mlx4_priv *priv = mlx4_priv(dev); ··· 1409 1359 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 1410 1360 goto err_stop_fw; 1411 1361 } 1362 + 1363 + choose_steering_mode(dev, &dev_cap); 1412 1364 1413 1365 if (mlx4_is_master(dev)) 1414 1366 mlx4_parav_master_pf_caps(dev); ··· 2502 2450 if (port_type_array[0] == false && port_type_array[1] == true) { 2503 2451 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 2504 2452 port_type_array[0] = true; 2453 + } 2454 + 2455 + if (mlx4_log_num_mgm_entry_size != -1 && 2456 + (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE || 2457 + mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) { 2458 + pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not " 2459 + "in legal range (-1 or %d..%d)\n", 2460 + mlx4_log_num_mgm_entry_size, 2461 + MLX4_MIN_MGM_LOG_ENTRY_SIZE, 2462 + MLX4_MAX_MGM_LOG_ENTRY_SIZE); 2463 + return -1; 2505 2464 } 2506 2465 2507 2466 return 0;
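choose_log_fs_mgm_entry_size() picks the smallest MGM entry size whose 4 * (2^i / 16 - 2) QP capacity covers what device-managed steering needs, between log sizes 7 and 12. A userspace copy of that loop, with a few sample QP counts:

#include <stdio.h>

#define MLX4_MIN_MGM_LOG_ENTRY_SIZE 7
#define MLX4_MAX_MGM_LOG_ENTRY_SIZE 12

/* Each MGM entry of 2^i bytes holds 4 * (2^i / 16 - 2) QPs; return the
 * smallest i that fits qp_per_entry, or -1 if even 2^12 bytes is too small. */
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; i++)
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;

	return i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE ? i : -1;
}

int main(void)
{
	printf("%d\n", choose_log_fs_mgm_entry_size(24));	/* 7: 24 QPs fit in 128 bytes */
	printf("%d\n", choose_log_fs_mgm_entry_size(100));	/* 9: needs a 512-byte entry */
	printf("%d\n", choose_log_fs_mgm_entry_size(2000));	/* -1: more than 1016 QPs */
	return 0;
}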
+1 -6
drivers/net/ethernet/mellanox/mlx4/mcg.c
··· 54 54 55 55 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) 56 56 { 57 - if (dev->caps.steering_mode == 58 - MLX4_STEERING_MODE_DEVICE_MANAGED) 59 - return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE; 60 - else 61 - return min((1 << mlx4_log_num_mgm_entry_size), 62 - MLX4_MAX_MGM_ENTRY_SIZE); 57 + return 1 << dev->oper_log_mgm_entry_size; 63 58 } 64 59 65 60 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+4 -2
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 94 94 }; 95 95 96 96 enum { 97 - MLX4_MAX_MGM_ENTRY_SIZE = 0x1000, 98 - MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2), 97 + MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE = 10, 98 + MLX4_MIN_MGM_LOG_ENTRY_SIZE = 7, 99 + MLX4_MAX_MGM_LOG_ENTRY_SIZE = 12, 100 + MLX4_MAX_QP_PER_MGM = 4 * ((1 << MLX4_MAX_MGM_LOG_ENTRY_SIZE) / 16 - 2), 99 101 MLX4_MTT_ENTRY_PER_SEG = 8, 100 102 }; 101 103
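The limits introduced above bound how many QPs a single multicast group (MCG) entry can carry. Worked values, using the same 4 * (entry_size / 16 - 2) layout that defines MLX4_MAX_QP_PER_MGM:

  log size 7  -> 128-byte entries  -> 4 * (8 - 2)   = 24 QPs per entry    (MLX4_MIN_MGM_LOG_ENTRY_SIZE)
  log size 10 -> 1024-byte entries -> 4 * (64 - 2)  = 248 QPs per entry   (MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE, the "10 gives 248" in the modparam help)
  log size 12 -> 4096-byte entries -> 4 * (256 - 2) = 1016 QPs per entry  (MLX4_MAX_QP_PER_MGM)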
+21 -7
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 3071 3071 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 3072 3072 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; 3073 3073 int err; 3074 + int qpn; 3074 3075 struct mlx4_net_trans_rule_hw_ctrl *ctrl; 3075 3076 struct _rule_hw *rule_header; 3076 3077 int header_id; ··· 3081 3080 return -EOPNOTSUPP; 3082 3081 3083 3082 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3083 + qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 3084 + err = get_res(dev, slave, qpn, RES_QP, NULL); 3085 + if (err) { 3086 + pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); 3087 + return err; 3088 + } 3084 3089 rule_header = (struct _rule_hw *)(ctrl + 1); 3085 3090 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); 3086 3091 3087 3092 switch (header_id) { 3088 3093 case MLX4_NET_TRANS_RULE_ID_ETH: 3089 - if (validate_eth_header_mac(slave, rule_header, rlist)) 3090 - return -EINVAL; 3094 + if (validate_eth_header_mac(slave, rule_header, rlist)) { 3095 + err = -EINVAL; 3096 + goto err_put; 3097 + } 3091 3098 break; 3092 3099 case MLX4_NET_TRANS_RULE_ID_IB: 3093 3100 break; ··· 3103 3094 case MLX4_NET_TRANS_RULE_ID_TCP: 3104 3095 case MLX4_NET_TRANS_RULE_ID_UDP: 3105 3096 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); 3106 - if (add_eth_header(dev, slave, inbox, rlist, header_id)) 3107 - return -EINVAL; 3097 + if (add_eth_header(dev, slave, inbox, rlist, header_id)) { 3098 + err = -EINVAL; 3099 + goto err_put; 3100 + } 3108 3101 vhcr->in_modifier += 3109 3102 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; 3110 3103 break; 3111 3104 default: 3112 3105 pr_err("Corrupted mailbox.\n"); 3113 - return -EINVAL; 3106 + err = -EINVAL; 3107 + goto err_put; 3114 3108 } 3115 3109 3116 3110 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, ··· 3121 3109 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 3122 3110 MLX4_CMD_NATIVE); 3123 3111 if (err) 3124 - return err; 3112 + goto err_put; 3125 3113 3126 3114 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0); 3127 3115 if (err) { 3128 3116 mlx4_err(dev, "Fail to add flow steering resources.\n "); 3129 3117 /* detach rule*/ 3130 3118 mlx4_cmd(dev, vhcr->out_param, 0, 0, 3131 - MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, 3119 + MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, 3132 3120 MLX4_CMD_NATIVE); 3133 3121 } 3122 + err_put: 3123 + put_res(dev, slave, qpn, RES_QP); 3134 3124 return err; 3135 3125 } 3136 3126
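The resource_tracker.c hunk above makes the PF-side wrapper claim the QP named in a VF's flow steering rule before forwarding the attach command, release it on every exit path, and roll back a failed resource registration with a DETACH command instead of re-issuing ATTACH. A condensed sketch of the ownership check, using only the get_res()/put_res() helpers visible in the hunk; verify_rule_qpn() is an invented name, and the real wrapper also validates the rule headers before issuing the firmware command:

/*
 * Condensed sketch of the QPN enforcement added above; not the full wrapper.
 */
static int verify_rule_qpn(struct mlx4_dev *dev, int slave,
			   struct mlx4_net_trans_rule_hw_ctrl *ctrl)
{
	/* QP numbers are 24 bits wide, hence the mask */
	int qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
	int err;

	err = get_res(dev, slave, qpn, RES_QP, NULL);
	if (err)
		return err;	/* the VF does not own this QP: reject the rule */

	/* ... header validation and MLX4_QP_FLOW_STEERING_ATTACH go here ... */

	put_res(dev, slave, qpn, RES_QP);
	return err;
}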
-39
drivers/scsi/csiostor/t4fw_api_stor.h
··· 40 40 * R E T U R N V A L U E S 41 41 ********************************/ 42 42 43 - enum fw_retval { 44 - FW_SUCCESS = 0, /* completed sucessfully */ 45 - FW_EPERM = 1, /* operation not permitted */ 46 - FW_ENOENT = 2, /* no such file or directory */ 47 - FW_EIO = 5, /* input/output error; hw bad */ 48 - FW_ENOEXEC = 8, /* exec format error; inv microcode */ 49 - FW_EAGAIN = 11, /* try again */ 50 - FW_ENOMEM = 12, /* out of memory */ 51 - FW_EFAULT = 14, /* bad address; fw bad */ 52 - FW_EBUSY = 16, /* resource busy */ 53 - FW_EEXIST = 17, /* file exists */ 54 - FW_EINVAL = 22, /* invalid argument */ 55 - FW_ENOSPC = 28, /* no space left on device */ 56 - FW_ENOSYS = 38, /* functionality not implemented */ 57 - FW_EPROTO = 71, /* protocol error */ 58 - FW_EADDRINUSE = 98, /* address already in use */ 59 - FW_EADDRNOTAVAIL = 99, /* cannot assigned requested address */ 60 - FW_ENETDOWN = 100, /* network is down */ 61 - FW_ENETUNREACH = 101, /* network is unreachable */ 62 - FW_ENOBUFS = 105, /* no buffer space available */ 63 - FW_ETIMEDOUT = 110, /* timeout */ 64 - FW_EINPROGRESS = 115, /* fw internal */ 65 - FW_SCSI_ABORT_REQUESTED = 128, /* */ 66 - FW_SCSI_ABORT_TIMEDOUT = 129, /* */ 67 - FW_SCSI_ABORTED = 130, /* */ 68 - FW_SCSI_CLOSE_REQUESTED = 131, /* */ 69 - FW_ERR_LINK_DOWN = 132, /* */ 70 - FW_RDEV_NOT_READY = 133, /* */ 71 - FW_ERR_RDEV_LOST = 134, /* */ 72 - FW_ERR_RDEV_LOGO = 135, /* */ 73 - FW_FCOE_NO_XCHG = 136, /* */ 74 - FW_SCSI_RSP_ERR = 137, /* */ 75 - FW_ERR_RDEV_IMPL_LOGO = 138, /* */ 76 - FW_SCSI_UNDER_FLOW_ERR = 139, /* */ 77 - FW_SCSI_OVER_FLOW_ERR = 140, /* */ 78 - FW_SCSI_DDP_ERR = 141, /* DDP error*/ 79 - FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */ 80 - }; 81 - 82 43 enum fw_fcoe_link_sub_op { 83 44 FCOE_LINK_DOWN = 0x0, 84 45 FCOE_LINK_UP = 0x1,
+1
include/linux/mlx4/device.h
··· 625 625 u8 rev_id; 626 626 char board_id[MLX4_BOARD_ID_LEN]; 627 627 int num_vfs; 628 + int oper_log_mgm_entry_size; 628 629 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 629 630 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 630 631 };