Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'qed-next'

Yuval Mintz says:

====================
qed: update series

This patch series tries to improve general configuration by changing
configuration to better suit B0 boards and allow more available
resources to each physical function.
In addition, it contains some small fixes and semantic changes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+441 -331
-3
drivers/net/ethernet/qlogic/qed/qed.h
··· 146 146 u16 ovlan; 147 147 u32 part_num[4]; 148 148 149 - u32 vendor_id; 150 - u32 device_id; 151 - 152 149 unsigned char hw_mac_addr[ETH_ALEN]; 153 150 154 151 struct qed_igu_info *p_igu_info;
+102 -321
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 32 32 #include "qed_sp.h" 33 33 34 34 /* API common to all protocols */ 35 + enum BAR_ID { 36 + BAR_ID_0, /* used for GRC */ 37 + BAR_ID_1 /* Used for doorbells */ 38 + }; 39 + 40 + static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, 41 + enum BAR_ID bar_id) 42 + { 43 + u32 bar_reg = (bar_id == BAR_ID_0 ? 44 + PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); 45 + u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); 46 + 47 + if (val) 48 + return 1 << (val + 15); 49 + 50 + /* Old MFW initialized above registered only conditionally */ 51 + if (p_hwfn->cdev->num_hwfns > 1) { 52 + DP_INFO(p_hwfn, 53 + "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); 54 + return BAR_ID_0 ? 256 * 1024 : 512 * 1024; 55 + } else { 56 + DP_INFO(p_hwfn, 57 + "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); 58 + return 512 * 1024; 59 + } 60 + } 61 + 35 62 void qed_init_dp(struct qed_dev *cdev, 36 63 u32 dp_module, u8 dp_level) 37 64 { ··· 420 393 { 421 394 int hw_mode = 0; 422 395 423 - hw_mode = (1 << MODE_BB_A0); 396 + hw_mode = (1 << MODE_BB_B0); 424 397 425 398 switch (p_hwfn->cdev->num_ports_in_engines) { 426 399 case 1: ··· 677 650 bool allow_npar_tx_switch, 678 651 const u8 *bin_fw_data) 679 652 { 680 - struct qed_storm_stats *p_stat; 681 - u32 load_code, param, *p_address; 653 + u32 load_code, param; 682 654 int rc, mfw_rc, i; 683 - u8 fw_vport = 0; 684 655 685 656 rc = qed_init_fw_data(cdev, bin_fw_data); 686 657 if (rc != 0) ··· 686 661 687 662 for_each_hwfn(cdev, i) { 688 663 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 689 - 690 - rc = qed_fw_vport(p_hwfn, 0, &fw_vport); 691 - if (rc != 0) 692 - return rc; 693 664 694 665 /* Enable DMAE in PXP */ 695 666 rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); ··· 750 729 } 751 730 752 731 p_hwfn->hw_init_done = true; 753 - 754 - /* init PF stats */ 755 - p_stat = &p_hwfn->storm_stats; 756 - p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM + 757 - 
MSTORM_QUEUE_STAT_OFFSET(fw_vport); 758 - p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); 759 - 760 - p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM + 761 - USTORM_QUEUE_STAT_OFFSET(fw_vport); 762 - p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); 763 - 764 - p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM + 765 - PSTORM_QUEUE_STAT_OFFSET(fw_vport); 766 - p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); 767 - 768 - p_address = &p_stat->tstats.address; 769 - *p_address = BAR0_MAP_REG_TSDM_RAM + 770 - TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); 771 - p_stat->tstats.len = sizeof(struct tstorm_per_port_stat); 772 732 } 773 733 774 734 return 0; 775 735 } 776 736 777 737 #define QED_HW_STOP_RETRY_LIMIT (10) 738 + static inline void qed_hw_timers_stop(struct qed_dev *cdev, 739 + struct qed_hwfn *p_hwfn, 740 + struct qed_ptt *p_ptt) 741 + { 742 + int i; 743 + 744 + /* close timers */ 745 + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 746 + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 747 + 748 + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { 749 + if ((!qed_rd(p_hwfn, p_ptt, 750 + TM_REG_PF_SCAN_ACTIVE_CONN)) && 751 + (!qed_rd(p_hwfn, p_ptt, 752 + TM_REG_PF_SCAN_ACTIVE_TASK))) 753 + break; 754 + 755 + /* Dependent on number of connection/tasks, possibly 756 + * 1ms sleep is required between polls 757 + */ 758 + usleep_range(1000, 2000); 759 + } 760 + 761 + if (i < QED_HW_STOP_RETRY_LIMIT) 762 + return; 763 + 764 + DP_NOTICE(p_hwfn, 765 + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 766 + (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), 767 + (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); 768 + } 769 + 770 + void qed_hw_timers_stop_all(struct qed_dev *cdev) 771 + { 772 + int j; 773 + 774 + for_each_hwfn(cdev, j) { 775 + struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; 776 + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; 777 + 778 + qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 779 + } 780 + } 781 
+ 778 782 int qed_hw_stop(struct qed_dev *cdev) 779 783 { 780 784 int rc = 0, t_rc; 781 - int i, j; 785 + int j; 782 786 783 787 for_each_hwfn(cdev, j) { 784 788 struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; ··· 816 770 817 771 rc = qed_sp_pf_stop(p_hwfn); 818 772 if (rc) 819 - return rc; 773 + DP_NOTICE(p_hwfn, 774 + "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n"); 820 775 821 776 qed_wr(p_hwfn, p_ptt, 822 777 NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); ··· 828 781 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 829 782 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 830 783 831 - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 832 - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 833 - for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { 834 - if ((!qed_rd(p_hwfn, p_ptt, 835 - TM_REG_PF_SCAN_ACTIVE_CONN)) && 836 - (!qed_rd(p_hwfn, p_ptt, 837 - TM_REG_PF_SCAN_ACTIVE_TASK))) 838 - break; 839 - 840 - usleep_range(1000, 2000); 841 - } 842 - if (i == QED_HW_STOP_RETRY_LIMIT) 843 - DP_NOTICE(p_hwfn, 844 - "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 845 - (u8)qed_rd(p_hwfn, p_ptt, 846 - TM_REG_PF_SCAN_ACTIVE_CONN), 847 - (u8)qed_rd(p_hwfn, p_ptt, 848 - TM_REG_PF_SCAN_ACTIVE_TASK)); 784 + qed_hw_timers_stop(cdev, p_hwfn, p_ptt); 849 785 850 786 /* Disable Attention Generation */ 851 787 qed_int_igu_disable_int(p_hwfn, p_ptt); ··· 857 827 858 828 void qed_hw_stop_fastpath(struct qed_dev *cdev) 859 829 { 860 - int i, j; 830 + int j; 861 831 862 832 for_each_hwfn(cdev, j) { 863 833 struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; ··· 875 845 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); 876 846 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); 877 847 qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); 878 - 879 - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); 880 - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); 881 - for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { 882 - if 
((!qed_rd(p_hwfn, p_ptt, 883 - TM_REG_PF_SCAN_ACTIVE_CONN)) && 884 - (!qed_rd(p_hwfn, p_ptt, 885 - TM_REG_PF_SCAN_ACTIVE_TASK))) 886 - break; 887 - 888 - usleep_range(1000, 2000); 889 - } 890 - if (i == QED_HW_STOP_RETRY_LIMIT) 891 - DP_NOTICE(p_hwfn, 892 - "Timers linear scans are not over [Connection %02x Tasks %02x]\n", 893 - (u8)qed_rd(p_hwfn, p_ptt, 894 - TM_REG_PF_SCAN_ACTIVE_CONN), 895 - (u8)qed_rd(p_hwfn, p_ptt, 896 - TM_REG_PF_SCAN_ACTIVE_TASK)); 897 848 898 849 qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); 899 850 ··· 960 949 } 961 950 962 951 /* Setup bar access */ 963 - static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) 952 + static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) 964 953 { 965 - int rc; 966 - 967 - /* Allocate PTT pool */ 968 - rc = qed_ptt_pool_alloc(p_hwfn); 969 - if (rc) 970 - return rc; 971 - 972 - /* Allocate the main PTT */ 973 - p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 974 - 975 954 /* clear indirect access */ 976 955 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0); 977 956 qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0); ··· 976 975 /* enable internal target-read */ 977 976 qed_wr(p_hwfn, p_hwfn->p_main_ptt, 978 977 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); 979 - 980 - return 0; 981 978 } 982 979 983 980 static void get_function_id(struct qed_hwfn *p_hwfn) ··· 1082 1083 1083 1084 /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ 1084 1085 nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); 1085 - 1086 - /* Read Vendor Id / Device Id */ 1087 - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 1088 - offsetof(struct nvm_cfg1, glob) + 1089 - offsetof(struct nvm_cfg1_glob, pci_id); 1090 - p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) & 1091 - NVM_CFG1_GLOB_VENDOR_ID_MASK; 1092 1086 1093 1087 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 1094 1088 offsetof(struct nvm_cfg1, glob) + ··· 1276 1284 return rc; 1277 1285 } 
1278 1286 1279 - static void qed_get_dev_info(struct qed_dev *cdev) 1287 + static int qed_get_dev_info(struct qed_dev *cdev) 1280 1288 { 1281 1289 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 1282 1290 u32 tmp; ··· 1315 1323 "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", 1316 1324 cdev->chip_num, cdev->chip_rev, 1317 1325 cdev->chip_bond_id, cdev->chip_metal); 1326 + 1327 + if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) { 1328 + DP_NOTICE(cdev->hwfns, 1329 + "The chip type/rev (BB A0) is not supported!\n"); 1330 + return -EINVAL; 1331 + } 1332 + 1333 + return 0; 1318 1334 } 1319 1335 1320 1336 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, ··· 1345 1345 1346 1346 get_function_id(p_hwfn); 1347 1347 1348 - rc = qed_hw_hwfn_prepare(p_hwfn); 1348 + /* Allocate PTT pool */ 1349 + rc = qed_ptt_pool_alloc(p_hwfn); 1349 1350 if (rc) { 1350 1351 DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n"); 1351 1352 goto err0; 1352 1353 } 1353 1354 1355 + /* Allocate the main PTT */ 1356 + p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); 1357 + 1354 1358 /* First hwfn learns basic information, e.g., number of hwfns */ 1355 - if (!p_hwfn->my_id) 1356 - qed_get_dev_info(p_hwfn->cdev); 1359 + if (!p_hwfn->my_id) { 1360 + rc = qed_get_dev_info(p_hwfn->cdev); 1361 + if (rc != 0) 1362 + goto err1; 1363 + } 1364 + 1365 + qed_hw_hwfn_prepare(p_hwfn); 1357 1366 1358 1367 /* Initialize MCP structure */ 1359 1368 rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); ··· 1394 1385 return rc; 1395 1386 } 1396 1387 1397 - static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, 1398 - u8 bar_id) 1399 - { 1400 - u32 bar_reg = (bar_id == 0 ? 
PGLUE_B_REG_PF_BAR0_SIZE 1401 - : PGLUE_B_REG_PF_BAR1_SIZE); 1402 - u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); 1403 - 1404 - /* Get the BAR size(in KB) from hardware given val */ 1405 - return 1 << (val + 15); 1406 - } 1407 - 1408 1388 int qed_hw_prepare(struct qed_dev *cdev, 1409 1389 int personality) 1410 1390 { ··· 1418 1420 u8 __iomem *addr; 1419 1421 1420 1422 /* adjust bar offset for second engine */ 1421 - addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; 1423 + addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2; 1422 1424 p_regview = addr; 1423 1425 1424 1426 /* adjust doorbell bar offset for second engine */ 1425 - addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; 1427 + addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2; 1426 1428 p_doorbell = addr; 1427 1429 1428 1430 /* prepare second hw function */ ··· 1532 1534 dma_free_coherent(&cdev->pdev->dev, size, 1533 1535 p_chain->p_virt_addr, 1534 1536 p_chain->p_phys_addr); 1535 - } 1536 - 1537 - static void __qed_get_vport_stats(struct qed_dev *cdev, 1538 - struct qed_eth_stats *stats) 1539 - { 1540 - int i, j; 1541 - 1542 - memset(stats, 0, sizeof(*stats)); 1543 - 1544 - for_each_hwfn(cdev, i) { 1545 - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1546 - struct eth_mstorm_per_queue_stat mstats; 1547 - struct eth_ustorm_per_queue_stat ustats; 1548 - struct eth_pstorm_per_queue_stat pstats; 1549 - struct tstorm_per_port_stat tstats; 1550 - struct port_stats port_stats; 1551 - struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); 1552 - 1553 - if (!p_ptt) { 1554 - DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 1555 - continue; 1556 - } 1557 - 1558 - memset(&mstats, 0, sizeof(mstats)); 1559 - qed_memcpy_from(p_hwfn, p_ptt, &mstats, 1560 - p_hwfn->storm_stats.mstats.address, 1561 - p_hwfn->storm_stats.mstats.len); 1562 - 1563 - memset(&ustats, 0, sizeof(ustats)); 1564 - qed_memcpy_from(p_hwfn, p_ptt, &ustats, 1565 - p_hwfn->storm_stats.ustats.address, 1566 - 
p_hwfn->storm_stats.ustats.len); 1567 - 1568 - memset(&pstats, 0, sizeof(pstats)); 1569 - qed_memcpy_from(p_hwfn, p_ptt, &pstats, 1570 - p_hwfn->storm_stats.pstats.address, 1571 - p_hwfn->storm_stats.pstats.len); 1572 - 1573 - memset(&tstats, 0, sizeof(tstats)); 1574 - qed_memcpy_from(p_hwfn, p_ptt, &tstats, 1575 - p_hwfn->storm_stats.tstats.address, 1576 - p_hwfn->storm_stats.tstats.len); 1577 - 1578 - memset(&port_stats, 0, sizeof(port_stats)); 1579 - 1580 - if (p_hwfn->mcp_info) 1581 - qed_memcpy_from(p_hwfn, p_ptt, &port_stats, 1582 - p_hwfn->mcp_info->port_addr + 1583 - offsetof(struct public_port, stats), 1584 - sizeof(port_stats)); 1585 - qed_ptt_release(p_hwfn, p_ptt); 1586 - 1587 - stats->no_buff_discards += 1588 - HILO_64_REGPAIR(mstats.no_buff_discard); 1589 - stats->packet_too_big_discard += 1590 - HILO_64_REGPAIR(mstats.packet_too_big_discard); 1591 - stats->ttl0_discard += 1592 - HILO_64_REGPAIR(mstats.ttl0_discard); 1593 - stats->tpa_coalesced_pkts += 1594 - HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1595 - stats->tpa_coalesced_events += 1596 - HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1597 - stats->tpa_aborts_num += 1598 - HILO_64_REGPAIR(mstats.tpa_aborts_num); 1599 - stats->tpa_coalesced_bytes += 1600 - HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1601 - 1602 - stats->rx_ucast_bytes += 1603 - HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1604 - stats->rx_mcast_bytes += 1605 - HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1606 - stats->rx_bcast_bytes += 1607 - HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1608 - stats->rx_ucast_pkts += 1609 - HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1610 - stats->rx_mcast_pkts += 1611 - HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1612 - stats->rx_bcast_pkts += 1613 - HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1614 - 1615 - stats->mftag_filter_discards += 1616 - HILO_64_REGPAIR(tstats.mftag_filter_discard); 1617 - stats->mac_filter_discards += 1618 - HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1619 - 1620 - stats->tx_ucast_bytes 
+= 1621 - HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1622 - stats->tx_mcast_bytes += 1623 - HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1624 - stats->tx_bcast_bytes += 1625 - HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1626 - stats->tx_ucast_pkts += 1627 - HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1628 - stats->tx_mcast_pkts += 1629 - HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1630 - stats->tx_bcast_pkts += 1631 - HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1632 - stats->tx_err_drop_pkts += 1633 - HILO_64_REGPAIR(pstats.error_drop_pkts); 1634 - stats->rx_64_byte_packets += port_stats.pmm.r64; 1635 - stats->rx_127_byte_packets += port_stats.pmm.r127; 1636 - stats->rx_255_byte_packets += port_stats.pmm.r255; 1637 - stats->rx_511_byte_packets += port_stats.pmm.r511; 1638 - stats->rx_1023_byte_packets += port_stats.pmm.r1023; 1639 - stats->rx_1518_byte_packets += port_stats.pmm.r1518; 1640 - stats->rx_1522_byte_packets += port_stats.pmm.r1522; 1641 - stats->rx_2047_byte_packets += port_stats.pmm.r2047; 1642 - stats->rx_4095_byte_packets += port_stats.pmm.r4095; 1643 - stats->rx_9216_byte_packets += port_stats.pmm.r9216; 1644 - stats->rx_16383_byte_packets += port_stats.pmm.r16383; 1645 - stats->rx_crc_errors += port_stats.pmm.rfcs; 1646 - stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; 1647 - stats->rx_pause_frames += port_stats.pmm.rxpf; 1648 - stats->rx_pfc_frames += port_stats.pmm.rxpp; 1649 - stats->rx_align_errors += port_stats.pmm.raln; 1650 - stats->rx_carrier_errors += port_stats.pmm.rfcr; 1651 - stats->rx_oversize_packets += port_stats.pmm.rovr; 1652 - stats->rx_jabbers += port_stats.pmm.rjbr; 1653 - stats->rx_undersize_packets += port_stats.pmm.rund; 1654 - stats->rx_fragments += port_stats.pmm.rfrg; 1655 - stats->tx_64_byte_packets += port_stats.pmm.t64; 1656 - stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; 1657 - stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; 1658 - stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; 1659 - 
stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; 1660 - stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; 1661 - stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; 1662 - stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; 1663 - stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; 1664 - stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; 1665 - stats->tx_pause_frames += port_stats.pmm.txpf; 1666 - stats->tx_pfc_frames += port_stats.pmm.txpp; 1667 - stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; 1668 - stats->tx_total_collisions += port_stats.pmm.tncl; 1669 - stats->rx_mac_bytes += port_stats.pmm.rbyte; 1670 - stats->rx_mac_uc_packets += port_stats.pmm.rxuca; 1671 - stats->rx_mac_mc_packets += port_stats.pmm.rxmca; 1672 - stats->rx_mac_bc_packets += port_stats.pmm.rxbca; 1673 - stats->rx_mac_frames_ok += port_stats.pmm.rxpok; 1674 - stats->tx_mac_bytes += port_stats.pmm.tbyte; 1675 - stats->tx_mac_uc_packets += port_stats.pmm.txuca; 1676 - stats->tx_mac_mc_packets += port_stats.pmm.txmca; 1677 - stats->tx_mac_bc_packets += port_stats.pmm.txbca; 1678 - stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; 1679 - 1680 - for (j = 0; j < 8; j++) { 1681 - stats->brb_truncates += port_stats.brb.brb_truncate[j]; 1682 - stats->brb_discards += port_stats.brb.brb_discard[j]; 1683 - } 1684 - } 1685 - } 1686 - 1687 - void qed_get_vport_stats(struct qed_dev *cdev, 1688 - struct qed_eth_stats *stats) 1689 - { 1690 - u32 i; 1691 - 1692 - if (!cdev) { 1693 - memset(stats, 0, sizeof(*stats)); 1694 - return; 1695 - } 1696 - 1697 - __qed_get_vport_stats(cdev, stats); 1698 - 1699 - if (!cdev->reset_stats) 1700 - return; 1701 - 1702 - /* Reduce the statistics baseline */ 1703 - for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) 1704 - ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; 1705 - } 1706 - 1707 - /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ 1708 - void 
qed_reset_vport_stats(struct qed_dev *cdev) 1709 - { 1710 - int i; 1711 - 1712 - for_each_hwfn(cdev, i) { 1713 - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1714 - struct eth_mstorm_per_queue_stat mstats; 1715 - struct eth_ustorm_per_queue_stat ustats; 1716 - struct eth_pstorm_per_queue_stat pstats; 1717 - struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); 1718 - 1719 - if (!p_ptt) { 1720 - DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 1721 - continue; 1722 - } 1723 - 1724 - memset(&mstats, 0, sizeof(mstats)); 1725 - qed_memcpy_to(p_hwfn, p_ptt, 1726 - p_hwfn->storm_stats.mstats.address, 1727 - &mstats, 1728 - p_hwfn->storm_stats.mstats.len); 1729 - 1730 - memset(&ustats, 0, sizeof(ustats)); 1731 - qed_memcpy_to(p_hwfn, p_ptt, 1732 - p_hwfn->storm_stats.ustats.address, 1733 - &ustats, 1734 - p_hwfn->storm_stats.ustats.len); 1735 - 1736 - memset(&pstats, 0, sizeof(pstats)); 1737 - qed_memcpy_to(p_hwfn, p_ptt, 1738 - p_hwfn->storm_stats.pstats.address, 1739 - &pstats, 1740 - p_hwfn->storm_stats.pstats.len); 1741 - 1742 - qed_ptt_release(p_hwfn, p_ptt); 1743 - } 1744 - 1745 - /* PORT statistics are not necessarily reset, so we need to 1746 - * read and create a baseline for future statistics. 1747 - */ 1748 - if (!cdev->reset_stats) 1749 - DP_INFO(cdev, "Reset stats not allocated\n"); 1750 - else 1751 - __qed_get_vport_stats(cdev, cdev->reset_stats); 1752 1537 } 1753 1538 1754 1539 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+9 -2
drivers/net/ethernet/qlogic/qed/qed_dev_api.h
··· 78 78 const u8 *bin_fw_data); 79 79 80 80 /** 81 + * @brief qed_hw_timers_stop_all - stop the timers HW block 82 + * 83 + * @param cdev 84 + * 85 + * @return void 86 + */ 87 + void qed_hw_timers_stop_all(struct qed_dev *cdev); 88 + 89 + /** 81 90 * @brief qed_hw_stop - 82 91 * 83 92 * @param cdev ··· 165 156 */ 166 157 void qed_ptt_release(struct qed_hwfn *p_hwfn, 167 158 struct qed_ptt *p_ptt); 168 - void qed_get_vport_stats(struct qed_dev *cdev, 169 - struct qed_eth_stats *stats); 170 159 void qed_reset_vport_stats(struct qed_dev *cdev); 171 160 172 161 enum qed_dmae_address_type_t {
+1 -1
drivers/net/ethernet/qlogic/qed/qed_hsi.h
··· 968 968 969 969 enum init_modes { 970 970 MODE_BB_A0, 971 - MODE_RESERVED, 971 + MODE_BB_B0, 972 972 MODE_RESERVED2, 973 973 MODE_ASIC, 974 974 MODE_RESERVED3,
+323
drivers/net/ethernet/qlogic/qed/qed_l2.c
··· 31 31 #include "qed_hsi.h" 32 32 #include "qed_hw.h" 33 33 #include "qed_int.h" 34 + #include "qed_mcp.h" 34 35 #include "qed_reg_addr.h" 35 36 #include "qed_sp.h" 36 37 ··· 1230 1229 } 1231 1230 1232 1231 return rc; 1232 + } 1233 + 1234 + /* Statistics related code */ 1235 + static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, 1236 + u32 *p_addr, 1237 + u32 *p_len, 1238 + u16 statistics_bin) 1239 + { 1240 + *p_addr = BAR0_MAP_REG_PSDM_RAM + 1241 + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1242 + *p_len = sizeof(struct eth_pstorm_per_queue_stat); 1243 + } 1244 + 1245 + static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, 1246 + struct qed_ptt *p_ptt, 1247 + struct qed_eth_stats *p_stats, 1248 + u16 statistics_bin) 1249 + { 1250 + struct eth_pstorm_per_queue_stat pstats; 1251 + u32 pstats_addr = 0, pstats_len = 0; 1252 + 1253 + __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, 1254 + statistics_bin); 1255 + 1256 + memset(&pstats, 0, sizeof(pstats)); 1257 + qed_memcpy_from(p_hwfn, p_ptt, &pstats, 1258 + pstats_addr, pstats_len); 1259 + 1260 + p_stats->tx_ucast_bytes += 1261 + HILO_64_REGPAIR(pstats.sent_ucast_bytes); 1262 + p_stats->tx_mcast_bytes += 1263 + HILO_64_REGPAIR(pstats.sent_mcast_bytes); 1264 + p_stats->tx_bcast_bytes += 1265 + HILO_64_REGPAIR(pstats.sent_bcast_bytes); 1266 + p_stats->tx_ucast_pkts += 1267 + HILO_64_REGPAIR(pstats.sent_ucast_pkts); 1268 + p_stats->tx_mcast_pkts += 1269 + HILO_64_REGPAIR(pstats.sent_mcast_pkts); 1270 + p_stats->tx_bcast_pkts += 1271 + HILO_64_REGPAIR(pstats.sent_bcast_pkts); 1272 + p_stats->tx_err_drop_pkts += 1273 + HILO_64_REGPAIR(pstats.error_drop_pkts); 1274 + } 1275 + 1276 + static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn, 1277 + u32 *p_addr, 1278 + u32 *p_len) 1279 + { 1280 + *p_addr = BAR0_MAP_REG_TSDM_RAM + 1281 + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); 1282 + *p_len = sizeof(struct tstorm_per_port_stat); 1283 + } 1284 + 1285 + static void 
__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, 1286 + struct qed_ptt *p_ptt, 1287 + struct qed_eth_stats *p_stats, 1288 + u16 statistics_bin) 1289 + { 1290 + u32 tstats_addr = 0, tstats_len = 0; 1291 + struct tstorm_per_port_stat tstats; 1292 + 1293 + __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len); 1294 + 1295 + memset(&tstats, 0, sizeof(tstats)); 1296 + qed_memcpy_from(p_hwfn, p_ptt, &tstats, 1297 + tstats_addr, tstats_len); 1298 + 1299 + p_stats->mftag_filter_discards += 1300 + HILO_64_REGPAIR(tstats.mftag_filter_discard); 1301 + p_stats->mac_filter_discards += 1302 + HILO_64_REGPAIR(tstats.eth_mac_filter_discard); 1303 + } 1304 + 1305 + static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, 1306 + u32 *p_addr, 1307 + u32 *p_len, 1308 + u16 statistics_bin) 1309 + { 1310 + *p_addr = BAR0_MAP_REG_USDM_RAM + 1311 + USTORM_QUEUE_STAT_OFFSET(statistics_bin); 1312 + *p_len = sizeof(struct eth_ustorm_per_queue_stat); 1313 + } 1314 + 1315 + static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, 1316 + struct qed_ptt *p_ptt, 1317 + struct qed_eth_stats *p_stats, 1318 + u16 statistics_bin) 1319 + { 1320 + struct eth_ustorm_per_queue_stat ustats; 1321 + u32 ustats_addr = 0, ustats_len = 0; 1322 + 1323 + __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, 1324 + statistics_bin); 1325 + 1326 + memset(&ustats, 0, sizeof(ustats)); 1327 + qed_memcpy_from(p_hwfn, p_ptt, &ustats, 1328 + ustats_addr, ustats_len); 1329 + 1330 + p_stats->rx_ucast_bytes += 1331 + HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 1332 + p_stats->rx_mcast_bytes += 1333 + HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 1334 + p_stats->rx_bcast_bytes += 1335 + HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 1336 + p_stats->rx_ucast_pkts += 1337 + HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 1338 + p_stats->rx_mcast_pkts += 1339 + HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 1340 + p_stats->rx_bcast_pkts += 1341 + HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 1342 + } 1343 + 1344 + static void 
__qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, 1345 + u32 *p_addr, 1346 + u32 *p_len, 1347 + u16 statistics_bin) 1348 + { 1349 + *p_addr = BAR0_MAP_REG_MSDM_RAM + 1350 + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); 1351 + *p_len = sizeof(struct eth_mstorm_per_queue_stat); 1352 + } 1353 + 1354 + static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, 1355 + struct qed_ptt *p_ptt, 1356 + struct qed_eth_stats *p_stats, 1357 + u16 statistics_bin) 1358 + { 1359 + struct eth_mstorm_per_queue_stat mstats; 1360 + u32 mstats_addr = 0, mstats_len = 0; 1361 + 1362 + __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, 1363 + statistics_bin); 1364 + 1365 + memset(&mstats, 0, sizeof(mstats)); 1366 + qed_memcpy_from(p_hwfn, p_ptt, &mstats, 1367 + mstats_addr, mstats_len); 1368 + 1369 + p_stats->no_buff_discards += 1370 + HILO_64_REGPAIR(mstats.no_buff_discard); 1371 + p_stats->packet_too_big_discard += 1372 + HILO_64_REGPAIR(mstats.packet_too_big_discard); 1373 + p_stats->ttl0_discard += 1374 + HILO_64_REGPAIR(mstats.ttl0_discard); 1375 + p_stats->tpa_coalesced_pkts += 1376 + HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); 1377 + p_stats->tpa_coalesced_events += 1378 + HILO_64_REGPAIR(mstats.tpa_coalesced_events); 1379 + p_stats->tpa_aborts_num += 1380 + HILO_64_REGPAIR(mstats.tpa_aborts_num); 1381 + p_stats->tpa_coalesced_bytes += 1382 + HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); 1383 + } 1384 + 1385 + static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, 1386 + struct qed_ptt *p_ptt, 1387 + struct qed_eth_stats *p_stats) 1388 + { 1389 + struct port_stats port_stats; 1390 + int j; 1391 + 1392 + memset(&port_stats, 0, sizeof(port_stats)); 1393 + 1394 + qed_memcpy_from(p_hwfn, p_ptt, &port_stats, 1395 + p_hwfn->mcp_info->port_addr + 1396 + offsetof(struct public_port, stats), 1397 + sizeof(port_stats)); 1398 + 1399 + p_stats->rx_64_byte_packets += port_stats.pmm.r64; 1400 + p_stats->rx_127_byte_packets += port_stats.pmm.r127; 1401 + 
p_stats->rx_255_byte_packets += port_stats.pmm.r255; 1402 + p_stats->rx_511_byte_packets += port_stats.pmm.r511; 1403 + p_stats->rx_1023_byte_packets += port_stats.pmm.r1023; 1404 + p_stats->rx_1518_byte_packets += port_stats.pmm.r1518; 1405 + p_stats->rx_1522_byte_packets += port_stats.pmm.r1522; 1406 + p_stats->rx_2047_byte_packets += port_stats.pmm.r2047; 1407 + p_stats->rx_4095_byte_packets += port_stats.pmm.r4095; 1408 + p_stats->rx_9216_byte_packets += port_stats.pmm.r9216; 1409 + p_stats->rx_16383_byte_packets += port_stats.pmm.r16383; 1410 + p_stats->rx_crc_errors += port_stats.pmm.rfcs; 1411 + p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; 1412 + p_stats->rx_pause_frames += port_stats.pmm.rxpf; 1413 + p_stats->rx_pfc_frames += port_stats.pmm.rxpp; 1414 + p_stats->rx_align_errors += port_stats.pmm.raln; 1415 + p_stats->rx_carrier_errors += port_stats.pmm.rfcr; 1416 + p_stats->rx_oversize_packets += port_stats.pmm.rovr; 1417 + p_stats->rx_jabbers += port_stats.pmm.rjbr; 1418 + p_stats->rx_undersize_packets += port_stats.pmm.rund; 1419 + p_stats->rx_fragments += port_stats.pmm.rfrg; 1420 + p_stats->tx_64_byte_packets += port_stats.pmm.t64; 1421 + p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; 1422 + p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; 1423 + p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; 1424 + p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; 1425 + p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; 1426 + p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; 1427 + p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; 1428 + p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; 1429 + p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; 1430 + p_stats->tx_pause_frames += port_stats.pmm.txpf; 1431 + p_stats->tx_pfc_frames += port_stats.pmm.txpp; 1432 + p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; 1433 + p_stats->tx_total_collisions += 
port_stats.pmm.tncl; 1434 + p_stats->rx_mac_bytes += port_stats.pmm.rbyte; 1435 + p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; 1436 + p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; 1437 + p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; 1438 + p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; 1439 + p_stats->tx_mac_bytes += port_stats.pmm.tbyte; 1440 + p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; 1441 + p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; 1442 + p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; 1443 + p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; 1444 + for (j = 0; j < 8; j++) { 1445 + p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; 1446 + p_stats->brb_discards += port_stats.brb.brb_discard[j]; 1447 + } 1448 + } 1449 + 1450 + static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, 1451 + struct qed_ptt *p_ptt, 1452 + struct qed_eth_stats *stats, 1453 + u16 statistics_bin) 1454 + { 1455 + __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); 1456 + __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); 1457 + __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); 1458 + __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); 1459 + 1460 + if (p_hwfn->mcp_info) 1461 + __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); 1462 + } 1463 + 1464 + static void _qed_get_vport_stats(struct qed_dev *cdev, 1465 + struct qed_eth_stats *stats) 1466 + { 1467 + u8 fw_vport = 0; 1468 + int i; 1469 + 1470 + memset(stats, 0, sizeof(*stats)); 1471 + 1472 + for_each_hwfn(cdev, i) { 1473 + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1474 + struct qed_ptt *p_ptt; 1475 + 1476 + /* The main vport index is relative first */ 1477 + if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { 1478 + DP_ERR(p_hwfn, "No vport available!\n"); 1479 + continue; 1480 + } 1481 + 1482 + p_ptt = qed_ptt_acquire(p_hwfn); 1483 + if (!p_ptt) { 1484 + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 1485 + continue; 1486 + } 1487 + 1488 + 
__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport); 1489 + 1490 + qed_ptt_release(p_hwfn, p_ptt); 1491 + } 1492 + } 1493 + 1494 + void qed_get_vport_stats(struct qed_dev *cdev, 1495 + struct qed_eth_stats *stats) 1496 + { 1497 + u32 i; 1498 + 1499 + if (!cdev) { 1500 + memset(stats, 0, sizeof(*stats)); 1501 + return; 1502 + } 1503 + 1504 + _qed_get_vport_stats(cdev, stats); 1505 + 1506 + if (!cdev->reset_stats) 1507 + return; 1508 + 1509 + /* Reduce the statistics baseline */ 1510 + for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) 1511 + ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; 1512 + } 1513 + 1514 + /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ 1515 + void qed_reset_vport_stats(struct qed_dev *cdev) 1516 + { 1517 + int i; 1518 + 1519 + for_each_hwfn(cdev, i) { 1520 + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1521 + struct eth_mstorm_per_queue_stat mstats; 1522 + struct eth_ustorm_per_queue_stat ustats; 1523 + struct eth_pstorm_per_queue_stat pstats; 1524 + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); 1525 + u32 addr = 0, len = 0; 1526 + 1527 + if (!p_ptt) { 1528 + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 1529 + continue; 1530 + } 1531 + 1532 + memset(&mstats, 0, sizeof(mstats)); 1533 + __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); 1534 + qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); 1535 + 1536 + memset(&ustats, 0, sizeof(ustats)); 1537 + __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); 1538 + qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); 1539 + 1540 + memset(&pstats, 0, sizeof(pstats)); 1541 + __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); 1542 + qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); 1543 + 1544 + qed_ptt_release(p_hwfn, p_ptt); 1545 + } 1546 + 1547 + /* PORT statistics are not necessarily reset, so we need to 1548 + * read and create a baseline for future statistics. 
1549 + */ 1550 + if (!cdev->reset_stats) 1551 + DP_INFO(cdev, "Reset stats not allocated\n"); 1552 + else 1553 + _qed_get_vport_stats(cdev, cdev->reset_stats); 1233 1554 } 1234 1555 1235 1556 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+6 -4
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 779 779 rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode, 780 780 true, data); 781 781 if (rc) 782 - goto err3; 782 + goto err2; 783 783 784 784 DP_INFO(cdev, 785 785 "HW initialization and function start completed successfully\n"); ··· 798 798 return rc; 799 799 } 800 800 801 + qed_reset_vport_stats(cdev); 802 + 801 803 return 0; 802 804 803 - err3: 804 - qed_free_stream_mem(cdev); 805 - qed_slowpath_irq_free(cdev); 806 805 err2: 806 + qed_hw_timers_stop_all(cdev); 807 + qed_slowpath_irq_free(cdev); 808 + qed_free_stream_mem(cdev); 807 809 qed_disable_msix(cdev); 808 810 err1: 809 811 qed_resc_free(cdev);