Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'bnxt_en-updates'

Michael Chan says:

====================
bnxt_en: Updates for net-next.

-Add default VLAN support for VFs.
-Add NPAR (NIC partitioning) support.
-Add support for new device 5731x and 5741x. GRO logic is different.
-Support new ETHTOOL_{G|S}LINKSETTINGS.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+442 -114
+254 -38
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 75 75 BCM57301, 76 76 BCM57302, 77 77 BCM57304, 78 + BCM57311, 79 + BCM57312, 78 80 BCM57402, 79 81 BCM57404, 80 82 BCM57406, 83 + BCM57404_NPAR, 84 + BCM57412, 85 + BCM57414, 86 + BCM57416, 87 + BCM57417, 88 + BCM57414_NPAR, 81 89 BCM57314, 82 90 BCM57304_VF, 83 91 BCM57404_VF, 92 + BCM57414_VF, 93 + BCM57314_VF, 84 94 }; 85 95 86 96 /* indexed by enum above */ ··· 100 90 { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" }, 101 91 { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, 102 92 { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, 93 + { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" }, 94 + { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, 103 95 { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" }, 104 96 { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, 105 97 { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" }, 98 + { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, 99 + { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" }, 100 + { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, 101 + { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" }, 102 + { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" }, 103 + { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, 106 104 { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, 107 105 { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, 108 106 { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, 107 + { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" }, 108 + { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" }, 109 109 }; 110 110 111 111 static const struct pci_device_id bnxt_pci_tbl[] = { 112 112 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, 113 113 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, 114 114 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = 
BCM57304 }, 115 + { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, 116 + { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, 115 117 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, 116 118 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, 117 119 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, 120 + { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57404_NPAR }, 121 + { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, 122 + { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, 123 + { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, 124 + { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, 125 + { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57414_NPAR }, 118 126 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, 119 127 #ifdef CONFIG_BNXT_SRIOV 120 128 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF }, 121 129 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF }, 130 + { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF }, 131 + { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF }, 122 132 #endif 123 133 { 0 } 124 134 }; ··· 155 125 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, 156 126 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, 157 127 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, 128 + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, 158 129 HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, 159 130 }; 160 131 161 132 static bool bnxt_vf_pciid(enum board_idx idx) 162 133 { 163 - return (idx == BCM57304_VF || idx == BCM57404_VF); 134 + return (idx == BCM57304_VF || idx == BCM57404_VF || 135 + idx == BCM57314_VF || idx == BCM57414_VF); 164 136 } 165 137 166 138 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) ··· 952 920 } 953 921 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); 954 922 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); 923 + tpa_info->hdr_info = 
le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); 955 924 956 925 rxr->rx_prod = NEXT_RX(prod); 957 926 cons = NEXT_RX(cons); ··· 971 938 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); 972 939 } 973 940 974 - #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 975 - #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) 976 - 977 - static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, 978 - struct rx_tpa_end_cmp *tpa_end, 979 - struct rx_tpa_end_cmp_ext *tpa_end1, 941 + static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, 942 + int payload_off, int tcp_ts, 980 943 struct sk_buff *skb) 981 944 { 982 945 #ifdef CONFIG_INET 983 946 struct tcphdr *th; 984 - int payload_off, tcp_opt_len = 0; 985 947 int len, nw_off; 986 - u16 segs; 948 + u16 outer_ip_off, inner_ip_off, inner_mac_off; 949 + u32 hdr_info = tpa_info->hdr_info; 950 + bool loopback = false; 987 951 988 - segs = TPA_END_TPA_SEGS(tpa_end); 989 - if (segs == 1) 990 - return skb; 952 + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); 953 + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); 954 + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); 991 955 992 - NAPI_GRO_CB(skb)->count = segs; 993 - skb_shinfo(skb)->gso_size = 994 - le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 995 - skb_shinfo(skb)->gso_type = tpa_info->gso_type; 996 - payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 997 - RX_TPA_END_CMP_PAYLOAD_OFFSET) >> 998 - RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; 999 - if (TPA_END_GRO_TS(tpa_end)) 956 + /* If the packet is an internal loopback packet, the offsets will 957 + * have an extra 4 bytes. 958 + */ 959 + if (inner_mac_off == 4) { 960 + loopback = true; 961 + } else if (inner_mac_off > 4) { 962 + __be16 proto = *((__be16 *)(skb->data + inner_ip_off - 963 + ETH_HLEN - 2)); 964 + 965 + /* We only support inner iPv4/ipv6. 
If we don't see the 966 + * correct protocol ID, it must be a loopback packet where 967 + * the offsets are off by 4. 968 + */ 969 + if (proto != htons(ETH_P_IP) && proto && htons(ETH_P_IPV6)) 970 + loopback = true; 971 + } 972 + if (loopback) { 973 + /* internal loopback packet, subtract all offsets by 4 */ 974 + inner_ip_off -= 4; 975 + inner_mac_off -= 4; 976 + outer_ip_off -= 4; 977 + } 978 + 979 + nw_off = inner_ip_off - ETH_HLEN; 980 + skb_set_network_header(skb, nw_off); 981 + if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { 982 + struct ipv6hdr *iph = ipv6_hdr(skb); 983 + 984 + skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); 985 + len = skb->len - skb_transport_offset(skb); 986 + th = tcp_hdr(skb); 987 + th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); 988 + } else { 989 + struct iphdr *iph = ip_hdr(skb); 990 + 991 + skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); 992 + len = skb->len - skb_transport_offset(skb); 993 + th = tcp_hdr(skb); 994 + th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); 995 + } 996 + 997 + if (inner_mac_off) { /* tunnel */ 998 + struct udphdr *uh = NULL; 999 + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - 1000 + ETH_HLEN - 2)); 1001 + 1002 + if (proto == htons(ETH_P_IP)) { 1003 + struct iphdr *iph = (struct iphdr *)skb->data; 1004 + 1005 + if (iph->protocol == IPPROTO_UDP) 1006 + uh = (struct udphdr *)(iph + 1); 1007 + } else { 1008 + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; 1009 + 1010 + if (iph->nexthdr == IPPROTO_UDP) 1011 + uh = (struct udphdr *)(iph + 1); 1012 + } 1013 + if (uh) { 1014 + if (uh->check) 1015 + skb_shinfo(skb)->gso_type |= 1016 + SKB_GSO_UDP_TUNNEL_CSUM; 1017 + else 1018 + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1019 + } 1020 + } 1021 + #endif 1022 + return skb; 1023 + } 1024 + 1025 + #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) 1026 + #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + 
sizeof(struct tcphdr)) 1027 + 1028 + static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, 1029 + int payload_off, int tcp_ts, 1030 + struct sk_buff *skb) 1031 + { 1032 + #ifdef CONFIG_INET 1033 + struct tcphdr *th; 1034 + int len, nw_off, tcp_opt_len; 1035 + 1036 + if (tcp_ts) 1000 1037 tcp_opt_len = 12; 1001 1038 1002 1039 if (tpa_info->gso_type == SKB_GSO_TCPV4) { ··· 1119 1016 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 1120 1017 } 1121 1018 } 1019 + #endif 1020 + return skb; 1021 + } 1022 + 1023 + static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, 1024 + struct bnxt_tpa_info *tpa_info, 1025 + struct rx_tpa_end_cmp *tpa_end, 1026 + struct rx_tpa_end_cmp_ext *tpa_end1, 1027 + struct sk_buff *skb) 1028 + { 1029 + #ifdef CONFIG_INET 1030 + int payload_off; 1031 + u16 segs; 1032 + 1033 + segs = TPA_END_TPA_SEGS(tpa_end); 1034 + if (segs == 1) 1035 + return skb; 1036 + 1037 + NAPI_GRO_CB(skb)->count = segs; 1038 + skb_shinfo(skb)->gso_size = 1039 + le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); 1040 + skb_shinfo(skb)->gso_type = tpa_info->gso_type; 1041 + payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & 1042 + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> 1043 + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; 1044 + skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); 1122 1045 #endif 1123 1046 return skb; 1124 1047 } ··· 1259 1130 } 1260 1131 1261 1132 if (TPA_END_GRO(tpa_end)) 1262 - skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb); 1133 + skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); 1263 1134 1264 1135 return skb; 1265 1136 } ··· 1487 1358 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); 1488 1359 break; 1489 1360 } 1361 + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: 1362 + if (BNXT_PF(bp)) 1363 + goto async_event_process_exit; 1364 + set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); 1365 + break; 1490 1366 default: 1491 1367 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", 
1492 1368 event_id); ··· 2396 2262 bp->flags &= ~BNXT_FLAG_TPA; 2397 2263 if (bp->dev->features & NETIF_F_LRO) 2398 2264 bp->flags |= BNXT_FLAG_LRO; 2399 - if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0)) 2265 + if (bp->dev->features & NETIF_F_GRO) 2400 2266 bp->flags |= BNXT_FLAG_GRO; 2401 2267 } 2402 2268 ··· 3411 3277 unsigned int ring = 0, grp_idx; 3412 3278 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3413 3279 struct hwrm_vnic_cfg_input req = {0}; 3280 + u16 def_vlan = 0; 3414 3281 3415 3282 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); 3416 3283 /* Only RSS support for now TBD: COS & LB */ ··· 3432 3297 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + 3433 3298 VLAN_HLEN); 3434 3299 3435 - if (bp->flags & BNXT_FLAG_STRIP_VLAN) 3300 + #ifdef CONFIG_BNXT_SRIOV 3301 + if (BNXT_VF(bp)) 3302 + def_vlan = bp->vf.vlan; 3303 + #endif 3304 + if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) 3436 3305 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); 3437 3306 3438 3307 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); ··· 3975 3836 return 0; 3976 3837 } 3977 3838 3839 + static int bnxt_hwrm_func_qcfg(struct bnxt *bp) 3840 + { 3841 + struct hwrm_func_qcfg_input req = {0}; 3842 + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; 3843 + int rc; 3844 + 3845 + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); 3846 + req.fid = cpu_to_le16(0xffff); 3847 + mutex_lock(&bp->hwrm_cmd_lock); 3848 + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 3849 + if (rc) 3850 + goto func_qcfg_exit; 3851 + 3852 + #ifdef CONFIG_BNXT_SRIOV 3853 + if (BNXT_VF(bp)) { 3854 + struct bnxt_vf_info *vf = &bp->vf; 3855 + 3856 + vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; 3857 + } 3858 + #endif 3859 + switch (resp->port_partition_type) { 3860 + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: 3861 + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: 3862 + case 
FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: 3863 + bp->port_partition_type = resp->port_partition_type; 3864 + break; 3865 + } 3866 + 3867 + func_qcfg_exit: 3868 + mutex_unlock(&bp->hwrm_cmd_lock); 3869 + return rc; 3870 + } 3871 + 3978 3872 int bnxt_hwrm_func_qcaps(struct bnxt *bp) 3979 3873 { 3980 3874 int rc = 0; ··· 4161 3989 4162 3990 if (resp->hwrm_intf_maj >= 1) 4163 3991 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); 3992 + 3993 + bp->chip_num = le16_to_cpu(resp->chip_num); 4164 3994 4165 3995 hwrm_ver_get_exit: 4166 3996 mutex_unlock(&bp->hwrm_cmd_lock); ··· 4403 4229 if (rc) 4404 4230 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", 4405 4231 rc); 4232 + 4233 + if (BNXT_VF(bp)) { 4234 + bnxt_hwrm_func_qcfg(bp); 4235 + netdev_update_features(bp->dev); 4236 + } 4406 4237 4407 4238 return 0; 4408 4239 ··· 4823 4644 int rc = 0; 4824 4645 struct hwrm_port_phy_qcaps_input req = {0}; 4825 4646 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; 4647 + struct bnxt_link_info *link_info = &bp->link_info; 4826 4648 4827 4649 if (bp->hwrm_spec_code < 0x10201) 4828 4650 return 0; ··· 4846 4666 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & 4847 4667 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; 4848 4668 } 4669 + link_info->support_auto_speeds = 4670 + le16_to_cpu(resp->supported_speeds_auto_mode); 4849 4671 4850 4672 hwrm_phy_qcaps_exit: 4851 4673 mutex_unlock(&bp->hwrm_cmd_lock); ··· 5105 4923 { 5106 4924 struct hwrm_port_phy_cfg_input req = {0}; 5107 4925 5108 - if (BNXT_VF(bp)) 4926 + if (!BNXT_SINGLE_PF(bp)) 5109 4927 return 0; 5110 4928 5111 4929 if (pci_num_vf(bp->pdev)) ··· 5651 5469 features |= NETIF_F_HW_VLAN_CTAG_RX | 5652 5470 NETIF_F_HW_VLAN_STAG_RX; 5653 5471 } 5654 - 5472 + #ifdef CONFIG_BNXT_SRIOV 5473 + if (BNXT_VF(bp)) { 5474 + if (bp->vf.vlan) { 5475 + features &= ~(NETIF_F_HW_VLAN_CTAG_RX | 5476 + NETIF_F_HW_VLAN_STAG_RX); 5477 + } 5478 + } 5479 + #endif 5655 5480 return features; 5656 5481 } 5657 
5482 ··· 5774 5585 } 5775 5586 } 5776 5587 5777 - static void bnxt_reset_task(struct bnxt *bp) 5588 + static void bnxt_reset_task(struct bnxt *bp, bool silent) 5778 5589 { 5779 - bnxt_dbg_dump_states(bp); 5590 + if (!silent) 5591 + bnxt_dbg_dump_states(bp); 5780 5592 if (netif_running(bp->dev)) { 5781 5593 bnxt_close_nic(bp, false, false); 5782 5594 bnxt_open_nic(bp, false, false); ··· 5828 5638 mod_timer(&bp->timer, jiffies + bp->current_interval); 5829 5639 } 5830 5640 5641 + /* Only called from bnxt_sp_task() */ 5642 + static void bnxt_reset(struct bnxt *bp, bool silent) 5643 + { 5644 + /* bnxt_reset_task() calls bnxt_close_nic() which waits 5645 + * for BNXT_STATE_IN_SP_TASK to clear. 5646 + * If there is a parallel dev_close(), bnxt_close() may be holding 5647 + * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we 5648 + * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). 5649 + */ 5650 + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5651 + rtnl_lock(); 5652 + if (test_bit(BNXT_STATE_OPEN, &bp->state)) 5653 + bnxt_reset_task(bp, silent); 5654 + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5655 + rtnl_unlock(); 5656 + } 5657 + 5831 5658 static void bnxt_cfg_ntp_filters(struct bnxt *); 5832 5659 5833 5660 static void bnxt_sp_task(struct work_struct *work) ··· 5881 5674 bnxt_hwrm_tunnel_dst_port_free( 5882 5675 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); 5883 5676 } 5884 - if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { 5885 - /* bnxt_reset_task() calls bnxt_close_nic() which waits 5886 - * for BNXT_STATE_IN_SP_TASK to clear. 
5887 - */ 5888 - clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5889 - rtnl_lock(); 5890 - bnxt_reset_task(bp); 5891 - set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); 5892 - rtnl_unlock(); 5893 - } 5677 + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) 5678 + bnxt_reset(bp, false); 5679 + 5680 + if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) 5681 + bnxt_reset(bp, true); 5894 5682 5895 5683 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) 5896 5684 bnxt_get_port_module_status(bp); ··· 6371 6169 return rc; 6372 6170 } 6373 6171 6172 + /* Older firmware does not have supported_auto_speeds, so assume 6173 + * that all supported speeds can be autonegotiated. 6174 + */ 6175 + if (link_info->auto_link_speeds && !link_info->support_auto_speeds) 6176 + link_info->support_auto_speeds = link_info->support_speeds; 6177 + 6374 6178 /*initialize the ethool setting copy with NVM settings */ 6375 6179 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 6376 6180 link_info->autoneg = BNXT_AUTONEG_SPEED; ··· 6550 6342 goto init_err; 6551 6343 6552 6344 mutex_init(&bp->hwrm_cmd_lock); 6553 - bnxt_hwrm_ver_get(bp); 6345 + rc = bnxt_hwrm_ver_get(bp); 6346 + if (rc) 6347 + goto init_err; 6348 + 6349 + bp->gro_func = bnxt_gro_func_5730x; 6350 + if (BNXT_CHIP_NUM_57X1X(bp->chip_num)) 6351 + bp->gro_func = bnxt_gro_func_5731x; 6554 6352 6555 6353 rc = bnxt_hwrm_func_drv_rgtr(bp); 6556 6354 if (rc) ··· 6578 6364 rc = -1; 6579 6365 goto init_err; 6580 6366 } 6367 + 6368 + bnxt_hwrm_func_qcfg(bp); 6581 6369 6582 6370 bnxt_set_tpa_flags(bp); 6583 6371 bnxt_set_ring_params(bp);
+61 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 298 298 #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) 299 299 #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) 300 300 #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) 301 + #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8) 301 302 302 303 __le32 rx_tpa_start_cmp_metadata; 303 304 __le32 rx_tpa_start_cmp_cfa_code_v2; 304 305 #define RX_TPA_START_CMP_V2 (0x1 << 0) 305 306 #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) 306 307 #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 307 - __le32 rx_tpa_start_cmp_unused5; 308 + __le32 rx_tpa_start_cmp_hdr_info; 308 309 }; 309 310 310 311 struct rx_tpa_end_cmp { ··· 585 584 u32 metadata; 586 585 enum pkt_hash_types hash_type; 587 586 u32 rss_hash; 587 + u32 hdr_info; 588 + 589 + #define BNXT_TPA_L4_SIZE(hdr_info) \ 590 + (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) 591 + 592 + #define BNXT_TPA_INNER_L3_OFF(hdr_info) \ 593 + (((hdr_info) >> 18) & 0x1ff) 594 + 595 + #define BNXT_TPA_INNER_L2_OFF(hdr_info) \ 596 + (((hdr_info) >> 9) & 0x1ff) 597 + 598 + #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ 599 + ((hdr_info) & 0x1ff) 588 600 }; 589 601 590 602 struct bnxt_rx_ring_info { ··· 849 835 #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 850 836 #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 851 837 #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 838 + u16 support_auto_speeds; 852 839 u16 lp_auto_link_speeds; 853 840 u16 force_link_speed; 854 841 u32 preemphasis; ··· 888 873 void __iomem *bar2; 889 874 890 875 u32 reg_base; 876 + u16 chip_num; 877 + #define CHIP_NUM_57301 0x16c8 878 + #define CHIP_NUM_57302 0x16c9 879 + #define CHIP_NUM_57304 0x16ca 880 + #define CHIP_NUM_57402 0x16d0 881 + #define CHIP_NUM_57404 0x16d1 882 + #define CHIP_NUM_57406 0x16d2 883 + 884 + #define CHIP_NUM_57311 0x16ce 885 + #define CHIP_NUM_57312 0x16cf 886 + #define CHIP_NUM_57314 0x16df 887 + #define CHIP_NUM_57412 0x16d6 888 + #define CHIP_NUM_57414 
0x16d7 889 + #define CHIP_NUM_57416 0x16d8 890 + #define CHIP_NUM_57417 0x16d9 891 + 892 + #define BNXT_CHIP_NUM_5730X(chip_num) \ 893 + ((chip_num) >= CHIP_NUM_57301 && \ 894 + (chip_num) <= CHIP_NUM_57304) 895 + 896 + #define BNXT_CHIP_NUM_5740X(chip_num) \ 897 + ((chip_num) >= CHIP_NUM_57402 && \ 898 + (chip_num) <= CHIP_NUM_57406) 899 + 900 + #define BNXT_CHIP_NUM_5731X(chip_num) \ 901 + ((chip_num) == CHIP_NUM_57311 || \ 902 + (chip_num) == CHIP_NUM_57312 || \ 903 + (chip_num) == CHIP_NUM_57314) 904 + 905 + #define BNXT_CHIP_NUM_5741X(chip_num) \ 906 + ((chip_num) >= CHIP_NUM_57412 && \ 907 + (chip_num) <= CHIP_NUM_57417) 908 + 909 + #define BNXT_CHIP_NUM_57X0X(chip_num) \ 910 + (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num)) 911 + 912 + #define BNXT_CHIP_NUM_57X1X(chip_num) \ 913 + (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num)) 891 914 892 915 struct net_device *dev; 893 916 struct pci_dev *pdev; ··· 960 907 961 908 #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) 962 909 #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) 910 + #define BNXT_NPAR(bp) ((bp)->port_partition_type) 911 + #define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp)) 963 912 964 913 struct bnxt_napi **bnapi; 965 914 966 915 struct bnxt_rx_ring_info *rx_ring; 967 916 struct bnxt_tx_ring_info *tx_ring; 917 + 918 + struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int, 919 + struct sk_buff *); 968 920 969 921 u32 rx_buf_size; 970 922 u32 rx_buf_use_size; /* useable size */ ··· 1051 993 __le16 vxlan_fw_dst_port_id; 1052 994 u8 nge_port_cnt; 1053 995 __le16 nge_fw_dst_port_id; 996 + u8 port_partition_type; 1054 997 1055 998 u16 rx_coal_ticks; 1056 999 u16 rx_coal_ticks_irq; ··· 1077 1018 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 1078 1019 #define BNXT_PERIODIC_STATS_SP_EVENT 9 1079 1020 #define BNXT_HWRM_PORT_MODULE_SP_EVENT 10 1021 + #define BNXT_RESET_TASK_SILENT_SP_EVENT 11 1080 1022 1081 1023 struct bnxt_pf_info pf; 1082 1024 #ifdef 
CONFIG_BNXT_SRIOV
+124 -75
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 628 628 return speed_mask; 629 629 } 630 630 631 - static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) 631 + #define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ 632 + { \ 633 + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ 634 + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 635 + 100baseT_Full); \ 636 + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ 637 + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 638 + 1000baseT_Full); \ 639 + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ 640 + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 641 + 10000baseT_Full); \ 642 + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ 643 + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 644 + 25000baseCR_Full); \ 645 + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ 646 + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 647 + 40000baseCR4_Full);\ 648 + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ 649 + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 650 + 50000baseCR2_Full);\ 651 + if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ 652 + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 653 + Pause); \ 654 + if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ 655 + ethtool_link_ksettings_add_link_mode( \ 656 + lk_ksettings, name, Asym_Pause);\ 657 + } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ 658 + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ 659 + Asym_Pause); \ 660 + } \ 661 + } 662 + 663 + #define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ 664 + { \ 665 + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 666 + 100baseT_Full) || \ 667 + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 668 + 100baseT_Half)) \ 669 + (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ 670 + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 671 + 1000baseT_Full) || \ 672 + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 673 + 
1000baseT_Half)) \ 674 + (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ 675 + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 676 + 10000baseT_Full)) \ 677 + (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ 678 + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 679 + 25000baseCR_Full)) \ 680 + (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ 681 + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 682 + 40000baseCR4_Full)) \ 683 + (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \ 684 + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ 685 + 50000baseCR2_Full)) \ 686 + (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ 687 + } 688 + 689 + static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, 690 + struct ethtool_link_ksettings *lk_ksettings) 632 691 { 633 692 u16 fw_speeds = link_info->auto_link_speeds; 634 693 u8 fw_pause = 0; ··· 695 636 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 696 637 fw_pause = link_info->auto_pause_setting; 697 638 698 - return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); 639 + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); 699 640 } 700 641 701 - static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) 642 + static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, 643 + struct ethtool_link_ksettings *lk_ksettings) 702 644 { 703 645 u16 fw_speeds = link_info->lp_auto_link_speeds; 704 646 u8 fw_pause = 0; ··· 707 647 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) 708 648 fw_pause = link_info->lp_pause; 709 649 710 - return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); 650 + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, 651 + lp_advertising); 711 652 } 712 653 713 - static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) 654 + static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, 655 + struct ethtool_link_ksettings *lk_ksettings) 714 656 { 715 657 u16 fw_speeds = 
link_info->support_speeds; 716 - u32 supported; 717 658 718 - supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); 719 - return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; 659 + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); 660 + 661 + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); 662 + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 663 + Asym_Pause); 664 + 665 + if (link_info->support_auto_speeds) 666 + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 667 + Autoneg); 720 668 } 721 669 722 670 u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) ··· 751 683 } 752 684 } 753 685 754 - static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 686 + static int bnxt_get_link_ksettings(struct net_device *dev, 687 + struct ethtool_link_ksettings *lk_ksettings) 755 688 { 756 689 struct bnxt *bp = netdev_priv(dev); 757 690 struct bnxt_link_info *link_info = &bp->link_info; 758 - u16 ethtool_speed; 691 + struct ethtool_link_settings *base = &lk_ksettings->base; 692 + u32 ethtool_speed; 759 693 760 - cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); 694 + ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); 695 + bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); 761 696 762 - if (link_info->auto_link_speeds) 763 - cmd->supported |= SUPPORTED_Autoneg; 764 - 697 + ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); 765 698 if (link_info->autoneg) { 766 - cmd->advertising = 767 - bnxt_fw_to_ethtool_advertised_spds(link_info); 768 - cmd->advertising |= ADVERTISED_Autoneg; 769 - cmd->autoneg = AUTONEG_ENABLE; 699 + bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); 700 + ethtool_link_ksettings_add_link_mode(lk_ksettings, 701 + advertising, Autoneg); 702 + base->autoneg = AUTONEG_ENABLE; 770 703 if (link_info->phy_link_status == BNXT_LINK_LINK) 771 - cmd->lp_advertising = 772 - bnxt_fw_to_ethtool_lp_adv(link_info); 704 + 
bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); 773 705 ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); 774 706 if (!netif_carrier_ok(dev)) 775 - cmd->duplex = DUPLEX_UNKNOWN; 707 + base->duplex = DUPLEX_UNKNOWN; 776 708 else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) 777 - cmd->duplex = DUPLEX_FULL; 709 + base->duplex = DUPLEX_FULL; 778 710 else 779 - cmd->duplex = DUPLEX_HALF; 711 + base->duplex = DUPLEX_HALF; 780 712 } else { 781 - cmd->autoneg = AUTONEG_DISABLE; 782 - cmd->advertising = 0; 713 + base->autoneg = AUTONEG_DISABLE; 783 714 ethtool_speed = 784 715 bnxt_fw_to_ethtool_speed(link_info->req_link_speed); 785 - cmd->duplex = DUPLEX_HALF; 716 + base->duplex = DUPLEX_HALF; 786 717 if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) 787 - cmd->duplex = DUPLEX_FULL; 718 + base->duplex = DUPLEX_FULL; 788 719 } 789 - ethtool_cmd_speed_set(cmd, ethtool_speed); 720 + base->speed = ethtool_speed; 790 721 791 - cmd->port = PORT_NONE; 722 + base->port = PORT_NONE; 792 723 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { 793 - cmd->port = PORT_TP; 794 - cmd->supported |= SUPPORTED_TP; 795 - cmd->advertising |= ADVERTISED_TP; 724 + base->port = PORT_TP; 725 + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 726 + TP); 727 + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, 728 + TP); 796 729 } else { 797 - cmd->supported |= SUPPORTED_FIBRE; 798 - cmd->advertising |= ADVERTISED_FIBRE; 730 + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, 731 + FIBRE); 732 + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, 733 + FIBRE); 799 734 800 735 if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) 801 - cmd->port = PORT_DA; 736 + base->port = PORT_DA; 802 737 else if (link_info->media_type == 803 738 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) 804 - cmd->port = PORT_FIBRE; 739 + base->port = PORT_FIBRE; 805 740 } 806 - 807 - if (link_info->transceiver == 808 - 
PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) 809 - cmd->transceiver = XCVR_INTERNAL; 810 - else 811 - cmd->transceiver = XCVR_EXTERNAL; 812 - cmd->phy_address = link_info->phy_addr; 741 + base->phy_address = link_info->phy_addr; 813 742 814 743 return 0; 815 744 } ··· 880 815 return fw_speed_mask; 881 816 } 882 817 883 - static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 818 + static int bnxt_set_link_ksettings(struct net_device *dev, 819 + const struct ethtool_link_ksettings *lk_ksettings) 884 820 { 885 - int rc = 0; 886 821 struct bnxt *bp = netdev_priv(dev); 887 822 struct bnxt_link_info *link_info = &bp->link_info; 823 + const struct ethtool_link_settings *base = &lk_ksettings->base; 888 824 u32 speed, fw_advertising = 0; 889 825 bool set_pause = false; 826 + int rc = 0; 890 827 891 - if (BNXT_VF(bp)) 892 - return rc; 828 + if (!BNXT_SINGLE_PF(bp)) 829 + return -EOPNOTSUPP; 893 830 894 - if (cmd->autoneg == AUTONEG_ENABLE) { 895 - u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info); 896 - 897 - if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | 898 - ADVERTISED_TP | ADVERTISED_FIBRE)) { 899 - netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", 900 - cmd->advertising); 901 - rc = -EINVAL; 902 - goto set_setting_exit; 903 - } 904 - fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising); 905 - if (fw_advertising & ~link_info->support_speeds) { 906 - netdev_err(dev, "Advertising parameters are not supported! 
(adv: 0x%x)\n", 907 - cmd->advertising); 908 - rc = -EINVAL; 909 - goto set_setting_exit; 910 - } 831 + if (base->autoneg == AUTONEG_ENABLE) { 832 + BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, 833 + advertising); 911 834 link_info->autoneg |= BNXT_AUTONEG_SPEED; 912 835 if (!fw_advertising) 913 - link_info->advertising = link_info->support_speeds; 836 + link_info->advertising = link_info->support_auto_speeds; 914 837 else 915 838 link_info->advertising = fw_advertising; 916 839 /* any change to autoneg will cause link change, therefore the ··· 916 863 rc = -EINVAL; 917 864 goto set_setting_exit; 918 865 } 919 - /* TODO: currently don't support half duplex */ 920 - if (cmd->duplex == DUPLEX_HALF) { 866 + if (base->duplex == DUPLEX_HALF) { 921 867 netdev_err(dev, "HALF DUPLEX is not supported!\n"); 922 868 rc = -EINVAL; 923 869 goto set_setting_exit; 924 870 } 925 - /* If received a request for an unknown duplex, assume full*/ 926 - if (cmd->duplex == DUPLEX_UNKNOWN) 927 - cmd->duplex = DUPLEX_FULL; 928 - speed = ethtool_cmd_speed(cmd); 871 + speed = base->speed; 929 872 fw_speed = bnxt_get_fw_speed(dev, speed); 930 873 if (!fw_speed) { 931 874 rc = -EINVAL; ··· 960 911 struct bnxt *bp = netdev_priv(dev); 961 912 struct bnxt_link_info *link_info = &bp->link_info; 962 913 963 - if (BNXT_VF(bp)) 914 + if (!BNXT_SINGLE_PF(bp)) 964 915 return rc; 965 916 966 917 if (epause->autoneg) { ··· 1482 1433 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); 1483 1434 int rc = 0; 1484 1435 1485 - if (BNXT_VF(bp)) 1436 + if (!BNXT_SINGLE_PF(bp)) 1486 1437 return 0; 1487 1438 1488 1439 if (!(bp->flags & BNXT_FLAG_EEE_CAP)) ··· 1667 1618 } 1668 1619 1669 1620 const struct ethtool_ops bnxt_ethtool_ops = { 1670 - .get_settings = bnxt_get_settings, 1671 - .set_settings = bnxt_set_settings, 1621 + .get_link_ksettings = bnxt_get_link_ksettings, 1622 + .set_link_ksettings = bnxt_set_link_ksettings, 1672 1623 .get_pauseparam = bnxt_get_pauseparam, 1673 1624 .set_pauseparam = 
bnxt_set_pauseparam, 1674 1625 .get_drvinfo = bnxt_get_drvinfo,
+3
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
··· 143 143 u16 vlan_tag; 144 144 int rc; 145 145 146 + if (bp->hwrm_spec_code < 0x10201) 147 + return -ENOTSUPP; 148 + 146 149 rc = bnxt_vf_ndo_prep(bp, vf_id); 147 150 if (rc) 148 151 return rc;