Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ice: Add code for DCB initialization part 3/4

This patch adds a new function ice_pf_dcb_cfg (and related helpers)
which applies the DCB configuration obtained from the firmware. As
part of this, VSIs/netdevs are updated with traffic class information.

This patch requires a bit of a refactor of existing code.

1. For a MIB change event, the associated VSI is closed and brought up
again. The gap between closing and opening the VSI can cause a race
condition. Fix this by grabbing the rtnl_lock prior to closing the
VSI and releasing it only after re-opening the VSI during a MIB
change event.

2. ice_sched_query_elem is used in ice_sched.c and with this patch, in
ice_dcb.c as well. However, ice_dcb.c is not built when CONFIG_DCB is
unset. This results in namespace warnings (ice_sched.o: Externally
defined symbols with no external references) when CONFIG_DCB is unset.
To avoid this move ice_sched_query_elem from ice_sched.c to
ice_common.c.

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

authored by

Anirudh Venkataramanan and committed by
Jeff Kirsher
7b9ffc76 0ebd3ff1

+999 -84
+7 -6
drivers/net/ethernet/intel/ice/ice.h
··· 378 378 struct ice_hw_port_stats stats_prev; 379 379 struct ice_hw hw; 380 380 u8 stat_prev_loaded; /* has previous stats been loaded */ 381 + #ifdef CONFIG_DCB 382 + u16 dcbx_cap; 383 + #endif /* CONFIG_DCB */ 381 384 u32 tx_timeout_count; 382 385 unsigned long tx_timeout_last_recovery; 383 386 u32 tx_timeout_recovery_level; ··· 417 414 wr32(hw, GLINT_DYN_CTL(vector), val); 418 415 } 419 416 420 - static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 421 - { 422 - vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS; 423 - vsi->tc_cfg.numtc = 1; 424 - } 425 - 426 417 void ice_set_ethtool_ops(struct net_device *netdev); 427 418 int ice_up(struct ice_vsi *vsi); 428 419 int ice_down(struct ice_vsi *vsi); ··· 425 428 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size); 426 429 void ice_print_link_msg(struct ice_vsi *vsi, bool isup); 427 430 void ice_napi_del(struct ice_vsi *vsi); 431 + #ifdef CONFIG_DCB 432 + int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked); 433 + void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked); 434 + #endif /* CONFIG_DCB */ 428 435 429 436 #endif /* _ICE_H_ */
+47
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
··· 747 747 __le32 teid[1]; 748 748 }; 749 749 750 + /* Query Port ETS (indirect 0x040E) 751 + * 752 + * This indirect command is used to query port TC node configuration. 753 + */ 754 + struct ice_aqc_query_port_ets { 755 + __le32 port_teid; 756 + __le32 reserved; 757 + __le32 addr_high; 758 + __le32 addr_low; 759 + }; 760 + 761 + struct ice_aqc_port_ets_elem { 762 + u8 tc_valid_bits; 763 + u8 reserved[3]; 764 + /* 3 bits for UP per TC 0-7, 4th byte reserved */ 765 + __le32 up2tc; 766 + u8 tc_bw_share[8]; 767 + __le32 port_eir_prof_id; 768 + __le32 port_cir_prof_id; 769 + /* 3 bits per Node priority to TC 0-7, 4th byte reserved */ 770 + __le32 tc_node_prio; 771 + #define ICE_TC_NODE_PRIO_S 0x4 772 + u8 reserved1[4]; 773 + __le32 tc_node_teid[8]; /* Used for response, reserved in command */ 774 + }; 775 + 750 776 /* Query Scheduler Resource Allocation (indirect 0x0412) 751 777 * This indirect command retrieves the scheduler resources allocated by 752 778 * EMP Firmware to the given PF. ··· 1238 1212 u8 reserved[12]; 1239 1213 }; 1240 1214 1215 + /* Set Local LLDP MIB (indirect 0x0A08) 1216 + * Used to replace the local MIB of a given LLDP agent. e.g. DCBx 1217 + */ 1218 + struct ice_aqc_lldp_set_local_mib { 1219 + u8 type; 1220 + #define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0) 1221 + #define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0 1222 + #define SET_LOCAL_MIB_TYPE_CEE_M BIT(1) 1223 + #define SET_LOCAL_MIB_TYPE_CEE_WILLING 0 1224 + #define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M 1225 + u8 reserved0; 1226 + __le16 length; 1227 + u8 reserved1[4]; 1228 + __le32 addr_high; 1229 + __le32 addr_low; 1230 + }; 1231 + 1241 1232 /* Stop/Start LLDP Agent (direct 0x0A09) 1242 1233 * Used for stopping/starting specific LLDP agent. e.g. DCBx. 
1243 1234 * The same structure is used for the response, with the command field ··· 1524 1481 struct ice_aqc_get_topo get_topo; 1525 1482 struct ice_aqc_sched_elem_cmd sched_elem_cmd; 1526 1483 struct ice_aqc_query_txsched_res query_sched_res; 1484 + struct ice_aqc_query_port_ets port_ets; 1527 1485 struct ice_aqc_nvm nvm; 1528 1486 struct ice_aqc_pf_vf_msg virt; 1529 1487 struct ice_aqc_lldp_get_mib lldp_get_mib; 1530 1488 struct ice_aqc_lldp_set_mib_change lldp_set_event; 1531 1489 struct ice_aqc_lldp_start lldp_start; 1490 + struct ice_aqc_lldp_set_local_mib lldp_set_mib; 1532 1491 struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl; 1533 1492 struct ice_aqc_get_set_rss_lut get_set_rss_lut; 1534 1493 struct ice_aqc_get_set_rss_key get_set_rss_key; ··· 1618 1573 ice_aqc_opc_get_sched_elems = 0x0404, 1619 1574 ice_aqc_opc_suspend_sched_elems = 0x0409, 1620 1575 ice_aqc_opc_resume_sched_elems = 0x040A, 1576 + ice_aqc_opc_query_port_ets = 0x040E, 1621 1577 ice_aqc_opc_delete_sched_elems = 0x040F, 1622 1578 ice_aqc_opc_query_sched_res = 0x0412, 1623 1579 ··· 1641 1595 ice_aqc_opc_lldp_set_mib_change = 0x0A01, 1642 1596 ice_aqc_opc_lldp_start = 0x0A06, 1643 1597 ice_aqc_opc_get_cee_dcb_cfg = 0x0A07, 1598 + ice_aqc_opc_lldp_set_local_mib = 0x0A08, 1644 1599 ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09, 1645 1600 1646 1601 /* RSS commands */
+25
drivers/net/ethernet/intel/ice/ice_common.c
··· 3106 3106 /* to manage the potential roll-over */ 3107 3107 *cur_stat = (new_data + BIT_ULL(32)) - *prev_stat; 3108 3108 } 3109 + 3110 + /** 3111 + * ice_sched_query_elem - query element information from HW 3112 + * @hw: pointer to the HW struct 3113 + * @node_teid: node TEID to be queried 3114 + * @buf: buffer to element information 3115 + * 3116 + * This function queries HW element information 3117 + */ 3118 + enum ice_status 3119 + ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, 3120 + struct ice_aqc_get_elem *buf) 3121 + { 3122 + u16 buf_size, num_elem_ret = 0; 3123 + enum ice_status status; 3124 + 3125 + buf_size = sizeof(*buf); 3126 + memset(buf, 0, buf_size); 3127 + buf->generic[0].node_teid = cpu_to_le32(node_teid); 3128 + status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, 3129 + NULL); 3130 + if (status || num_elem_ret != 1) 3131 + ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); 3132 + return status; 3133 + }
+3
drivers/net/ethernet/intel/ice/ice_common.h
··· 118 118 void 119 119 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, 120 120 u64 *prev_stat, u64 *cur_stat); 121 + enum ice_status 122 + ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, 123 + struct ice_aqc_get_elem *buf); 121 124 #endif /* _ICE_COMMON_H_ */
+463
drivers/net/ethernet/intel/ice/ice_dcb.c
··· 99 99 } 100 100 101 101 /** 102 + * ice_aq_set_lldp_mib - Set the LLDP MIB 103 + * @hw: pointer to the HW struct 104 + * @mib_type: Local, Remote or both Local and Remote MIBs 105 + * @buf: pointer to the caller-supplied buffer to store the MIB block 106 + * @buf_size: size of the buffer (in bytes) 107 + * @cd: pointer to command details structure or NULL 108 + * 109 + * Set the LLDP MIB. (0x0A08) 110 + */ 111 + static enum ice_status 112 + ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, 113 + struct ice_sq_cd *cd) 114 + { 115 + struct ice_aqc_lldp_set_local_mib *cmd; 116 + struct ice_aq_desc desc; 117 + 118 + cmd = &desc.params.lldp_set_mib; 119 + 120 + if (buf_size == 0 || !buf) 121 + return ICE_ERR_PARAM; 122 + 123 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib); 124 + 125 + desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD); 126 + desc.datalen = cpu_to_le16(buf_size); 127 + 128 + cmd->type = mib_type; 129 + cmd->length = cpu_to_le16(buf_size); 130 + 131 + return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); 132 + } 133 + 134 + /** 102 135 * ice_get_dcbx_status 103 136 * @hw: pointer to the HW struct 104 137 * ··· 934 901 pi->is_sw_lldp = false; 935 902 936 903 return ret; 904 + } 905 + 906 + /** 907 + * ice_add_ieee_ets_common_tlv 908 + * @buf: Data buffer to be populated with ice_dcb_ets_cfg data 909 + * @ets_cfg: Container for ice_dcb_ets_cfg data 910 + * 911 + * Populate the TLV buffer with ice_dcb_ets_cfg data 912 + */ 913 + static void 914 + ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg) 915 + { 916 + u8 priority0, priority1; 917 + u8 offset = 0; 918 + int i; 919 + 920 + /* Priority Assignment Table (4 octets) 921 + * Octets:| 1 | 2 | 3 | 4 | 922 + * ----------------------------------------- 923 + * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7| 924 + * ----------------------------------------- 925 + * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0| 926 + * 
----------------------------------------- 927 + */ 928 + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) { 929 + priority0 = ets_cfg->prio_table[i * 2] & 0xF; 930 + priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF; 931 + buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1; 932 + offset++; 933 + } 934 + 935 + /* TC Bandwidth Table (8 octets) 936 + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 937 + * --------------------------------- 938 + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| 939 + * --------------------------------- 940 + * 941 + * TSA Assignment Table (8 octets) 942 + * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 943 + * --------------------------------- 944 + * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7| 945 + * --------------------------------- 946 + */ 947 + ice_for_each_traffic_class(i) { 948 + buf[offset] = ets_cfg->tcbwtable[i]; 949 + buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i]; 950 + offset++; 951 + } 952 + } 953 + 954 + /** 955 + * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format 956 + * @tlv: Fill the ETS config data in IEEE format 957 + * @dcbcfg: Local store which holds the DCB Config 958 + * 959 + * Prepare IEEE 802.1Qaz ETS CFG TLV 960 + */ 961 + static void 962 + ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) 963 + { 964 + struct ice_dcb_ets_cfg *etscfg; 965 + u8 *buf = tlv->tlvinfo; 966 + u8 maxtcwilling = 0; 967 + u32 ouisubtype; 968 + u16 typelen; 969 + 970 + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 971 + ICE_IEEE_ETS_TLV_LEN); 972 + tlv->typelen = htons(typelen); 973 + 974 + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 975 + ICE_IEEE_SUBTYPE_ETS_CFG); 976 + tlv->ouisubtype = htonl(ouisubtype); 977 + 978 + /* First Octet post subtype 979 + * -------------------------- 980 + * |will-|CBS | Re- | Max | 981 + * |ing | |served| TCs | 982 + * -------------------------- 983 + * |1bit | 1bit|3 bits|3bits| 984 + */ 985 + etscfg = &dcbcfg->etscfg; 986 + if (etscfg->willing) 987 + 
maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S); 988 + maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M; 989 + buf[0] = maxtcwilling; 990 + 991 + /* Begin adding at Priority Assignment Table (offset 1 in buf) */ 992 + ice_add_ieee_ets_common_tlv(&buf[1], etscfg); 993 + } 994 + 995 + /** 996 + * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format 997 + * @tlv: Fill ETS Recommended TLV in IEEE format 998 + * @dcbcfg: Local store which holds the DCB Config 999 + * 1000 + * Prepare IEEE 802.1Qaz ETS REC TLV 1001 + */ 1002 + static void 1003 + ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv, 1004 + struct ice_dcbx_cfg *dcbcfg) 1005 + { 1006 + struct ice_dcb_ets_cfg *etsrec; 1007 + u8 *buf = tlv->tlvinfo; 1008 + u32 ouisubtype; 1009 + u16 typelen; 1010 + 1011 + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 1012 + ICE_IEEE_ETS_TLV_LEN); 1013 + tlv->typelen = htons(typelen); 1014 + 1015 + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 1016 + ICE_IEEE_SUBTYPE_ETS_REC); 1017 + tlv->ouisubtype = htonl(ouisubtype); 1018 + 1019 + etsrec = &dcbcfg->etsrec; 1020 + 1021 + /* First Octet is reserved */ 1022 + /* Begin adding at Priority Assignment Table (offset 1 in buf) */ 1023 + ice_add_ieee_ets_common_tlv(&buf[1], etsrec); 1024 + } 1025 + 1026 + /** 1027 + * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format 1028 + * @tlv: Fill PFC TLV in IEEE format 1029 + * @dcbcfg: Local store which holds the PFC CFG data 1030 + * 1031 + * Prepare IEEE 802.1Qaz PFC CFG TLV 1032 + */ 1033 + static void 1034 + ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) 1035 + { 1036 + u8 *buf = tlv->tlvinfo; 1037 + u32 ouisubtype; 1038 + u16 typelen; 1039 + 1040 + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | 1041 + ICE_IEEE_PFC_TLV_LEN); 1042 + tlv->typelen = htons(typelen); 1043 + 1044 + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 1045 + ICE_IEEE_SUBTYPE_PFC_CFG); 1046 + tlv->ouisubtype = 
htonl(ouisubtype); 1047 + 1048 + /* ---------------------------------------- 1049 + * |will-|MBC | Re- | PFC | PFC Enable | 1050 + * |ing | |served| cap | | 1051 + * ----------------------------------------- 1052 + * |1bit | 1bit|2 bits|4bits| 1 octet | 1053 + */ 1054 + if (dcbcfg->pfc.willing) 1055 + buf[0] = BIT(ICE_IEEE_PFC_WILLING_S); 1056 + 1057 + if (dcbcfg->pfc.mbc) 1058 + buf[0] |= BIT(ICE_IEEE_PFC_MBC_S); 1059 + 1060 + buf[0] |= dcbcfg->pfc.pfccap & 0xF; 1061 + buf[1] = dcbcfg->pfc.pfcena; 1062 + } 1063 + 1064 + /** 1065 + * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format 1066 + * @tlv: Fill APP TLV in IEEE format 1067 + * @dcbcfg: Local store which holds the APP CFG data 1068 + * 1069 + * Prepare IEEE 802.1Qaz APP CFG TLV 1070 + */ 1071 + static void 1072 + ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv, 1073 + struct ice_dcbx_cfg *dcbcfg) 1074 + { 1075 + u16 typelen, len, offset = 0; 1076 + u8 priority, selector, i = 0; 1077 + u8 *buf = tlv->tlvinfo; 1078 + u32 ouisubtype; 1079 + 1080 + /* No APP TLVs then just return */ 1081 + if (dcbcfg->numapps == 0) 1082 + return; 1083 + ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) | 1084 + ICE_IEEE_SUBTYPE_APP_PRI); 1085 + tlv->ouisubtype = htonl(ouisubtype); 1086 + 1087 + /* Move offset to App Priority Table */ 1088 + offset++; 1089 + /* Application Priority Table (3 octets) 1090 + * Octets:| 1 | 2 | 3 | 1091 + * ----------------------------------------- 1092 + * |Priority|Rsrvd| Sel | Protocol ID | 1093 + * ----------------------------------------- 1094 + * Bits:|23 21|20 19|18 16|15 0| 1095 + * ----------------------------------------- 1096 + */ 1097 + while (i < dcbcfg->numapps) { 1098 + priority = dcbcfg->app[i].priority & 0x7; 1099 + selector = dcbcfg->app[i].selector & 0x7; 1100 + buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector; 1101 + buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF; 1102 + buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF; 1103 + /* Move to 
next app */ 1104 + offset += 3; 1105 + i++; 1106 + if (i >= ICE_DCBX_MAX_APPS) 1107 + break; 1108 + } 1109 + /* len includes size of ouisubtype + 1 reserved + 3*numapps */ 1110 + len = sizeof(tlv->ouisubtype) + 1 + (i * 3); 1111 + typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF)); 1112 + tlv->typelen = htons(typelen); 1113 + } 1114 + 1115 + /** 1116 + * ice_add_dcb_tlv - Add all IEEE TLVs 1117 + * @tlv: Fill TLV data in IEEE format 1118 + * @dcbcfg: Local store which holds the DCB Config 1119 + * @tlvid: Type of IEEE TLV 1120 + * 1121 + * Add tlv information 1122 + */ 1123 + static void 1124 + ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg, 1125 + u16 tlvid) 1126 + { 1127 + switch (tlvid) { 1128 + case ICE_IEEE_TLV_ID_ETS_CFG: 1129 + ice_add_ieee_ets_tlv(tlv, dcbcfg); 1130 + break; 1131 + case ICE_IEEE_TLV_ID_ETS_REC: 1132 + ice_add_ieee_etsrec_tlv(tlv, dcbcfg); 1133 + break; 1134 + case ICE_IEEE_TLV_ID_PFC_CFG: 1135 + ice_add_ieee_pfc_tlv(tlv, dcbcfg); 1136 + break; 1137 + case ICE_IEEE_TLV_ID_APP_PRI: 1138 + ice_add_ieee_app_pri_tlv(tlv, dcbcfg); 1139 + break; 1140 + default: 1141 + break; 1142 + } 1143 + } 1144 + 1145 + /** 1146 + * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format 1147 + * @lldpmib: pointer to the HW struct 1148 + * @miblen: length of LLDP MIB 1149 + * @dcbcfg: Local store which holds the DCB Config 1150 + * 1151 + * Convert the DCB configuration to MIB format 1152 + */ 1153 + static void 1154 + ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg) 1155 + { 1156 + u16 len, offset = 0, tlvid = ICE_TLV_ID_START; 1157 + struct ice_lldp_org_tlv *tlv; 1158 + u16 typelen; 1159 + 1160 + tlv = (struct ice_lldp_org_tlv *)lldpmib; 1161 + while (1) { 1162 + ice_add_dcb_tlv(tlv, dcbcfg, tlvid++); 1163 + typelen = ntohs(tlv->typelen); 1164 + len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S; 1165 + if (len) 1166 + offset += len + 2; 1167 + /* END TLV or beyond LLDPDU 
size */ 1168 + if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU || 1169 + offset > ICE_LLDPDU_SIZE) 1170 + break; 1171 + /* Move to next TLV */ 1172 + if (len) 1173 + tlv = (struct ice_lldp_org_tlv *) 1174 + ((char *)tlv + sizeof(tlv->typelen) + len); 1175 + } 1176 + *miblen = offset; 1177 + } 1178 + 1179 + /** 1180 + * ice_set_dcb_cfg - Set the local LLDP MIB to FW 1181 + * @pi: port information structure 1182 + * 1183 + * Set DCB configuration to the Firmware 1184 + */ 1185 + enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi) 1186 + { 1187 + u8 mib_type, *lldpmib = NULL; 1188 + struct ice_dcbx_cfg *dcbcfg; 1189 + enum ice_status ret; 1190 + struct ice_hw *hw; 1191 + u16 miblen; 1192 + 1193 + if (!pi) 1194 + return ICE_ERR_PARAM; 1195 + 1196 + hw = pi->hw; 1197 + 1198 + /* update the HW local config */ 1199 + dcbcfg = &pi->local_dcbx_cfg; 1200 + /* Allocate the LLDPDU */ 1201 + lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL); 1202 + if (!lldpmib) 1203 + return ICE_ERR_NO_MEMORY; 1204 + 1205 + mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB; 1206 + if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING) 1207 + mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING; 1208 + 1209 + ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg); 1210 + ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, 1211 + NULL); 1212 + 1213 + devm_kfree(ice_hw_to_dev(hw), lldpmib); 1214 + 1215 + return ret; 1216 + } 1217 + 1218 + /** 1219 + * ice_aq_query_port_ets - query port ets configuration 1220 + * @pi: port information structure 1221 + * @buf: pointer to buffer 1222 + * @buf_size: buffer size in bytes 1223 + * @cd: pointer to command details structure or NULL 1224 + * 1225 + * query current port ets configuration 1226 + */ 1227 + static enum ice_status 1228 + ice_aq_query_port_ets(struct ice_port_info *pi, 1229 + struct ice_aqc_port_ets_elem *buf, u16 buf_size, 1230 + struct ice_sq_cd *cd) 1231 + { 1232 + struct ice_aqc_query_port_ets *cmd; 1233 + struct ice_aq_desc desc; 1234 + 
enum ice_status status; 1235 + 1236 + if (!pi) 1237 + return ICE_ERR_PARAM; 1238 + cmd = &desc.params.port_ets; 1239 + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets); 1240 + cmd->port_teid = pi->root->info.node_teid; 1241 + 1242 + status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd); 1243 + return status; 1244 + } 1245 + 1246 + /** 1247 + * ice_update_port_tc_tree_cfg - update TC tree configuration 1248 + * @pi: port information structure 1249 + * @buf: pointer to buffer 1250 + * 1251 + * update the SW DB with the new TC changes 1252 + */ 1253 + static enum ice_status 1254 + ice_update_port_tc_tree_cfg(struct ice_port_info *pi, 1255 + struct ice_aqc_port_ets_elem *buf) 1256 + { 1257 + struct ice_sched_node *node, *tc_node; 1258 + struct ice_aqc_get_elem elem; 1259 + enum ice_status status = 0; 1260 + u32 teid1, teid2; 1261 + u8 i, j; 1262 + 1263 + if (!pi) 1264 + return ICE_ERR_PARAM; 1265 + /* suspend the missing TC nodes */ 1266 + for (i = 0; i < pi->root->num_children; i++) { 1267 + teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid); 1268 + ice_for_each_traffic_class(j) { 1269 + teid2 = le32_to_cpu(buf->tc_node_teid[j]); 1270 + if (teid1 == teid2) 1271 + break; 1272 + } 1273 + if (j < ICE_MAX_TRAFFIC_CLASS) 1274 + continue; 1275 + /* TC is missing */ 1276 + pi->root->children[i]->in_use = false; 1277 + } 1278 + /* add the new TC nodes */ 1279 + ice_for_each_traffic_class(j) { 1280 + teid2 = le32_to_cpu(buf->tc_node_teid[j]); 1281 + if (teid2 == ICE_INVAL_TEID) 1282 + continue; 1283 + /* Is it already present in the tree ? 
*/ 1284 + for (i = 0; i < pi->root->num_children; i++) { 1285 + tc_node = pi->root->children[i]; 1286 + if (!tc_node) 1287 + continue; 1288 + teid1 = le32_to_cpu(tc_node->info.node_teid); 1289 + if (teid1 == teid2) { 1290 + tc_node->tc_num = j; 1291 + tc_node->in_use = true; 1292 + break; 1293 + } 1294 + } 1295 + if (i < pi->root->num_children) 1296 + continue; 1297 + /* new TC */ 1298 + status = ice_sched_query_elem(pi->hw, teid2, &elem); 1299 + if (!status) 1300 + status = ice_sched_add_node(pi, 1, &elem.generic[0]); 1301 + if (status) 1302 + break; 1303 + /* update the TC number */ 1304 + node = ice_sched_find_node_by_teid(pi->root, teid2); 1305 + if (node) 1306 + node->tc_num = j; 1307 + } 1308 + return status; 1309 + } 1310 + 1311 + /** 1312 + * ice_query_port_ets - query port ets configuration 1313 + * @pi: port information structure 1314 + * @buf: pointer to buffer 1315 + * @buf_size: buffer size in bytes 1316 + * @cd: pointer to command details structure or NULL 1317 + * 1318 + * query current port ets configuration and update the 1319 + * SW DB with the TC changes 1320 + */ 1321 + enum ice_status 1322 + ice_query_port_ets(struct ice_port_info *pi, 1323 + struct ice_aqc_port_ets_elem *buf, u16 buf_size, 1324 + struct ice_sq_cd *cd) 1325 + { 1326 + enum ice_status status; 1327 + 1328 + mutex_lock(&pi->sched_lock); 1329 + status = ice_aq_query_port_ets(pi, buf, buf_size, cd); 1330 + if (!status) 1331 + status = ice_update_port_tc_tree_cfg(pi, buf); 1332 + mutex_unlock(&pi->sched_lock); 1333 + return status; 937 1334 }
+17
drivers/net/ethernet/intel/ice/ice_dcb.h
··· 70 70 #define ICE_IEEE_APP_PRIO_S 5 71 71 #define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S) 72 72 73 + /* TLV definitions for preparing MIB */ 74 + #define ICE_IEEE_TLV_ID_ETS_CFG 3 75 + #define ICE_IEEE_TLV_ID_ETS_REC 4 76 + #define ICE_IEEE_TLV_ID_PFC_CFG 5 77 + #define ICE_IEEE_TLV_ID_APP_PRI 6 78 + #define ICE_TLV_ID_END_OF_LLDPPDU 7 79 + #define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG 80 + 81 + #define ICE_IEEE_ETS_TLV_LEN 25 82 + #define ICE_IEEE_PFC_TLV_LEN 6 83 + #define ICE_IEEE_APP_TLV_LEN 11 84 + 73 85 /* IEEE 802.1AB LLDP Organization specific TLV */ 74 86 struct ice_lldp_org_tlv { 75 87 __be16 typelen; ··· 120 108 } __packed; 121 109 122 110 u8 ice_get_dcbx_status(struct ice_hw *hw); 111 + enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi); 123 112 enum ice_status ice_init_dcb(struct ice_hw *hw); 113 + enum ice_status 114 + ice_query_port_ets(struct ice_port_info *pi, 115 + struct ice_aqc_port_ets_elem *buf, u16 buf_size, 116 + struct ice_sq_cd *cmd_details); 124 117 #ifdef CONFIG_DCB 125 118 enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd); 126 119 enum ice_status
+203 -1
drivers/net/ethernet/intel/ice/ice_dcb_lib.c
··· 4 4 #include "ice_dcb_lib.h" 5 5 6 6 /** 7 + * ice_dcb_get_ena_tc - return bitmap of enabled TCs 8 + * @dcbcfg: DCB config to evaluate for enabled TCs 9 + */ 10 + u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg) 11 + { 12 + u8 i, num_tc, ena_tc = 1; 13 + 14 + num_tc = ice_dcb_get_num_tc(dcbcfg); 15 + 16 + for (i = 0; i < num_tc; i++) 17 + ena_tc |= BIT(i); 18 + 19 + return ena_tc; 20 + } 21 + 22 + /** 23 + * ice_dcb_get_num_tc - Get the number of TCs from DCBX config 24 + * @dcbcfg: config to retrieve number of TCs from 25 + */ 26 + u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg) 27 + { 28 + bool tc_unused = false; 29 + u8 num_tc = 0; 30 + u8 ret = 0; 31 + int i; 32 + 33 + /* Scan the ETS Config Priority Table to find traffic classes 34 + * enabled and create a bitmask of enabled TCs 35 + */ 36 + for (i = 0; i < CEE_DCBX_MAX_PRIO; i++) 37 + num_tc |= BIT(dcbcfg->etscfg.prio_table[i]); 38 + 39 + /* Scan bitmask for contiguous TCs starting with TC0 */ 40 + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 41 + if (num_tc & BIT(i)) { 42 + if (!tc_unused) { 43 + ret++; 44 + } else { 45 + pr_err("Non-contiguous TCs - Disabling DCB\n"); 46 + return 1; 47 + } 48 + } else { 49 + tc_unused = true; 50 + } 51 + } 52 + 53 + /* There is always at least 1 TC */ 54 + if (!ret) 55 + ret = 1; 56 + 57 + return ret; 58 + } 59 + 60 + /** 61 + * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs 62 + * @pf: pointer to the PF struct 63 + * 64 + * Assumed caller has already disabled all VSIs before 65 + * calling this function. Reconfiguring DCB based on 66 + * local_dcbx_cfg. 
67 + */ 68 + static void ice_pf_dcb_recfg(struct ice_pf *pf) 69 + { 70 + struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg; 71 + u8 tc_map = 0; 72 + int v, ret; 73 + 74 + /* Update each VSI */ 75 + ice_for_each_vsi(pf, v) { 76 + if (!pf->vsi[v]) 77 + continue; 78 + 79 + if (pf->vsi[v]->type == ICE_VSI_PF) 80 + tc_map = ice_dcb_get_ena_tc(dcbcfg); 81 + else 82 + tc_map = ICE_DFLT_TRAFFIC_CLASS; 83 + 84 + ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map); 85 + if (ret) 86 + dev_err(&pf->pdev->dev, 87 + "Failed to config TC for VSI index: %d\n", 88 + pf->vsi[v]->idx); 89 + else 90 + ice_vsi_map_rings_to_vectors(pf->vsi[v]); 91 + } 92 + } 93 + 94 + /** 95 + * ice_pf_dcb_cfg - Apply new DCB configuration 96 + * @pf: pointer to the PF struct 97 + * @new_cfg: DCBX config to apply 98 + */ 99 + static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg) 100 + { 101 + struct ice_dcbx_cfg *old_cfg, *curr_cfg; 102 + struct ice_aqc_port_ets_elem buf = { 0 }; 103 + int ret = 0; 104 + 105 + curr_cfg = &pf->hw.port_info->local_dcbx_cfg; 106 + 107 + /* Enable DCB tagging only when more than one TC */ 108 + if (ice_dcb_get_num_tc(new_cfg) > 1) { 109 + dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n"); 110 + set_bit(ICE_FLAG_DCB_ENA, pf->flags); 111 + } else { 112 + dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n"); 113 + clear_bit(ICE_FLAG_DCB_ENA, pf->flags); 114 + } 115 + 116 + if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) { 117 + dev_dbg(&pf->pdev->dev, "No change in DCB config required\n"); 118 + return ret; 119 + } 120 + 121 + /* Store old config in case FW config fails */ 122 + old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL); 123 + memcpy(old_cfg, curr_cfg, sizeof(*old_cfg)); 124 + 125 + /* avoid race conditions by holding the lock while disabling and 126 + * re-enabling the VSI 127 + */ 128 + rtnl_lock(); 129 + ice_pf_dis_all_vsi(pf, true); 130 + 131 + memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg)); 132 + 
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec)); 133 + 134 + /* Only send new config to HW if we are in SW LLDP mode. Otherwise, 135 + * the new config came from the HW in the first place. 136 + */ 137 + if (pf->hw.port_info->is_sw_lldp) { 138 + ret = ice_set_dcb_cfg(pf->hw.port_info); 139 + if (ret) { 140 + dev_err(&pf->pdev->dev, "Set DCB Config failed\n"); 141 + /* Restore previous settings to local config */ 142 + memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg)); 143 + goto out; 144 + } 145 + } 146 + 147 + ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL); 148 + if (ret) { 149 + dev_err(&pf->pdev->dev, "Query Port ETS failed\n"); 150 + goto out; 151 + } 152 + 153 + ice_pf_dcb_recfg(pf); 154 + 155 + out: 156 + ice_pf_ena_all_vsi(pf, true); 157 + rtnl_unlock(); 158 + devm_kfree(&pf->pdev->dev, old_cfg); 159 + return ret; 160 + } 161 + 162 + /** 163 + * ice_dcb_init_cfg - set the initial DCB config in SW 164 + * @pf: pf to apply config to 165 + */ 166 + static int ice_dcb_init_cfg(struct ice_pf *pf) 167 + { 168 + struct ice_dcbx_cfg *newcfg; 169 + struct ice_port_info *pi; 170 + int ret = 0; 171 + 172 + pi = pf->hw.port_info; 173 + newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL); 174 + if (!newcfg) 175 + return -ENOMEM; 176 + 177 + memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg)); 178 + memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg)); 179 + 180 + dev_info(&pf->pdev->dev, "Configuring initial DCB values\n"); 181 + if (ice_pf_dcb_cfg(pf, newcfg)) 182 + ret = -EINVAL; 183 + 184 + devm_kfree(&pf->pdev->dev, newcfg); 185 + 186 + return ret; 187 + } 188 + 189 + /** 7 190 * ice_init_pf_dcb - initialize DCB for a PF 8 191 * @pf: pf to initiialize DCB for 9 192 */ ··· 195 12 struct device *dev = &pf->pdev->dev; 196 13 struct ice_port_info *port_info; 197 14 struct ice_hw *hw = &pf->hw; 15 + int err; 198 16 199 17 port_info = hw->port_info; 200 18 ··· 222 38 ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL); 223 
39 } 224 40 225 - return ice_init_dcb(hw); 41 + err = ice_init_dcb(hw); 42 + if (err) 43 + goto dcb_init_err; 44 + 45 + /* DCBX in FW and LLDP enabled in FW */ 46 + pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE; 47 + 48 + set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); 49 + 50 + err = ice_dcb_init_cfg(pf); 51 + if (err) 52 + goto dcb_init_err; 53 + 54 + dev_info(&pf->pdev->dev, "DCBX offload supported\n"); 55 + return err; 56 + 57 + dcb_init_err: 58 + dev_err(dev, "DCB init failed\n"); 59 + return err; 226 60 }
+13
drivers/net/ethernet/intel/ice/ice_dcb_lib.h
··· 8 8 #include "ice_lib.h" 9 9 10 10 #ifdef CONFIG_DCB 11 + u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg); 12 + u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg); 11 13 int ice_init_pf_dcb(struct ice_pf *pf); 12 14 #else 15 + static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg) 16 + { 17 + return ICE_DFLT_TRAFFIC_CLASS; 18 + } 19 + 20 + static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg) 21 + { 22 + return 1; 23 + } 24 + 13 25 static inline int ice_init_pf_dcb(struct ice_pf *pf) 14 26 { 15 27 dev_dbg(&pf->pdev->dev, "DCB not supported\n"); 16 28 return -EOPNOTSUPP; 17 29 } 30 + 18 31 #endif /* CONFIG_DCB */ 19 32 #endif /* _ICE_DCB_LIB_H_ */
+135
drivers/net/ethernet/intel/ice/ice_lib.c
··· 3 3 4 4 #include "ice.h" 5 5 #include "ice_lib.h" 6 + #include "ice_dcb_lib.h" 6 7 7 8 /** 8 9 * ice_setup_rx_ctx - Configure a receive ring context ··· 1302 1301 * through the MSI-X enabling code. On a constrained vector budget, we map Tx 1303 1302 * and Rx rings to the vector as "efficiently" as possible. 1304 1303 */ 1304 + #ifdef CONFIG_DCB 1305 + void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) 1306 + #else 1305 1307 static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) 1308 + #endif /* CONFIG_DCB */ 1306 1309 { 1307 1310 int q_vectors = vsi->num_q_vectors; 1308 1311 int tx_rings_rem, rx_rings_rem; ··· 2177 2172 return -EIO; 2178 2173 } 2179 2174 2175 + static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) 2176 + { 2177 + struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg; 2178 + 2179 + vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg); 2180 + vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); 2181 + } 2182 + 2180 2183 /** 2181 2184 * ice_vsi_setup - Set up a VSI by a given type 2182 2185 * @pf: board private structure ··· 2828 2815 test_bit(__ICE_CORER_REQ, state) || 2829 2816 test_bit(__ICE_GLOBR_REQ, state); 2830 2817 } 2818 + 2819 + #ifdef CONFIG_DCB 2820 + /** 2821 + * ice_vsi_update_q_map - update our copy of the VSI info with new queue map 2822 + * @vsi: VSI being configured 2823 + * @ctx: the context buffer returned from AQ VSI update command 2824 + */ 2825 + static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx) 2826 + { 2827 + vsi->info.mapping_flags = ctx->info.mapping_flags; 2828 + memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping, 2829 + sizeof(vsi->info.q_mapping)); 2830 + memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping, 2831 + sizeof(vsi->info.tc_mapping)); 2832 + } 2833 + 2834 + /** 2835 + * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration 2836 + * @vsi: the VSI being configured 2837 + * @ena_tc: TC map to be enabled 2838 + */ 2839 + static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 
ena_tc) 2840 + { 2841 + struct net_device *netdev = vsi->netdev; 2842 + struct ice_pf *pf = vsi->back; 2843 + struct ice_dcbx_cfg *dcbcfg; 2844 + u8 netdev_tc; 2845 + int i; 2846 + 2847 + if (!netdev) 2848 + return; 2849 + 2850 + if (!ena_tc) { 2851 + netdev_reset_tc(netdev); 2852 + return; 2853 + } 2854 + 2855 + if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc)) 2856 + return; 2857 + 2858 + dcbcfg = &pf->hw.port_info->local_dcbx_cfg; 2859 + 2860 + ice_for_each_traffic_class(i) 2861 + if (vsi->tc_cfg.ena_tc & BIT(i)) 2862 + netdev_set_tc_queue(netdev, 2863 + vsi->tc_cfg.tc_info[i].netdev_tc, 2864 + vsi->tc_cfg.tc_info[i].qcount_tx, 2865 + vsi->tc_cfg.tc_info[i].qoffset); 2866 + 2867 + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { 2868 + u8 ets_tc = dcbcfg->etscfg.prio_table[i]; 2869 + 2870 + /* Get the mapped netdev TC# for the UP */ 2871 + netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc; 2872 + netdev_set_prio_tc_map(netdev, i, netdev_tc); 2873 + } 2874 + } 2875 + 2876 + /** 2877 + * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map 2878 + * @vsi: VSI to be configured 2879 + * @ena_tc: TC bitmap 2880 + * 2881 + * VSI queues expected to be quiesced before calling this function 2882 + */ 2883 + int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) 2884 + { 2885 + u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; 2886 + struct ice_vsi_ctx *ctx; 2887 + struct ice_pf *pf = vsi->back; 2888 + enum ice_status status; 2889 + int i, ret = 0; 2890 + u8 num_tc = 0; 2891 + 2892 + ice_for_each_traffic_class(i) { 2893 + /* build bitmap of enabled TCs */ 2894 + if (ena_tc & BIT(i)) 2895 + num_tc++; 2896 + /* populate max_txqs per TC */ 2897 + max_txqs[i] = pf->num_lan_tx; 2898 + } 2899 + 2900 + vsi->tc_cfg.ena_tc = ena_tc; 2901 + vsi->tc_cfg.numtc = num_tc; 2902 + 2903 + ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL); 2904 + if (!ctx) 2905 + return -ENOMEM; 2906 + 2907 + ctx->vf_num = 0; 2908 + ctx->info = vsi->info; 2909 + 2910 + ice_vsi_setup_q_map(vsi, ctx); 2911 
+ 2912 + /* must to indicate which section of VSI context are being modified */ 2913 + ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); 2914 + status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL); 2915 + if (status) { 2916 + dev_info(&pf->pdev->dev, "Failed VSI Update\n"); 2917 + ret = -EIO; 2918 + goto out; 2919 + } 2920 + 2921 + status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, 2922 + max_txqs); 2923 + 2924 + if (status) { 2925 + dev_err(&pf->pdev->dev, 2926 + "VSI %d failed TC config, error %d\n", 2927 + vsi->vsi_num, status); 2928 + ret = -EIO; 2929 + goto out; 2930 + } 2931 + ice_vsi_update_q_map(vsi, ctx); 2932 + vsi->info.valid_sections = 0; 2933 + 2934 + ice_vsi_cfg_netdev_tc(vsi, ena_tc); 2935 + out: 2936 + devm_kfree(&pf->pdev->dev, ctx); 2937 + return ret; 2938 + } 2939 + #endif /* CONFIG_DCB */
+8
drivers/net/ethernet/intel/ice/ice_lib.h
··· 41 41 42 42 int ice_vsi_clear(struct ice_vsi *vsi); 43 43 44 + #ifdef CONFIG_DCB 45 + int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); 46 + #endif /* CONFIG_DCB */ 47 + 44 48 struct ice_vsi * 45 49 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, 46 50 enum ice_vsi_type type, u16 vf_id); ··· 65 61 void ice_vsi_free_q_vectors(struct ice_vsi *vsi); 66 62 67 63 void ice_vsi_put_qs(struct ice_vsi *vsi); 64 + 65 + #ifdef CONFIG_DCB 66 + void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi); 67 + #endif /* CONFIG_DCB */ 68 68 69 69 void ice_vsi_dis_irq(struct ice_vsi *vsi); 70 70
+71 -51
drivers/net/ethernet/intel/ice/ice_main.c
··· 31 31 static struct workqueue_struct *ice_wq; 32 32 static const struct net_device_ops ice_netdev_ops; 33 33 34 - static void ice_pf_dis_all_vsi(struct ice_pf *pf); 35 34 static void ice_rebuild(struct ice_pf *pf); 36 35 37 36 static void ice_vsi_release_all(struct ice_pf *pf); ··· 397 398 } 398 399 399 400 /** 401 + * ice_dis_vsi - pause a VSI 402 + * @vsi: the VSI being paused 403 + * @locked: is the rtnl_lock already held 404 + */ 405 + static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 406 + { 407 + if (test_bit(__ICE_DOWN, vsi->state)) 408 + return; 409 + 410 + set_bit(__ICE_NEEDS_RESTART, vsi->state); 411 + 412 + if (vsi->type == ICE_VSI_PF && vsi->netdev) { 413 + if (netif_running(vsi->netdev)) { 414 + if (!locked) { 415 + rtnl_lock(); 416 + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 417 + rtnl_unlock(); 418 + } else { 419 + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 420 + } 421 + } else { 422 + ice_vsi_close(vsi); 423 + } 424 + } 425 + } 426 + 427 + /** 428 + * ice_pf_dis_all_vsi - Pause all VSIs on a PF 429 + * @pf: the PF 430 + * @locked: is the rtnl_lock already held 431 + */ 432 + #ifdef CONFIG_DCB 433 + void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) 434 + #else 435 + static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) 436 + #endif /* CONFIG_DCB */ 437 + { 438 + int v; 439 + 440 + ice_for_each_vsi(pf, v) 441 + if (pf->vsi[v]) 442 + ice_dis_vsi(pf->vsi[v], locked); 443 + } 444 + 445 + /** 400 446 * ice_prepare_for_reset - prep for the core to reset 401 447 * @pf: board private structure 402 448 * ··· 461 417 ice_vc_notify_reset(pf); 462 418 463 419 /* disable the VSIs and their queues that are not already DOWN */ 464 - ice_pf_dis_all_vsi(pf); 420 + ice_pf_dis_all_vsi(pf, false); 465 421 466 422 if (hw->port_info) 467 423 ice_sched_clear_port(hw->port_info); ··· 3625 3581 } 3626 3582 3627 3583 /** 3628 - * ice_dis_vsi - pause a VSI 3629 - * @vsi: the VSI being paused 3630 - * @locked: is the rtnl_lock already held 
3631 - */ 3632 - static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) 3633 - { 3634 - if (test_bit(__ICE_DOWN, vsi->state)) 3635 - return; 3636 - 3637 - set_bit(__ICE_NEEDS_RESTART, vsi->state); 3638 - 3639 - if (vsi->type == ICE_VSI_PF && vsi->netdev) { 3640 - if (netif_running(vsi->netdev)) { 3641 - if (!locked) { 3642 - rtnl_lock(); 3643 - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3644 - rtnl_unlock(); 3645 - } else { 3646 - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); 3647 - } 3648 - } else { 3649 - ice_vsi_close(vsi); 3650 - } 3651 - } 3652 - } 3653 - 3654 - /** 3655 3584 * ice_ena_vsi - resume a VSI 3656 3585 * @vsi: the VSI being resume 3586 + * @locked: is the rtnl_lock already held 3657 3587 */ 3658 - static int ice_ena_vsi(struct ice_vsi *vsi) 3588 + static int ice_ena_vsi(struct ice_vsi *vsi, bool locked) 3659 3589 { 3660 3590 int err = 0; 3661 3591 3662 - if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) && 3663 - vsi->netdev) { 3592 + if (!test_bit(__ICE_NEEDS_RESTART, vsi->state)) 3593 + return err; 3594 + 3595 + clear_bit(__ICE_NEEDS_RESTART, vsi->state); 3596 + 3597 + if (vsi->netdev && vsi->type == ICE_VSI_PF) { 3598 + struct net_device *netd = vsi->netdev; 3599 + 3664 3600 if (netif_running(vsi->netdev)) { 3665 - rtnl_lock(); 3666 - err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); 3667 - rtnl_unlock(); 3601 + if (locked) { 3602 + err = netd->netdev_ops->ndo_open(netd); 3603 + } else { 3604 + rtnl_lock(); 3605 + err = netd->netdev_ops->ndo_open(netd); 3606 + rtnl_unlock(); 3607 + } 3668 3608 } else { 3669 3609 err = ice_vsi_open(vsi); 3670 3610 } ··· 3658 3630 } 3659 3631 3660 3632 /** 3661 - * ice_pf_dis_all_vsi - Pause all VSIs on a PF 3662 - * @pf: the PF 3663 - */ 3664 - static void ice_pf_dis_all_vsi(struct ice_pf *pf) 3665 - { 3666 - int v; 3667 - 3668 - ice_for_each_vsi(pf, v) 3669 - if (pf->vsi[v]) 3670 - ice_dis_vsi(pf->vsi[v], false); 3671 - } 3672 - 3673 - /** 3674 3633 * ice_pf_ena_all_vsi - Resume all VSIs on a 
PF 3675 3634 * @pf: the PF 3635 + * @locked: is the rtnl_lock already held 3676 3636 */ 3677 - static int ice_pf_ena_all_vsi(struct ice_pf *pf) 3637 + #ifdef CONFIG_DCB 3638 + int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) 3639 + #else 3640 + static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked) 3641 + #endif /* CONFIG_DCB */ 3678 3642 { 3679 3643 int v; 3680 3644 3681 3645 ice_for_each_vsi(pf, v) 3682 3646 if (pf->vsi[v]) 3683 - if (ice_ena_vsi(pf->vsi[v])) 3647 + if (ice_ena_vsi(pf->vsi[v], locked)) 3684 3648 return -EIO; 3685 3649 3686 3650 return 0; ··· 3820 3800 } 3821 3801 3822 3802 /* restart the VSIs that were rebuilt and running before the reset */ 3823 - err = ice_pf_ena_all_vsi(pf); 3803 + err = ice_pf_ena_all_vsi(pf, false); 3824 3804 if (err) { 3825 3805 dev_err(&pf->pdev->dev, "error enabling VSIs\n"); 3826 3806 /* no need to disable VSIs in tear down path in ice_rebuild()
+1 -26
drivers/net/ethernet/intel/ice/ice_sched.c
··· 127 127 * 128 128 * Query scheduling elements (0x0404) 129 129 */ 130 - static enum ice_status 130 + enum ice_status 131 131 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, 132 132 struct ice_aqc_get_elem *buf, u16 buf_size, 133 133 u16 *elems_ret, struct ice_sq_cd *cd) ··· 135 135 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems, 136 136 elems_req, (void *)buf, buf_size, 137 137 elems_ret, cd); 138 - } 139 - 140 - /** 141 - * ice_sched_query_elem - query element information from HW 142 - * @hw: pointer to the HW struct 143 - * @node_teid: node TEID to be queried 144 - * @buf: buffer to element information 145 - * 146 - * This function queries HW element information 147 - */ 148 - static enum ice_status 149 - ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, 150 - struct ice_aqc_get_elem *buf) 151 - { 152 - u16 buf_size, num_elem_ret = 0; 153 - enum ice_status status; 154 - 155 - buf_size = sizeof(*buf); 156 - memset(buf, 0, buf_size); 157 - buf->generic[0].node_teid = cpu_to_le32(node_teid); 158 - status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret, 159 - NULL); 160 - if (status || num_elem_ret != 1) 161 - ice_debug(hw, ICE_DBG_SCHED, "query element failed\n"); 162 - return status; 163 138 } 164 139 165 140 /**
+4
drivers/net/ethernet/intel/ice/ice_sched.h
··· 24 24 }; 25 25 26 26 /* FW AQ command calls */ 27 + enum ice_status 28 + ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, 29 + struct ice_aqc_get_elem *buf, u16 buf_size, 30 + u16 *elems_ret, struct ice_sq_cd *cd); 27 31 enum ice_status ice_sched_init_port(struct ice_port_info *pi); 28 32 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); 29 33 void ice_sched_clear_port(struct ice_port_info *pi);
+2
drivers/net/ethernet/intel/ice/ice_type.h
··· 215 215 #define ice_for_each_traffic_class(_i) \ 216 216 for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++) 217 217 218 + #define ICE_INVAL_TEID 0xFFFFFFFF 219 + 218 220 struct ice_sched_node { 219 221 struct ice_sched_node *parent; 220 222 struct ice_sched_node *sibling; /* next sibling in the same layer */