Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'qed-vf-tunnel'

Manish Chopra says:

====================
qed/qede: VF tunnelling support

With this series, VFs can run vxlan/geneve/gre tunnels.
Please consider applying this series to "net-next".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+854 -252
+28 -3
drivers/net/ethernet/qlogic/qed/qed.h
··· 149 149 QED_TUNN_CLSS_MAC_VNI, 150 150 QED_TUNN_CLSS_INNER_MAC_VLAN, 151 151 QED_TUNN_CLSS_INNER_MAC_VNI, 152 + QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE, 152 153 MAX_QED_TUNN_CLSS, 154 + }; 155 + 156 + struct qed_tunn_update_type { 157 + bool b_update_mode; 158 + bool b_mode_enabled; 159 + enum qed_tunn_clss tun_cls; 160 + }; 161 + 162 + struct qed_tunn_update_udp_port { 163 + bool b_update_port; 164 + u16 port; 165 + }; 166 + 167 + struct qed_tunnel_info { 168 + struct qed_tunn_update_type vxlan; 169 + struct qed_tunn_update_type l2_geneve; 170 + struct qed_tunn_update_type ip_geneve; 171 + struct qed_tunn_update_type l2_gre; 172 + struct qed_tunn_update_type ip_gre; 173 + 174 + struct qed_tunn_update_udp_port vxlan_port; 175 + struct qed_tunn_update_udp_port geneve_port; 176 + 177 + bool b_update_rx_cls; 178 + bool b_update_tx_cls; 153 179 }; 154 180 155 181 struct qed_tunn_start_params { ··· 674 648 /* SRIOV */ 675 649 struct qed_hw_sriov_info *p_iov_info; 676 650 #define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info) 677 - 678 - unsigned long tunn_mode; 679 - 651 + struct qed_tunnel_info tunnel; 680 652 bool b_is_vf; 681 653 u32 drv_type; 682 654 struct qed_eth_stats *reset_stats; ··· 718 694 u32 rdma_max_sge; 719 695 u32 rdma_max_inline; 720 696 u32 rdma_max_srq_sge; 697 + u16 tunn_feature_mask; 721 698 }; 722 699 723 700 #define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
+15 -2
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 1453 1453 1454 1454 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, 1455 1455 struct qed_ptt *p_ptt, 1456 - struct qed_tunn_start_params *p_tunn, 1456 + struct qed_tunnel_info *p_tunn, 1457 1457 int hw_mode, 1458 1458 bool b_hw_start, 1459 1459 enum qed_int_mode int_mode, ··· 1594 1594 p_load_req->override_force_load = p_drv_load->override_force_load; 1595 1595 } 1596 1596 1597 + static int qed_vf_start(struct qed_hwfn *p_hwfn, 1598 + struct qed_hw_init_params *p_params) 1599 + { 1600 + if (p_params->p_tunn) { 1601 + qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn); 1602 + qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); 1603 + } 1604 + 1605 + p_hwfn->b_int_enabled = 1; 1606 + 1607 + return 0; 1608 + } 1609 + 1597 1610 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) 1598 1611 { 1599 1612 struct qed_load_req_params load_req_params; ··· 1636 1623 } 1637 1624 1638 1625 if (IS_VF(cdev)) { 1639 - p_hwfn->b_int_enabled = 1; 1626 + qed_vf_start(p_hwfn, p_params); 1640 1627 continue; 1641 1628 } 1642 1629
+1 -1
drivers/net/ethernet/qlogic/qed/qed_dev_api.h
··· 113 113 114 114 struct qed_hw_init_params { 115 115 /* Tunneling parameters */ 116 - struct qed_tunn_start_params *p_tunn; 116 + struct qed_tunnel_info *p_tunn; 117 117 118 118 bool b_hw_start; 119 119
+26 -11
drivers/net/ethernet/qlogic/qed/qed_l2.c
··· 2285 2285 static int qed_tunn_configure(struct qed_dev *cdev, 2286 2286 struct qed_tunn_params *tunn_params) 2287 2287 { 2288 - struct qed_tunn_update_params tunn_info; 2288 + struct qed_tunnel_info tunn_info; 2289 2289 int i, rc; 2290 2290 2291 - if (IS_VF(cdev)) 2292 - return 0; 2293 - 2294 2291 memset(&tunn_info, 0, sizeof(tunn_info)); 2295 - if (tunn_params->update_vxlan_port == 1) { 2296 - tunn_info.update_vxlan_udp_port = 1; 2297 - tunn_info.vxlan_udp_port = tunn_params->vxlan_port; 2292 + if (tunn_params->update_vxlan_port) { 2293 + tunn_info.vxlan_port.b_update_port = true; 2294 + tunn_info.vxlan_port.port = tunn_params->vxlan_port; 2298 2295 } 2299 2296 2300 - if (tunn_params->update_geneve_port == 1) { 2301 - tunn_info.update_geneve_udp_port = 1; 2302 - tunn_info.geneve_udp_port = tunn_params->geneve_port; 2297 + if (tunn_params->update_geneve_port) { 2298 + tunn_info.geneve_port.b_update_port = true; 2299 + tunn_info.geneve_port.port = tunn_params->geneve_port; 2303 2300 } 2304 2301 2305 2302 for_each_hwfn(cdev, i) { 2306 2303 struct qed_hwfn *hwfn = &cdev->hwfns[i]; 2304 + struct qed_tunnel_info *tun; 2305 + 2306 + tun = &hwfn->cdev->tunnel; 2307 2307 2308 2308 rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info, 2309 2309 QED_SPQ_MODE_EBLOCK, NULL); 2310 - 2311 2310 if (rc) 2312 2311 return rc; 2312 + 2313 + if (IS_PF_SRIOV(hwfn)) { 2314 + u16 vxlan_port, geneve_port; 2315 + int j; 2316 + 2317 + vxlan_port = tun->vxlan_port.port; 2318 + geneve_port = tun->geneve_port.port; 2319 + 2320 + qed_for_each_vf(hwfn, j) { 2321 + qed_iov_bulletin_set_udp_ports(hwfn, j, 2322 + vxlan_port, 2323 + geneve_port); 2324 + } 2325 + 2326 + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 2327 + } 2313 2328 } 2314 2329 2315 2330 return 0;
+35 -12
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 230 230 int qed_fill_dev_info(struct qed_dev *cdev, 231 231 struct qed_dev_info *dev_info) 232 232 { 233 + struct qed_tunnel_info *tun = &cdev->tunnel; 233 234 struct qed_ptt *ptt; 234 235 235 236 memset(dev_info, 0, sizeof(struct qed_dev_info)); 237 + 238 + if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 239 + tun->vxlan.b_mode_enabled) 240 + dev_info->vxlan_enable = true; 241 + 242 + if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 243 + tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 244 + tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 245 + dev_info->gre_enable = true; 246 + 247 + if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 248 + tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 249 + tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 250 + dev_info->geneve_enable = true; 236 251 237 252 dev_info->num_hwfns = cdev->num_hwfns; 238 253 dev_info->pci_mem_start = cdev->pci_params.mem_start; ··· 924 909 { 925 910 struct qed_drv_load_params drv_load_params; 926 911 struct qed_hw_init_params hw_init_params; 927 - struct qed_tunn_start_params tunn_info; 928 912 struct qed_mcp_drv_version drv_version; 913 + struct qed_tunnel_info tunn_info; 929 914 const u8 *data = NULL; 930 915 struct qed_hwfn *hwfn; 931 916 struct qed_ptt *p_ptt; ··· 989 974 qed_dbg_pf_init(cdev); 990 975 } 991 976 992 - memset(&tunn_info, 0, sizeof(tunn_info)); 993 - tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | 994 - 1 << QED_MODE_L2GRE_TUNN | 995 - 1 << QED_MODE_IPGRE_TUNN | 996 - 1 << QED_MODE_L2GENEVE_TUNN | 997 - 1 << QED_MODE_IPGENEVE_TUNN; 998 - 999 - tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN; 1000 - tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN; 1001 - tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; 1002 - 1003 977 /* Start the slowpath */ 1004 978 memset(&hw_init_params, 0, sizeof(hw_init_params)); 979 + memset(&tunn_info, 0, sizeof(tunn_info)); 980 + tunn_info.vxlan.b_mode_enabled = true; 981 + 
tunn_info.l2_gre.b_mode_enabled = true; 982 + tunn_info.ip_gre.b_mode_enabled = true; 983 + tunn_info.l2_geneve.b_mode_enabled = true; 984 + tunn_info.ip_geneve.b_mode_enabled = true; 985 + tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 986 + tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 987 + tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 988 + tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 989 + tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1005 990 hw_init_params.p_tunn = &tunn_info; 1006 991 hw_init_params.b_hw_start = true; 1007 992 hw_init_params.int_mode = cdev->int_params.out.int_mode; ··· 1021 1006 1022 1007 DP_INFO(cdev, 1023 1008 "HW initialization and function start completed successfully\n"); 1009 + 1010 + if (IS_PF(cdev)) { 1011 + cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1012 + BIT(QED_MODE_L2GENEVE_TUNN) | 1013 + BIT(QED_MODE_IPGENEVE_TUNN) | 1014 + BIT(QED_MODE_L2GRE_TUNN) | 1015 + BIT(QED_MODE_IPGRE_TUNN)); 1016 + } 1024 1017 1025 1018 /* Allocate LL2 interface if needed */ 1026 1019 if (QED_LEADING_HWFN(cdev)->using_ll2) {
+2 -2
drivers/net/ethernet/qlogic/qed/qed_sp.h
··· 409 409 */ 410 410 411 411 int qed_sp_pf_start(struct qed_hwfn *p_hwfn, 412 - struct qed_tunn_start_params *p_tunn, 412 + struct qed_tunnel_info *p_tunn, 413 413 enum qed_mf_mode mode, bool allow_npar_tx_switch); 414 414 415 415 /** ··· 442 442 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); 443 443 444 444 int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, 445 - struct qed_tunn_update_params *p_tunn, 445 + struct qed_tunnel_info *p_tunn, 446 446 enum spq_mode comp_mode, 447 447 struct qed_spq_comp_cb *p_comp_data); 448 448 /**
+157 -167
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
··· 111 111 return 0; 112 112 } 113 113 114 - static enum tunnel_clss qed_tunn_get_clss_type(u8 type) 114 + static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) 115 115 { 116 116 switch (type) { 117 117 case QED_TUNN_CLSS_MAC_VLAN: ··· 122 122 return TUNNEL_CLSS_INNER_MAC_VLAN; 123 123 case QED_TUNN_CLSS_INNER_MAC_VNI: 124 124 return TUNNEL_CLSS_INNER_MAC_VNI; 125 + case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE: 126 + return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE; 125 127 default: 126 128 return TUNNEL_CLSS_MAC_VLAN; 127 129 } 128 130 } 129 131 130 132 static void 131 - qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn, 132 - struct qed_tunn_update_params *p_src, 133 - struct pf_update_tunnel_config *p_tunn_cfg) 133 + qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun, 134 + struct qed_tunnel_info *p_src, bool b_pf_start) 134 135 { 135 - unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode; 136 - unsigned long update_mask = p_src->tunn_mode_update_mask; 137 - unsigned long tunn_mode = p_src->tunn_mode; 138 - unsigned long new_tunn_mode = 0; 136 + if (p_src->vxlan.b_update_mode || b_pf_start) 137 + p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled; 139 138 140 - if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) { 141 - if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) 142 - __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode); 143 - } else { 144 - if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode)) 145 - __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode); 139 + if (p_src->l2_gre.b_update_mode || b_pf_start) 140 + p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled; 141 + 142 + if (p_src->ip_gre.b_update_mode || b_pf_start) 143 + p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled; 144 + 145 + if (p_src->l2_geneve.b_update_mode || b_pf_start) 146 + p_tun->l2_geneve.b_mode_enabled = 147 + p_src->l2_geneve.b_mode_enabled; 148 + 149 + if (p_src->ip_geneve.b_update_mode || b_pf_start) 150 + p_tun->ip_geneve.b_mode_enabled = 151 + 
p_src->ip_geneve.b_mode_enabled; 152 + } 153 + 154 + static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun, 155 + struct qed_tunnel_info *p_src) 156 + { 157 + enum tunnel_clss type; 158 + 159 + p_tun->b_update_rx_cls = p_src->b_update_rx_cls; 160 + p_tun->b_update_tx_cls = p_src->b_update_tx_cls; 161 + 162 + type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls); 163 + p_tun->vxlan.tun_cls = type; 164 + type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls); 165 + p_tun->l2_gre.tun_cls = type; 166 + type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls); 167 + p_tun->ip_gre.tun_cls = type; 168 + type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls); 169 + p_tun->l2_geneve.tun_cls = type; 170 + type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls); 171 + p_tun->ip_geneve.tun_cls = type; 172 + } 173 + 174 + static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun, 175 + struct qed_tunnel_info *p_src) 176 + { 177 + p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port; 178 + p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port; 179 + 180 + if (p_src->geneve_port.b_update_port) 181 + p_tun->geneve_port.port = p_src->geneve_port.port; 182 + 183 + if (p_src->vxlan_port.b_update_port) 184 + p_tun->vxlan_port.port = p_src->vxlan_port.port; 185 + } 186 + 187 + static void 188 + __qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas, 189 + struct qed_tunn_update_type *tun_type) 190 + { 191 + *p_tunn_cls = tun_type->tun_cls; 192 + 193 + if (tun_type->b_mode_enabled) 194 + *p_enable_tx_clas = 1; 195 + } 196 + 197 + static void 198 + qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas, 199 + struct qed_tunn_update_type *tun_type, 200 + u8 *p_update_port, __le16 *p_port, 201 + struct qed_tunn_update_udp_port *p_udp_port) 202 + { 203 + __qed_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas, tun_type); 204 + if (p_udp_port->b_update_port) { 205 + *p_update_port = 1; 206 + *p_port = 
cpu_to_le16(p_udp_port->port); 146 207 } 147 - 148 - if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) { 149 - if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) 150 - __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode); 151 - } else { 152 - if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode)) 153 - __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode); 154 - } 155 - 156 - if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) { 157 - if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) 158 - __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode); 159 - } else { 160 - if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode)) 161 - __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode); 162 - } 163 - 164 - if (p_src->update_geneve_udp_port) { 165 - p_tunn_cfg->set_geneve_udp_port_flg = 1; 166 - p_tunn_cfg->geneve_udp_port = 167 - cpu_to_le16(p_src->geneve_udp_port); 168 - } 169 - 170 - if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) { 171 - if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) 172 - __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode); 173 - } else { 174 - if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode)) 175 - __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode); 176 - } 177 - 178 - if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) { 179 - if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) 180 - __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode); 181 - } else { 182 - if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode)) 183 - __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode); 184 - } 185 - 186 - p_src->tunn_mode = new_tunn_mode; 187 208 } 188 209 189 210 static void 190 211 qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, 191 - struct qed_tunn_update_params *p_src, 212 + struct qed_tunnel_info *p_src, 192 213 struct pf_update_tunnel_config *p_tunn_cfg) 193 214 { 194 - unsigned long tunn_mode = p_src->tunn_mode; 195 - enum tunnel_clss type; 215 + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; 196 216 197 - qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg); 198 - 
p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss; 199 - p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss; 217 + qed_set_pf_update_tunn_mode(p_tun, p_src, false); 218 + qed_set_tunn_cls_info(p_tun, p_src); 219 + qed_set_tunn_ports(p_tun, p_src); 200 220 201 - type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan); 202 - p_tunn_cfg->tunnel_clss_vxlan = type; 221 + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, 222 + &p_tunn_cfg->tx_enable_vxlan, 223 + &p_tun->vxlan, 224 + &p_tunn_cfg->set_vxlan_udp_port_flg, 225 + &p_tunn_cfg->vxlan_udp_port, 226 + &p_tun->vxlan_port); 203 227 204 - type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre); 205 - p_tunn_cfg->tunnel_clss_l2gre = type; 228 + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, 229 + &p_tunn_cfg->tx_enable_l2geneve, 230 + &p_tun->l2_geneve, 231 + &p_tunn_cfg->set_geneve_udp_port_flg, 232 + &p_tunn_cfg->geneve_udp_port, 233 + &p_tun->geneve_port); 206 234 207 - type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre); 208 - p_tunn_cfg->tunnel_clss_ipgre = type; 235 + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, 236 + &p_tunn_cfg->tx_enable_ipgeneve, 237 + &p_tun->ip_geneve); 209 238 210 - if (p_src->update_vxlan_udp_port) { 211 - p_tunn_cfg->set_vxlan_udp_port_flg = 1; 212 - p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port); 213 - } 239 + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, 240 + &p_tunn_cfg->tx_enable_l2gre, 241 + &p_tun->l2_gre); 214 242 215 - if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) 216 - p_tunn_cfg->tx_enable_l2gre = 1; 243 + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, 244 + &p_tunn_cfg->tx_enable_ipgre, 245 + &p_tun->ip_gre); 217 246 218 - if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) 219 - p_tunn_cfg->tx_enable_ipgre = 1; 220 - 221 - if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) 222 - p_tunn_cfg->tx_enable_vxlan = 1; 223 - 224 - if (p_src->update_geneve_udp_port) { 225 - 
p_tunn_cfg->set_geneve_udp_port_flg = 1; 226 - p_tunn_cfg->geneve_udp_port = 227 - cpu_to_le16(p_src->geneve_udp_port); 228 - } 229 - 230 - if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) 231 - p_tunn_cfg->tx_enable_l2geneve = 1; 232 - 233 - if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) 234 - p_tunn_cfg->tx_enable_ipgeneve = 1; 235 - 236 - type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve); 237 - p_tunn_cfg->tunnel_clss_l2geneve = type; 238 - 239 - type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); 240 - p_tunn_cfg->tunnel_clss_ipgeneve = type; 247 + p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls; 248 + p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls; 241 249 } 242 250 243 251 static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, 244 252 struct qed_ptt *p_ptt, 245 - unsigned long tunn_mode) 253 + struct qed_tunnel_info *p_tun) 246 254 { 247 - u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0; 248 - u8 l2geneve_enable = 0, ipgeneve_enable = 0; 255 + qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled, 256 + p_tun->ip_gre.b_mode_enabled); 257 + qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled); 249 258 250 - if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) 251 - l2gre_enable = 1; 259 + qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled, 260 + p_tun->ip_geneve.b_mode_enabled); 261 + } 252 262 253 - if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) 254 - ipgre_enable = 1; 263 + static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn, 264 + struct qed_tunnel_info *p_tunn) 265 + { 266 + if (p_tunn->vxlan_port.b_update_port) 267 + qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt, 268 + p_tunn->vxlan_port.port); 255 269 256 - if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) 257 - vxlan_enable = 1; 270 + if (p_tunn->geneve_port.b_update_port) 271 + qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt, 272 + p_tunn->geneve_port.port); 258 273 259 - qed_set_gre_enable(p_hwfn, p_ptt, 
l2gre_enable, ipgre_enable); 260 - qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable); 261 - 262 - if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) 263 - l2geneve_enable = 1; 264 - 265 - if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) 266 - ipgeneve_enable = 1; 267 - 268 - qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable, 269 - ipgeneve_enable); 274 + qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn); 270 275 } 271 276 272 277 static void 273 278 qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, 274 - struct qed_tunn_start_params *p_src, 279 + struct qed_tunnel_info *p_src, 275 280 struct pf_start_tunnel_config *p_tunn_cfg) 276 281 { 277 - unsigned long tunn_mode; 278 - enum tunnel_clss type; 282 + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; 279 283 280 284 if (!p_src) 281 285 return; 282 286 283 - tunn_mode = p_src->tunn_mode; 284 - type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan); 285 - p_tunn_cfg->tunnel_clss_vxlan = type; 286 - type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre); 287 - p_tunn_cfg->tunnel_clss_l2gre = type; 288 - type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre); 289 - p_tunn_cfg->tunnel_clss_ipgre = type; 287 + qed_set_pf_update_tunn_mode(p_tun, p_src, true); 288 + qed_set_tunn_cls_info(p_tun, p_src); 289 + qed_set_tunn_ports(p_tun, p_src); 290 290 291 - if (p_src->update_vxlan_udp_port) { 292 - p_tunn_cfg->set_vxlan_udp_port_flg = 1; 293 - p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port); 294 - } 291 + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, 292 + &p_tunn_cfg->tx_enable_vxlan, 293 + &p_tun->vxlan, 294 + &p_tunn_cfg->set_vxlan_udp_port_flg, 295 + &p_tunn_cfg->vxlan_udp_port, 296 + &p_tun->vxlan_port); 295 297 296 - if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) 297 - p_tunn_cfg->tx_enable_l2gre = 1; 298 + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, 299 + &p_tunn_cfg->tx_enable_l2geneve, 300 + &p_tun->l2_geneve, 301 + 
&p_tunn_cfg->set_geneve_udp_port_flg, 302 + &p_tunn_cfg->geneve_udp_port, 303 + &p_tun->geneve_port); 298 304 299 - if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) 300 - p_tunn_cfg->tx_enable_ipgre = 1; 305 + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, 306 + &p_tunn_cfg->tx_enable_ipgeneve, 307 + &p_tun->ip_geneve); 301 308 302 - if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) 303 - p_tunn_cfg->tx_enable_vxlan = 1; 309 + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, 310 + &p_tunn_cfg->tx_enable_l2gre, 311 + &p_tun->l2_gre); 304 312 305 - if (p_src->update_geneve_udp_port) { 306 - p_tunn_cfg->set_geneve_udp_port_flg = 1; 307 - p_tunn_cfg->geneve_udp_port = 308 - cpu_to_le16(p_src->geneve_udp_port); 309 - } 310 - 311 - if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) 312 - p_tunn_cfg->tx_enable_l2geneve = 1; 313 - 314 - if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) 315 - p_tunn_cfg->tx_enable_ipgeneve = 1; 316 - 317 - type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve); 318 - p_tunn_cfg->tunnel_clss_l2geneve = type; 319 - type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); 320 - p_tunn_cfg->tunnel_clss_ipgeneve = type; 313 + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, 314 + &p_tunn_cfg->tx_enable_ipgre, 315 + &p_tun->ip_gre); 321 316 } 322 317 323 318 int qed_sp_pf_start(struct qed_hwfn *p_hwfn, 324 - struct qed_tunn_start_params *p_tunn, 319 + struct qed_tunnel_info *p_tunn, 325 320 enum qed_mf_mode mode, bool allow_npar_tx_switch) 326 321 { 327 322 struct pf_start_ramrod_data *p_ramrod = NULL; ··· 411 416 412 417 rc = qed_spq_post(p_hwfn, p_ent, NULL); 413 418 414 - if (p_tunn) { 415 - qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, 416 - p_tunn->tunn_mode); 417 - p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode; 418 - } 419 + if (p_tunn) 420 + qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel); 419 421 420 422 return rc; 421 423 } ··· 443 451 444 452 /* Set pf update ramrod command params */ 
445 453 int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, 446 - struct qed_tunn_update_params *p_tunn, 454 + struct qed_tunnel_info *p_tunn, 447 455 enum spq_mode comp_mode, 448 456 struct qed_spq_comp_cb *p_comp_data) 449 457 { 450 458 struct qed_spq_entry *p_ent = NULL; 451 459 struct qed_sp_init_data init_data; 452 460 int rc = -EINVAL; 461 + 462 + if (IS_VF(p_hwfn->cdev)) 463 + return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn); 464 + 465 + if (!p_tunn) 466 + return -EINVAL; 453 467 454 468 /* Get SPQ entry */ 455 469 memset(&init_data, 0, sizeof(init_data)); ··· 477 479 if (rc) 478 480 return rc; 479 481 480 - if (p_tunn->update_vxlan_udp_port) 481 - qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt, 482 - p_tunn->vxlan_udp_port); 483 - if (p_tunn->update_geneve_udp_port) 484 - qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt, 485 - p_tunn->geneve_udp_port); 486 - 487 - qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode); 488 - p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode; 482 + qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel); 489 483 490 484 return rc; 491 485 }
+240
drivers/net/ethernet/qlogic/qed/qed_sriov.c
··· 2019 2019 qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf); 2020 2020 } 2021 2021 2022 + static void 2023 + qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, 2024 + struct qed_tunnel_info *p_tun, 2025 + u16 tunn_feature_mask) 2026 + { 2027 + p_resp->tunn_feature_mask = tunn_feature_mask; 2028 + p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; 2029 + p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; 2030 + p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; 2031 + p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; 2032 + p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled; 2033 + p_resp->vxlan_clss = p_tun->vxlan.tun_cls; 2034 + p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; 2035 + p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; 2036 + p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; 2037 + p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; 2038 + p_resp->geneve_udp_port = p_tun->geneve_port.port; 2039 + p_resp->vxlan_udp_port = p_tun->vxlan_port.port; 2040 + } 2041 + 2042 + static void 2043 + __qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2044 + struct qed_tunn_update_type *p_tun, 2045 + enum qed_tunn_mode mask, u8 tun_cls) 2046 + { 2047 + if (p_req->tun_mode_update_mask & BIT(mask)) { 2048 + p_tun->b_update_mode = true; 2049 + 2050 + if (p_req->tunn_mode & BIT(mask)) 2051 + p_tun->b_mode_enabled = true; 2052 + } 2053 + 2054 + p_tun->tun_cls = tun_cls; 2055 + } 2056 + 2057 + static void 2058 + qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, 2059 + struct qed_tunn_update_type *p_tun, 2060 + struct qed_tunn_update_udp_port *p_port, 2061 + enum qed_tunn_mode mask, 2062 + u8 tun_cls, u8 update_port, u16 port) 2063 + { 2064 + if (update_port) { 2065 + p_port->b_update_port = true; 2066 + p_port->port = port; 2067 + } 2068 + 2069 + __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); 2070 + } 2071 + 2072 + static bool 2073 + qed_iov_pf_validate_tunn_param(struct 
vfpf_update_tunn_param_tlv *p_req) 2074 + { 2075 + bool b_update_requested = false; 2076 + 2077 + if (p_req->tun_mode_update_mask || p_req->update_tun_cls || 2078 + p_req->update_geneve_port || p_req->update_vxlan_port) 2079 + b_update_requested = true; 2080 + 2081 + return b_update_requested; 2082 + } 2083 + 2084 + static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) 2085 + { 2086 + if (tun->b_update_mode && !tun->b_mode_enabled) { 2087 + tun->b_update_mode = false; 2088 + *rc = -EINVAL; 2089 + } 2090 + } 2091 + 2092 + static int 2093 + qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, 2094 + u16 *tun_features, bool *update, 2095 + struct qed_tunnel_info *tun_src) 2096 + { 2097 + struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; 2098 + struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; 2099 + u16 bultn_vxlan_port, bultn_geneve_port; 2100 + void *cookie = p_hwfn->cdev->ops_cookie; 2101 + int i, rc = 0; 2102 + 2103 + *tun_features = p_hwfn->cdev->tunn_feature_mask; 2104 + bultn_vxlan_port = tun->vxlan_port.port; 2105 + bultn_geneve_port = tun->geneve_port.port; 2106 + qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); 2107 + qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); 2108 + qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); 2109 + qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); 2110 + qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); 2111 + 2112 + if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && 2113 + (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2114 + tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2115 + tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2116 + tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || 2117 + tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { 2118 + tun_src->b_update_rx_cls = false; 2119 + tun_src->b_update_tx_cls = false; 2120 + rc = -EINVAL; 2121 + } 2122 + 2123 + if (tun_src->vxlan_port.b_update_port) { 2124 + if 
(tun_src->vxlan_port.port == tun->vxlan_port.port) { 2125 + tun_src->vxlan_port.b_update_port = false; 2126 + } else { 2127 + *update = true; 2128 + bultn_vxlan_port = tun_src->vxlan_port.port; 2129 + } 2130 + } 2131 + 2132 + if (tun_src->geneve_port.b_update_port) { 2133 + if (tun_src->geneve_port.port == tun->geneve_port.port) { 2134 + tun_src->geneve_port.b_update_port = false; 2135 + } else { 2136 + *update = true; 2137 + bultn_geneve_port = tun_src->geneve_port.port; 2138 + } 2139 + } 2140 + 2141 + qed_for_each_vf(p_hwfn, i) { 2142 + qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, 2143 + bultn_geneve_port); 2144 + } 2145 + 2146 + qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); 2147 + ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); 2148 + 2149 + return rc; 2150 + } 2151 + 2152 + static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, 2153 + struct qed_ptt *p_ptt, 2154 + struct qed_vf_info *p_vf) 2155 + { 2156 + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; 2157 + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; 2158 + struct pfvf_update_tunn_param_tlv *p_resp; 2159 + struct vfpf_update_tunn_param_tlv *p_req; 2160 + u8 status = PFVF_STATUS_SUCCESS; 2161 + bool b_update_required = false; 2162 + struct qed_tunnel_info tunn; 2163 + u16 tunn_feature_mask = 0; 2164 + int i, rc = 0; 2165 + 2166 + mbx->offset = (u8 *)mbx->reply_virt; 2167 + 2168 + memset(&tunn, 0, sizeof(tunn)); 2169 + p_req = &mbx->req_virt->tunn_param_update; 2170 + 2171 + if (!qed_iov_pf_validate_tunn_param(p_req)) { 2172 + DP_VERBOSE(p_hwfn, QED_MSG_IOV, 2173 + "No tunnel update requested by VF\n"); 2174 + status = PFVF_STATUS_FAILURE; 2175 + goto send_resp; 2176 + } 2177 + 2178 + tunn.b_update_rx_cls = p_req->update_tun_cls; 2179 + tunn.b_update_tx_cls = p_req->update_tun_cls; 2180 + 2181 + qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, 2182 + QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, 2183 + p_req->update_vxlan_port, 2184 + 
p_req->vxlan_port); 2185 + qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, 2186 + QED_MODE_L2GENEVE_TUNN, 2187 + p_req->l2geneve_clss, 2188 + p_req->update_geneve_port, 2189 + p_req->geneve_port); 2190 + __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, 2191 + QED_MODE_IPGENEVE_TUNN, 2192 + p_req->ipgeneve_clss); 2193 + __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, 2194 + QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); 2195 + __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, 2196 + QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); 2197 + 2198 + /* If PF modifies VF's req then it should 2199 + * still return an error in case of partial configuration 2200 + * or modified configuration as opposed to requested one. 2201 + */ 2202 + rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask, 2203 + &b_update_required, &tunn); 2204 + 2205 + if (rc) 2206 + status = PFVF_STATUS_FAILURE; 2207 + 2208 + /* If QED client is willing to update anything ? */ 2209 + if (b_update_required) { 2210 + u16 geneve_port; 2211 + 2212 + rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn, 2213 + QED_SPQ_MODE_EBLOCK, NULL); 2214 + if (rc) 2215 + status = PFVF_STATUS_FAILURE; 2216 + 2217 + geneve_port = p_tun->geneve_port.port; 2218 + qed_for_each_vf(p_hwfn, i) { 2219 + qed_iov_bulletin_set_udp_ports(p_hwfn, i, 2220 + p_tun->vxlan_port.port, 2221 + geneve_port); 2222 + } 2223 + } 2224 + 2225 + send_resp: 2226 + p_resp = qed_add_tlv(p_hwfn, &mbx->offset, 2227 + CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); 2228 + 2229 + qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); 2230 + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, 2231 + sizeof(struct channel_list_end_tlv)); 2232 + 2233 + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); 2234 + } 2235 + 2022 2236 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, 2023 2237 struct qed_ptt *p_ptt, 2024 2238 struct qed_vf_info *p_vf, u8 status) ··· 3489 3275 case 
CHANNEL_TLV_RELEASE: 3490 3276 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); 3491 3277 break; 3278 + case CHANNEL_TLV_UPDATE_TUNN_PARAM: 3279 + qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); 3280 + break; 3492 3281 } 3493 3282 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { 3494 3283 DP_VERBOSE(p_hwfn, QED_MSG_IOV, ··· 3726 3509 vf_info->bulletin.p_virt->valid_bitmap &= ~feature; 3727 3510 3728 3511 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); 3512 + } 3513 + 3514 + void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, 3515 + int vfid, u16 vxlan_port, u16 geneve_port) 3516 + { 3517 + struct qed_vf_info *vf_info; 3518 + 3519 + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); 3520 + if (!vf_info) { 3521 + DP_NOTICE(p_hwfn->cdev, 3522 + "Can not set udp ports, invalid vfid [%d]\n", vfid); 3523 + return; 3524 + } 3525 + 3526 + if (vf_info->b_malicious) { 3527 + DP_VERBOSE(p_hwfn, QED_MSG_IOV, 3528 + "Can not set udp ports to malicious VF [%d]\n", 3529 + vfid); 3530 + return; 3531 + } 3532 + 3533 + vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; 3534 + vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; 3729 3535 } 3730 3536 3731 3537 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+9
drivers/net/ethernet/qlogic/qed/qed_sriov.h
··· 270 270 */ 271 271 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); 272 272 273 + void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, 274 + int vfid, u16 vxlan_port, u16 geneve_port); 275 + 273 276 /** 274 277 * @brief Read sriov related information and allocated resources 275 278 * reads from configuration space, shmem, etc. ··· 379 376 u16 rel_vf_id) 380 377 { 381 378 return MAX_NUM_VFS; 379 + } 380 + 381 + static inline void 382 + qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, 383 + u16 vxlan_port, u16 geneve_port) 384 + { 385 + } 382 385 } 383 386 384 387 static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+165
drivers/net/ethernet/qlogic/qed/qed_vf.c
··· 418 418 #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ 419 419 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) 420 420 421 + static void 422 + __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, 423 + struct qed_tunn_update_type *p_src, 424 + enum qed_tunn_clss mask, u8 *p_cls) 425 + { 426 + if (p_src->b_update_mode) { 427 + p_req->tun_mode_update_mask |= BIT(mask); 428 + 429 + if (p_src->b_mode_enabled) 430 + p_req->tunn_mode |= BIT(mask); 431 + } 432 + 433 + *p_cls = p_src->tun_cls; 434 + } 435 + 436 + static void 437 + qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, 438 + struct qed_tunn_update_type *p_src, 439 + enum qed_tunn_clss mask, 440 + u8 *p_cls, struct qed_tunn_update_udp_port *p_port, 441 + u8 *p_update_port, u16 *p_udp_port) 442 + { 443 + if (p_port->b_update_port) { 444 + *p_update_port = 1; 445 + *p_udp_port = p_port->port; 446 + } 447 + 448 + __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls); 449 + } 450 + 451 + void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun) 452 + { 453 + if (p_tun->vxlan.b_mode_enabled) 454 + p_tun->vxlan.b_update_mode = true; 455 + if (p_tun->l2_geneve.b_mode_enabled) 456 + p_tun->l2_geneve.b_update_mode = true; 457 + if (p_tun->ip_geneve.b_mode_enabled) 458 + p_tun->ip_geneve.b_update_mode = true; 459 + if (p_tun->l2_gre.b_mode_enabled) 460 + p_tun->l2_gre.b_update_mode = true; 461 + if (p_tun->ip_gre.b_mode_enabled) 462 + p_tun->ip_gre.b_update_mode = true; 463 + 464 + p_tun->b_update_rx_cls = true; 465 + p_tun->b_update_tx_cls = true; 466 + } 467 + 468 + static void 469 + __qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun, 470 + u16 feature_mask, u8 tunn_mode, 471 + u8 tunn_cls, enum qed_tunn_mode val) 472 + { 473 + if (feature_mask & BIT(val)) { 474 + p_tun->b_mode_enabled = tunn_mode; 475 + p_tun->tun_cls = tunn_cls; 476 + } else { 477 + p_tun->b_mode_enabled = false; 478 + } 479 + } 480 + 481 + static void qed_vf_update_tunn_param(struct 
qed_hwfn *p_hwfn, 482 + struct qed_tunnel_info *p_tun, 483 + struct pfvf_update_tunn_param_tlv *p_resp) 484 + { 485 + /* Update mode and classes provided by PF */ 486 + u16 feat_mask = p_resp->tunn_feature_mask; 487 + 488 + __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask, 489 + p_resp->vxlan_mode, p_resp->vxlan_clss, 490 + QED_MODE_VXLAN_TUNN); 491 + __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask, 492 + p_resp->l2geneve_mode, 493 + p_resp->l2geneve_clss, 494 + QED_MODE_L2GENEVE_TUNN); 495 + __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask, 496 + p_resp->ipgeneve_mode, 497 + p_resp->ipgeneve_clss, 498 + QED_MODE_IPGENEVE_TUNN); 499 + __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask, 500 + p_resp->l2gre_mode, p_resp->l2gre_clss, 501 + QED_MODE_L2GRE_TUNN); 502 + __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask, 503 + p_resp->ipgre_mode, p_resp->ipgre_clss, 504 + QED_MODE_IPGRE_TUNN); 505 + p_tun->geneve_port.port = p_resp->geneve_udp_port; 506 + p_tun->vxlan_port.port = p_resp->vxlan_udp_port; 507 + 508 + DP_VERBOSE(p_hwfn, QED_MSG_IOV, 509 + "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x", 510 + p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled, 511 + p_tun->ip_geneve.b_mode_enabled, 512 + p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled); 513 + } 514 + 515 + int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, 516 + struct qed_tunnel_info *p_src) 517 + { 518 + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; 519 + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; 520 + struct pfvf_update_tunn_param_tlv *p_resp; 521 + struct vfpf_update_tunn_param_tlv *p_req; 522 + int rc; 523 + 524 + p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM, 525 + sizeof(*p_req)); 526 + 527 + if (p_src->b_update_rx_cls && p_src->b_update_tx_cls) 528 + p_req->update_tun_cls = 1; 529 + 530 + qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN, 531 + &p_req->vxlan_clss, 
&p_src->vxlan_port, 532 + &p_req->update_vxlan_port, 533 + &p_req->vxlan_port); 534 + qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve, 535 + QED_MODE_L2GENEVE_TUNN, 536 + &p_req->l2geneve_clss, &p_src->geneve_port, 537 + &p_req->update_geneve_port, 538 + &p_req->geneve_port); 539 + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve, 540 + QED_MODE_IPGENEVE_TUNN, 541 + &p_req->ipgeneve_clss); 542 + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre, 543 + QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss); 544 + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre, 545 + QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss); 546 + 547 + /* add list termination tlv */ 548 + qed_add_tlv(p_hwfn, &p_iov->offset, 549 + CHANNEL_TLV_LIST_END, 550 + sizeof(struct channel_list_end_tlv)); 551 + 552 + p_resp = &p_iov->pf2vf_reply->tunn_param_resp; 553 + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); 554 + 555 + if (rc) 556 + goto exit; 557 + 558 + if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) { 559 + DP_VERBOSE(p_hwfn, QED_MSG_IOV, 560 + "Failed to update tunnel parameters\n"); 561 + rc = -EINVAL; 562 + } 563 + 564 + qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp); 565 + exit: 566 + qed_vf_pf_req_end(p_hwfn, rc); 567 + return rc; 568 + } 569 + 421 570 int 422 571 qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, 423 572 struct qed_queue_cid *p_cid, ··· 1400 1251 return true; 1401 1252 } 1402 1253 1254 + static void 1255 + qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn, 1256 + u16 *p_vxlan_port, u16 *p_geneve_port) 1257 + { 1258 + struct qed_bulletin_content *p_bulletin; 1259 + 1260 + p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; 1261 + 1262 + *p_vxlan_port = p_bulletin->vxlan_udp_port; 1263 + *p_geneve_port = p_bulletin->geneve_udp_port; 1264 + } 1265 + 1403 1266 void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, 1404 1267 u16 *fw_major, u16 *fw_minor, 1405 1268 u16 *fw_rev, u16 *fw_eng) ··· 1431 1270 struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth; 1432 1271 u8 
mac[ETH_ALEN], is_mac_exist, is_mac_forced; 1433 1272 void *cookie = hwfn->cdev->ops_cookie; 1273 + u16 vxlan_port, geneve_port; 1434 1274 1275 + qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port); 1435 1276 is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac, 1436 1277 &is_mac_forced); 1437 1278 if (is_mac_exist && cookie) 1438 1279 ops->force_mac(cookie, mac, !!is_mac_forced); 1280 + 1281 + ops->ports_update(cookie, vxlan_port, geneve_port); 1439 1282 1440 1283 /* Always update link configuration according to bulletin */ 1441 1284 qed_link_update(hwfn);
+57 -1
drivers/net/ethernet/qlogic/qed/qed_vf.h
··· 429 429 u16 padding[3]; 430 430 }; 431 431 432 + /* tunnel update param tlv */ 433 + struct vfpf_update_tunn_param_tlv { 434 + struct vfpf_first_tlv first_tlv; 435 + 436 + u8 tun_mode_update_mask; 437 + u8 tunn_mode; 438 + u8 update_tun_cls; 439 + u8 vxlan_clss; 440 + u8 l2gre_clss; 441 + u8 ipgre_clss; 442 + u8 l2geneve_clss; 443 + u8 ipgeneve_clss; 444 + u8 update_geneve_port; 445 + u8 update_vxlan_port; 446 + u16 geneve_port; 447 + u16 vxlan_port; 448 + u8 padding[2]; 449 + }; 450 + 451 + struct pfvf_update_tunn_param_tlv { 452 + struct pfvf_tlv hdr; 453 + 454 + u16 tunn_feature_mask; 455 + u8 vxlan_mode; 456 + u8 l2geneve_mode; 457 + u8 ipgeneve_mode; 458 + u8 l2gre_mode; 459 + u8 ipgre_mode; 460 + u8 vxlan_clss; 461 + u8 l2gre_clss; 462 + u8 ipgre_clss; 463 + u8 l2geneve_clss; 464 + u8 ipgeneve_clss; 465 + u16 vxlan_udp_port; 466 + u16 geneve_udp_port; 467 + }; 468 + 432 469 struct tlv_buffer_size { 433 470 u8 tlv_buffer[TLV_BUFFER_SIZE]; 434 471 }; ··· 481 444 struct vfpf_vport_start_tlv start_vport; 482 445 struct vfpf_vport_update_tlv vport_update; 483 446 struct vfpf_ucast_filter_tlv ucast_filter; 447 + struct vfpf_update_tunn_param_tlv tunn_param_update; 484 448 struct channel_list_end_tlv list_end; 485 449 struct tlv_buffer_size tlv_buf_size; 486 450 }; ··· 491 453 struct pfvf_acquire_resp_tlv acquire_resp; 492 454 struct tlv_buffer_size tlv_buf_size; 493 455 struct pfvf_start_queue_resp_tlv queue_start; 456 + struct pfvf_update_tunn_param_tlv tunn_param_resp; 494 457 }; 495 458 496 459 enum qed_bulletin_bit { ··· 552 513 u8 partner_rx_flow_ctrl_en; 553 514 u8 partner_adv_pause; 554 515 u8 sfp_tx_fault; 555 - u8 padding4[6]; 516 + u16 vxlan_udp_port; 517 + u16 geneve_udp_port; 518 + u8 padding4[2]; 556 519 557 520 u32 speed; 558 521 u32 partner_adv_speed; ··· 596 555 CHANNEL_TLV_VPORT_UPDATE_RSS, 597 556 CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, 598 557 CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, 558 + CHANNEL_TLV_UPDATE_TUNN_PARAM, 599 559 CHANNEL_TLV_MAX, 
600 560 601 561 /* Required for iterating over vport-update tlvs. ··· 914 872 struct qed_bulletin_content *p_bulletin); 915 873 916 874 void qed_iov_vf_task(struct work_struct *work); 875 + void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun); 876 + int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, 877 + struct qed_tunnel_info *p_tunn); 917 878 #else 918 879 static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, 919 880 struct qed_mcp_link_params *params) ··· 1077 1032 1078 1033 static inline void qed_iov_vf_task(struct work_struct *work) 1079 1034 { 1035 + } 1036 + 1037 + static inline void 1038 + qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun) 1039 + { 1040 + } 1041 + 1042 + static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, 1043 + struct qed_tunnel_info *p_tunn) 1044 + { 1045 + return -EINVAL; 1080 1046 } 1081 1047 #endif 1082 1048
+1 -2
drivers/net/ethernet/qlogic/qede/qede.h
··· 442 442 #define QEDE_TUNN_CSUM_UNNECESSARY BIT(2) 443 443 444 444 #define QEDE_SP_RX_MODE 1 445 - #define QEDE_SP_VXLAN_PORT_CONFIG 2 446 - #define QEDE_SP_GENEVE_PORT_CONFIG 3 447 445 448 446 #ifdef CONFIG_RFS_ACCEL 449 447 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, ··· 480 482 481 483 /* Filtering function definitions */ 482 484 void qede_force_mac(void *dev, u8 *mac, bool forced); 485 + void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port); 483 486 int qede_set_mac_addr(struct net_device *ndev, void *p); 484 487 485 488 int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
+69 -15
drivers/net/ethernet/qlogic/qede/qede_filter.c
··· 480 480 } 481 481 #endif 482 482 483 + void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port) 484 + { 485 + struct qede_dev *edev = dev; 486 + 487 + if (edev->vxlan_dst_port != vxlan_port) 488 + edev->vxlan_dst_port = 0; 489 + 490 + if (edev->geneve_dst_port != geneve_port) 491 + edev->geneve_dst_port = 0; 492 + } 493 + 483 494 void qede_force_mac(void *dev, u8 *mac, bool forced) 484 495 { 485 496 struct qede_dev *edev = dev; ··· 894 883 void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti) 895 884 { 896 885 struct qede_dev *edev = netdev_priv(dev); 886 + struct qed_tunn_params tunn_params; 897 887 u16 t_port = ntohs(ti->port); 888 + int rc; 889 + 890 + memset(&tunn_params, 0, sizeof(tunn_params)); 898 891 899 892 switch (ti->type) { 900 893 case UDP_TUNNEL_TYPE_VXLAN: 894 + if (!edev->dev_info.common.vxlan_enable) 895 + return; 896 + 901 897 if (edev->vxlan_dst_port) 902 898 return; 903 899 904 - edev->vxlan_dst_port = t_port; 900 + tunn_params.update_vxlan_port = 1; 901 + tunn_params.vxlan_port = t_port; 905 902 906 - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", 907 - t_port); 903 + __qede_lock(edev); 904 + rc = edev->ops->tunn_config(edev->cdev, &tunn_params); 905 + __qede_unlock(edev); 908 906 909 - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); 907 + if (!rc) { 908 + edev->vxlan_dst_port = t_port; 909 + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n", 910 + t_port); 911 + } else { 912 + DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n", 913 + t_port); 914 + } 915 + 910 916 break; 911 917 case UDP_TUNNEL_TYPE_GENEVE: 918 + if (!edev->dev_info.common.geneve_enable) 919 + return; 920 + 912 921 if (edev->geneve_dst_port) 913 922 return; 914 923 915 - edev->geneve_dst_port = t_port; 924 + tunn_params.update_geneve_port = 1; 925 + tunn_params.geneve_port = t_port; 916 926 917 - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n", 918 - t_port); 919 - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, 
&edev->sp_flags); 927 + __qede_lock(edev); 928 + rc = edev->ops->tunn_config(edev->cdev, &tunn_params); 929 + __qede_unlock(edev); 930 + 931 + if (!rc) { 932 + edev->geneve_dst_port = t_port; 933 + DP_VERBOSE(edev, QED_MSG_DEBUG, 934 + "Added geneve port=%d\n", t_port); 935 + } else { 936 + DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n", 937 + t_port); 938 + } 939 + 920 940 break; 921 941 default: 922 942 return; 923 943 } 924 - 925 - schedule_delayed_work(&edev->sp_task, 0); 926 944 } 927 945 928 - void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti) 946 + void qede_udp_tunnel_del(struct net_device *dev, 947 + struct udp_tunnel_info *ti) 929 948 { 930 949 struct qede_dev *edev = netdev_priv(dev); 950 + struct qed_tunn_params tunn_params; 931 951 u16 t_port = ntohs(ti->port); 952 + 953 + memset(&tunn_params, 0, sizeof(tunn_params)); 932 954 933 955 switch (ti->type) { 934 956 case UDP_TUNNEL_TYPE_VXLAN: 935 957 if (t_port != edev->vxlan_dst_port) 936 958 return; 937 959 960 + tunn_params.update_vxlan_port = 1; 961 + tunn_params.vxlan_port = 0; 962 + 963 + __qede_lock(edev); 964 + edev->ops->tunn_config(edev->cdev, &tunn_params); 965 + __qede_unlock(edev); 966 + 938 967 edev->vxlan_dst_port = 0; 939 968 940 969 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n", 941 970 t_port); 942 971 943 - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); 944 972 break; 945 973 case UDP_TUNNEL_TYPE_GENEVE: 946 974 if (t_port != edev->geneve_dst_port) 947 975 return; 948 976 977 + tunn_params.update_geneve_port = 1; 978 + tunn_params.geneve_port = 0; 979 + 980 + __qede_lock(edev); 981 + edev->ops->tunn_config(edev->cdev, &tunn_params); 982 + __qede_unlock(edev); 983 + 949 984 edev->geneve_dst_port = 0; 950 985 951 986 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n", 952 987 t_port); 953 - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); 954 988 break; 955 989 default: 956 990 return; 957 991 } 958 - 959 - 
schedule_delayed_work(&edev->sp_task, 0); 960 992 } 961 993 962 994 static void qede_xdp_reload_func(struct qede_dev *edev,
+17 -6
drivers/net/ethernet/qlogic/qede/qede_fp.c
··· 1697 1697 } 1698 1698 1699 1699 /* Disable offloads for geneve tunnels, as HW can't parse 1700 - * the geneve header which has option length greater than 32B. 1700 + * the geneve header which has option length greater than 32b 1701 + * and disable offloads for the ports which are not offloaded. 1701 1702 */ 1702 - if ((l4_proto == IPPROTO_UDP) && 1703 - ((skb_inner_mac_header(skb) - 1704 - skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN)) 1705 - return features & ~(NETIF_F_CSUM_MASK | 1706 - NETIF_F_GSO_MASK); 1703 + if (l4_proto == IPPROTO_UDP) { 1704 + struct qede_dev *edev = netdev_priv(dev); 1705 + u16 hdrlen, vxln_port, gnv_port; 1706 + 1707 + hdrlen = QEDE_MAX_TUN_HDR_LEN; 1708 + vxln_port = edev->vxlan_dst_port; 1709 + gnv_port = edev->geneve_dst_port; 1710 + 1711 + if ((skb_inner_mac_header(skb) - 1712 + skb_transport_header(skb)) > hdrlen || 1713 + (ntohs(udp_hdr(skb)->dest) != vxln_port && 1714 + ntohs(udp_hdr(skb)->dest) != gnv_port)) 1715 + return features & ~(NETIF_F_CSUM_MASK | 1716 + NETIF_F_GSO_MASK); 1717 + } 1707 1718 } 1708 1719 1709 1720 return features;
+26 -30
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 231 231 .link_update = qede_link_update, 232 232 }, 233 233 .force_mac = qede_force_mac, 234 + .ports_update = qede_udp_ports_update, 234 235 }; 235 236 236 237 static int qede_netdev_event(struct notifier_block *this, unsigned long event, ··· 610 609 { 611 610 struct net_device *ndev = edev->ndev; 612 611 struct pci_dev *pdev = edev->pdev; 612 + bool udp_tunnel_enable = false; 613 613 netdev_features_t hw_features; 614 614 615 615 pci_set_drvdata(pdev, ndev); ··· 633 631 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 634 632 NETIF_F_TSO | NETIF_F_TSO6; 635 633 636 - /* Encap features*/ 637 - hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | 638 - NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM | 639 - NETIF_F_GSO_GRE_CSUM; 640 - 641 634 if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) 642 635 hw_features |= NETIF_F_NTUPLE; 643 636 644 - ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 645 - NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | 646 - NETIF_F_TSO6 | NETIF_F_GSO_GRE | 647 - NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM | 648 - NETIF_F_GSO_UDP_TUNNEL_CSUM | 649 - NETIF_F_GSO_GRE_CSUM; 637 + if (edev->dev_info.common.vxlan_enable || 638 + edev->dev_info.common.geneve_enable) 639 + udp_tunnel_enable = true; 640 + 641 + if (udp_tunnel_enable || edev->dev_info.common.gre_enable) { 642 + hw_features |= NETIF_F_TSO_ECN; 643 + ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 644 + NETIF_F_SG | NETIF_F_TSO | 645 + NETIF_F_TSO_ECN | NETIF_F_TSO6 | 646 + NETIF_F_RXCSUM; 647 + } 648 + 649 + if (udp_tunnel_enable) { 650 + hw_features |= (NETIF_F_GSO_UDP_TUNNEL | 651 + NETIF_F_GSO_UDP_TUNNEL_CSUM); 652 + ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL | 653 + NETIF_F_GSO_UDP_TUNNEL_CSUM); 654 + } 655 + 656 + if (edev->dev_info.common.gre_enable) { 657 + hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM); 658 + ndev->hw_enc_features |= (NETIF_F_GSO_GRE | 659 + NETIF_F_GSO_GRE_CSUM); 660 + } 650 661 651 662 ndev->vlan_features = 
hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | 652 663 NETIF_F_HIGHDMA; ··· 797 782 { 798 783 struct qede_dev *edev = container_of(work, struct qede_dev, 799 784 sp_task.work); 800 - struct qed_dev *cdev = edev->cdev; 801 785 802 786 __qede_lock(edev); 803 787 804 788 if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags)) 805 789 if (edev->state == QEDE_STATE_OPEN) 806 790 qede_config_rx_mode(edev->ndev); 807 - 808 - if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) { 809 - struct qed_tunn_params tunn_params; 810 - 811 - memset(&tunn_params, 0, sizeof(tunn_params)); 812 - tunn_params.update_vxlan_port = 1; 813 - tunn_params.vxlan_port = edev->vxlan_dst_port; 814 - qed_ops->tunn_config(cdev, &tunn_params); 815 - } 816 - 817 - if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) { 818 - struct qed_tunn_params tunn_params; 819 - 820 - memset(&tunn_params, 0, sizeof(tunn_params)); 821 - tunn_params.update_geneve_port = 1; 822 - tunn_params.geneve_port = edev->geneve_dst_port; 823 - qed_ops->tunn_config(cdev, &tunn_params); 824 - } 825 791 826 792 #ifdef CONFIG_RFS_ACCEL 827 793 if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
+1
include/linux/qed/qed_eth_if.h
··· 158 158 struct qed_eth_cb_ops { 159 159 struct qed_common_cb_ops common; 160 160 void (*force_mac) (void *dev, u8 *mac, bool forced); 161 + void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port); 161 162 }; 162 163 163 164 #define QED_MAX_PHC_DRIFT_PPB 291666666
+5
include/linux/qed/qed_if.h
··· 338 338 bool wol_support; 339 339 340 340 enum qed_dev_type dev_type; 341 + 342 + /* Output parameters for qede */ 343 + bool vxlan_enable; 344 + bool gre_enable; 345 + bool geneve_enable; 341 346 }; 342 347 343 348 enum qed_sb_type {