Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-mlx5-support-disabling-host-pfs'

Tariq Toukan says:

====================
net/mlx5: Support disabling host PFs

This small series by Daniel adds support for disabling host PFs.
If the device is capable and configured, the driver won't access the vports of
disabled host functions.
====================

Link: https://patch.msgid.link/1755112796-467444-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+90 -37
+62 -23
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1038 1038 return ERR_PTR(err); 1039 1039 } 1040 1040 1041 + static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw) 1042 + { 1043 + const u32 *query_host_out; 1044 + 1045 + if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) 1046 + return 0; 1047 + 1048 + query_host_out = mlx5_esw_query_functions(esw->dev); 1049 + if (IS_ERR(query_host_out)) 1050 + return PTR_ERR(query_host_out); 1051 + 1052 + esw->esw_funcs.host_funcs_disabled = 1053 + MLX5_GET(query_esw_functions_out, query_host_out, 1054 + host_params_context.host_pf_not_exist); 1055 + 1056 + kvfree(query_host_out); 1057 + return 0; 1058 + } 1059 + 1041 1060 static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw) 1042 1061 { 1043 1062 if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) { ··· 1297 1278 esw->mode == MLX5_ESWITCH_LEGACY; 1298 1279 1299 1280 /* Enable PF vport */ 1300 - if (pf_needed) { 1281 + if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) { 1301 1282 ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, 1302 1283 enabled_events); 1303 1284 if (ret) 1304 1285 return ret; 1305 1286 } 1306 1287 1307 - /* Enable external host PF HCA */ 1308 - ret = host_pf_enable_hca(esw->dev); 1309 - if (ret) 1310 - goto pf_hca_err; 1288 + if (mlx5_esw_host_functions_enabled(esw->dev)) { 1289 + /* Enable external host PF HCA */ 1290 + ret = host_pf_enable_hca(esw->dev); 1291 + if (ret) 1292 + goto pf_hca_err; 1293 + } 1311 1294 1312 1295 /* Enable ECPF vport */ 1313 1296 if (mlx5_ecpf_vport_exists(esw->dev)) { ··· 1341 1320 if (mlx5_ecpf_vport_exists(esw->dev)) 1342 1321 mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF); 1343 1322 ecpf_err: 1344 - host_pf_disable_hca(esw->dev); 1323 + if (mlx5_esw_host_functions_enabled(esw->dev)) 1324 + host_pf_disable_hca(esw->dev); 1345 1325 pf_hca_err: 1346 - if (pf_needed) 1326 + if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) 1347 1327 mlx5_eswitch_unload_pf_vf_vport(esw, 
MLX5_VPORT_PF); 1348 1328 return ret; 1349 1329 } ··· 1364 1342 mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF); 1365 1343 } 1366 1344 1367 - host_pf_disable_hca(esw->dev); 1345 + if (mlx5_esw_host_functions_enabled(esw->dev)) 1346 + host_pf_disable_hca(esw->dev); 1368 1347 1369 - if (mlx5_core_is_ecpf_esw_manager(esw->dev) || 1370 - esw->mode == MLX5_ESWITCH_LEGACY) 1348 + if ((mlx5_core_is_ecpf_esw_manager(esw->dev) || 1349 + esw->mode == MLX5_ESWITCH_LEGACY) && 1350 + mlx5_esw_host_functions_enabled(esw->dev)) 1371 1351 mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF); 1372 1352 } 1373 1353 ··· 1698 1674 void *hca_caps; 1699 1675 int err; 1700 1676 1701 - if (!mlx5_core_is_ecpf(dev)) { 1677 + if (!mlx5_core_is_ecpf(dev) || 1678 + !mlx5_esw_host_functions_enabled(dev)) { 1702 1679 *max_sfs = 0; 1703 1680 return 0; 1704 1681 } ··· 1775 1750 1776 1751 xa_init(&esw->vports); 1777 1752 1778 - err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF); 1779 - if (err) 1780 - goto err; 1781 - if (esw->first_host_vport == MLX5_VPORT_PF) 1782 - xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); 1783 - idx++; 1784 - 1785 - for (i = 0; i < mlx5_core_max_vfs(dev); i++) { 1786 - err = mlx5_esw_vport_alloc(esw, idx, idx); 1753 + if (mlx5_esw_host_functions_enabled(dev)) { 1754 + err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF); 1787 1755 if (err) 1788 1756 goto err; 1789 - xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF); 1790 - xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); 1757 + if (esw->first_host_vport == MLX5_VPORT_PF) 1758 + xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); 1791 1759 idx++; 1760 + for (i = 0; i < mlx5_core_max_vfs(dev); i++) { 1761 + err = mlx5_esw_vport_alloc(esw, idx, idx); 1762 + if (err) 1763 + goto err; 1764 + xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF); 1765 + xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN); 1766 + idx++; 1767 + } 1792 1768 } 1769 + 1793 1770 base_sf_num = mlx5_sf_start_function_id(dev); 1794 1771 for 
(i = 0; i < mlx5_sf_max_functions(dev); i++) { 1795 1772 err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i); ··· 1891 1864 goto free_esw; 1892 1865 1893 1866 esw->dev = dev; 1867 + dev->priv.eswitch = esw; 1894 1868 esw->manager_vport = mlx5_eswitch_manager_vport(dev); 1895 1869 esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev); 1896 1870 ··· 1902 1874 goto abort; 1903 1875 } 1904 1876 1877 + err = mlx5_esw_host_functions_enabled_query(esw); 1878 + if (err) 1879 + goto abort; 1880 + 1905 1881 err = mlx5_esw_vports_init(esw); 1906 1882 if (err) 1907 1883 goto abort; 1908 1884 1909 - dev->priv.eswitch = esw; 1910 1885 err = esw_offloads_init(esw); 1911 1886 if (err) 1912 1887 goto reps_err; ··· 2440 2409 mutex_lock(&esw->state_lock); 2441 2410 dev->num_ipsec_offloads--; 2442 2411 mutex_unlock(&esw->state_lock); 2412 + } 2413 + 2414 + bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev) 2415 + { 2416 + if (!dev->priv.eswitch) 2417 + return true; 2418 + 2419 + return !dev->priv.eswitch->esw_funcs.host_funcs_disabled; 2443 2420 }
+8
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 323 323 324 324 struct mlx5_esw_functions { 325 325 struct mlx5_nb nb; 326 + bool host_funcs_disabled; 326 327 u16 num_vfs; 327 328 u16 num_ec_vfs; 328 329 }; ··· 899 898 bool enable); 900 899 int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev, 901 900 u16 vport_num); 901 + bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev); 902 902 #else /* CONFIG_MLX5_ESWITCH */ 903 903 /* eswitch API stubs */ 904 904 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } ··· 967 965 } 968 966 969 967 static inline void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev) {} 968 + 969 + static inline bool 970 + mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev) 971 + { 972 + return true; 973 + } 970 974 #endif /* CONFIG_MLX5_ESWITCH */ 971 975 972 976 #endif /* __MLX5_ESWITCH_H__ */
+20 -14
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 1213 1213 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1214 1214 misc_parameters); 1215 1215 1216 - if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { 1216 + if (mlx5_core_is_ecpf_esw_manager(peer_dev) && 1217 + mlx5_esw_host_functions_enabled(peer_dev)) { 1217 1218 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1218 1219 esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, 1219 1220 MLX5_VPORT_PF); ··· 1240 1239 flows[peer_vport->index] = flow; 1241 1240 } 1242 1241 1243 - mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1244 - mlx5_core_max_vfs(peer_dev)) { 1245 - esw_set_peer_miss_rule_source_port(esw, 1246 - peer_esw, 1247 - spec, peer_vport->vport); 1242 + if (mlx5_esw_host_functions_enabled(esw->dev)) { 1243 + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1244 + mlx5_core_max_vfs(peer_dev)) { 1245 + esw_set_peer_miss_rule_source_port(esw, peer_esw, 1246 + spec, 1247 + peer_vport->vport); 1248 1248 1249 - flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1250 - spec, &flow_act, &dest, 1); 1251 - if (IS_ERR(flow)) { 1252 - err = PTR_ERR(flow); 1253 - goto add_vf_flow_err; 1249 + flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1250 + spec, &flow_act, &dest, 1); 1251 + if (IS_ERR(flow)) { 1252 + err = PTR_ERR(flow); 1253 + goto add_vf_flow_err; 1254 + } 1255 + flows[peer_vport->index] = flow; 1254 1256 } 1255 - flows[peer_vport->index] = flow; 1256 1257 } 1257 1258 1258 1259 if (mlx5_core_ec_sriov_enabled(peer_dev)) { ··· 1304 1301 mlx5_del_flow_rules(flows[peer_vport->index]); 1305 1302 } 1306 1303 add_ecpf_flow_err: 1307 - if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { 1304 + 1305 + if (mlx5_core_is_ecpf_esw_manager(peer_dev) && 1306 + mlx5_esw_host_functions_enabled(peer_dev)) { 1308 1307 peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1309 1308 mlx5_del_flow_rules(flows[peer_vport->index]); 1310 1309 } ··· 4064 4059 { 4065 4060 /* Currently, only ECPF based device has representor for host 
PF. */ 4066 4061 if (vport_num == MLX5_VPORT_PF && 4067 - !mlx5_core_is_ecpf_esw_manager(esw->dev)) 4062 + (!mlx5_core_is_ecpf_esw_manager(esw->dev) || 4063 + !mlx5_esw_host_functions_enabled(esw->dev))) 4068 4064 return false; 4069 4065 4070 4066 if (vport_num == MLX5_VPORT_ECPF &&