Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx5-misc-fixes-2025-07-17'

Tariq Toukan says:

====================
mlx5 misc fixes 2025-07-17

This small patchset provides misc bug fixes from the team to the mlx5
driver.
====================

Link: https://patch.msgid.link/1752753970-261832-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+56 -56
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 1947 1947 1948 1948 err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, 1949 1949 pages_queue, token, force_polling); 1950 - if (callback) 1951 - return err; 1950 + if (callback && !err) 1951 + return 0; 1952 1952 1953 1953 if (err > 0) /* Failed in FW, command didn't execute */ 1954 1954 err = deliv_status_to_err(err);
+54 -54
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 1182 1182 static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, 1183 1183 struct mlx5_core_dev *peer_dev) 1184 1184 { 1185 + struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; 1185 1186 struct mlx5_flow_destination dest = {}; 1186 1187 struct mlx5_flow_act flow_act = {0}; 1187 1188 struct mlx5_flow_handle **flows; 1188 - /* total vports is the same for both e-switches */ 1189 - int nvports = esw->total_vports; 1190 1189 struct mlx5_flow_handle *flow; 1190 + struct mlx5_vport *peer_vport; 1191 1191 struct mlx5_flow_spec *spec; 1192 - struct mlx5_vport *vport; 1193 1192 int err, pfindex; 1194 1193 unsigned long i; 1195 1194 void *misc; 1196 1195 1197 - if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev)) 1196 + if (!MLX5_VPORT_MANAGER(peer_dev) && 1197 + !mlx5_core_is_ecpf_esw_manager(peer_dev)) 1198 1198 return 0; 1199 1199 1200 1200 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); ··· 1203 1203 1204 1204 peer_miss_rules_setup(esw, peer_dev, spec, &dest); 1205 1205 1206 - flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL); 1206 + flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL); 1207 1207 if (!flows) { 1208 1208 err = -ENOMEM; 1209 1209 goto alloc_flows_err; ··· 1213 1213 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1214 1214 misc_parameters); 1215 1215 1216 - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1217 - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); 1218 - esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, 1219 - spec, MLX5_VPORT_PF); 1216 + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { 1217 + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1218 + esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, 1219 + MLX5_VPORT_PF); 1220 1220 1221 1221 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1222 1222 spec, &flow_act, &dest, 1); ··· 1224 1224 err = PTR_ERR(flow); 1225 1225 goto add_pf_flow_err; 1226 1226 } 1227 - flows[vport->index] = flow;
1227 + flows[peer_vport->index] = flow; 1228 1228 } 1229 1229 1230 - if (mlx5_ecpf_vport_exists(esw->dev)) { 1231 - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); 1230 + if (mlx5_ecpf_vport_exists(peer_dev)) { 1231 + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); 1232 1232 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); 1233 1233 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1234 1234 spec, &flow_act, &dest, 1); ··· 1236 1236 err = PTR_ERR(flow); 1237 1237 goto add_ecpf_flow_err; 1238 1238 } 1239 - flows[vport->index] = flow; 1239 + flows[peer_vport->index] = flow; 1240 1240 } 1241 1241 1242 - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { 1242 + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1243 + mlx5_core_max_vfs(peer_dev)) { 1243 1244 esw_set_peer_miss_rule_source_port(esw, 1244 - peer_dev->priv.eswitch, 1245 - spec, vport->vport); 1245 + peer_esw, 1246 + spec, peer_vport->vport); 1246 1247 1247 1248 flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), 1248 1249 spec, &flow_act, &dest, 1); ··· 1251 1250 err = PTR_ERR(flow); 1252 1251 goto add_vf_flow_err; 1253 1252 } 1254 - flows[vport->index] = flow; 1253 + flows[peer_vport->index] = flow; 1255 1254 } 1256 1255 1257 - if (mlx5_core_ec_sriov_enabled(esw->dev)) { 1258 - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { 1259 - if (i >= mlx5_core_max_ec_vfs(peer_dev)) 1260 - break; 1261 - esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, 1262 - spec, vport->vport); 1256 + if (mlx5_core_ec_sriov_enabled(peer_dev)) { 1257 + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, 1258 + mlx5_core_max_ec_vfs(peer_dev)) { 1259 + esw_set_peer_miss_rule_source_port(esw, peer_esw, 1260 + spec, 1261 + peer_vport->vport); 1263 1262 flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, 1264 1263 spec, &flow_act, &dest, 1); 1265 1264 if (IS_ERR(flow)) { 1266 1265 err = PTR_ERR(flow);
1267 1266 goto add_ec_vf_flow_err; 1268 1267 } 1269 - flows[vport->index] = flow; 1268 + flows[peer_vport->index] = flow; 1270 1269 } 1271 1270 } 1272 1271 ··· 1283 1282 return 0; 1284 1283 1285 1284 add_ec_vf_flow_err: 1286 - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { 1287 - if (!flows[vport->index]) 1285 + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, 1286 + mlx5_core_max_ec_vfs(peer_dev)) { 1287 + if (!flows[peer_vport->index]) 1288 1288 continue; 1289 - mlx5_del_flow_rules(flows[vport->index]); 1289 + mlx5_del_flow_rules(flows[peer_vport->index]); 1290 1290 } 1291 1291 add_vf_flow_err: 1292 - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { 1293 - if (!flows[vport->index]) 1292 + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1293 + mlx5_core_max_vfs(peer_dev)) { 1294 + if (!flows[peer_vport->index]) 1294 1295 continue; 1295 - mlx5_del_flow_rules(flows[vport->index]); 1296 + mlx5_del_flow_rules(flows[peer_vport->index]); 1296 1297 } 1297 - if (mlx5_ecpf_vport_exists(esw->dev)) { 1298 - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); 1299 - mlx5_del_flow_rules(flows[vport->index]); 1298 + if (mlx5_ecpf_vport_exists(peer_dev)) { 1299 + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); 1300 + mlx5_del_flow_rules(flows[peer_vport->index]); 1300 1301 } 1301 1302 add_ecpf_flow_err: 1302 - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1303 - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); 1304 - mlx5_del_flow_rules(flows[vport->index]); 1303 + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { 1304 + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1305 + mlx5_del_flow_rules(flows[peer_vport->index]); 1305 1306 } 1306 1307 add_pf_flow_err: 1307 1308 esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); ··· 1316 1313 static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw, 1317 1314 struct mlx5_core_dev *peer_dev)
1318 1315 { 1316 + struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; 1319 1317 u16 peer_index = mlx5_get_dev_index(peer_dev); 1320 1318 struct mlx5_flow_handle **flows; 1321 - struct mlx5_vport *vport; 1319 + struct mlx5_vport *peer_vport; 1322 1320 unsigned long i; 1323 1321 1324 1322 flows = esw->fdb_table.offloads.peer_miss_rules[peer_index]; 1325 1323 if (!flows) 1326 1324 return; 1327 1325 1328 - if (mlx5_core_ec_sriov_enabled(esw->dev)) { 1329 - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { 1330 - /* The flow for a particular vport could be NULL if the other ECPF 1331 - * has fewer or no VFs enabled 1332 - */ 1333 - if (!flows[vport->index]) 1334 - continue; 1335 - mlx5_del_flow_rules(flows[vport->index]); 1336 - } 1326 + if (mlx5_core_ec_sriov_enabled(peer_dev)) { 1327 + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, 1328 + mlx5_core_max_ec_vfs(peer_dev)) 1329 + mlx5_del_flow_rules(flows[peer_vport->index]); 1337 1330 } 1338 1331 1339 - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) 1340 - mlx5_del_flow_rules(flows[vport->index]); 1332 + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, 1333 + mlx5_core_max_vfs(peer_dev)) 1334 + mlx5_del_flow_rules(flows[peer_vport->index]); 1341 1335 1342 - if (mlx5_ecpf_vport_exists(esw->dev)) { 1343 - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); 1344 - mlx5_del_flow_rules(flows[vport->index]); 1336 + if (mlx5_ecpf_vport_exists(peer_dev)) { 1337 + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); 1338 + mlx5_del_flow_rules(flows[peer_vport->index]); 1345 1339 } 1346 1340 1347 - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { 1348 - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); 1349 - mlx5_del_flow_rules(flows[vport->index]); 1341 + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { 1342 + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); 1343 + mlx5_del_flow_rules(flows[peer_vport->index]); 1350 1344 } 1351 1345 
1352 1346 kvfree(flows);