Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2021-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-10-12

* tag 'mlx5-fixes-2021-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
net/mlx5e: Fix division by 0 in mlx5e_select_queue for representors
net/mlx5e: Mutually exclude RX-FCS and RX-port-timestamp
net/mlx5e: Switchdev representors are not vlan challenged
net/mlx5e: Fix memory leak in mlx5_core_destroy_cq() error path
net/mlx5e: Allow only complete TXQs partition in MQPRIO channel mode
net/mlx5: Fix cleanup of bridge delayed work
====================

Link: https://lore.kernel.org/r/20211012205323.20123-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+74 -18
+3 -4
drivers/net/ethernet/mellanox/mlx5/core/cq.c
··· 155 155 u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {}; 156 156 int err; 157 157 158 + mlx5_debug_cq_remove(dev, cq); 159 + 158 160 mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq); 159 161 mlx5_eq_del_cq(&cq->eq->core, cq); 160 162 ··· 164 162 MLX5_SET(destroy_cq_in, in, cqn, cq->cqn); 165 163 MLX5_SET(destroy_cq_in, in, uid, cq->uid); 166 164 err = mlx5_cmd_exec_in(dev, destroy_cq, in); 167 - if (err) 168 - return err; 169 165 170 166 synchronize_irq(cq->irqn); 171 167 172 - mlx5_debug_cq_remove(dev, cq); 173 168 mlx5_cq_put(cq); 174 169 wait_for_completion(&cq->free); 175 170 176 - return 0; 171 + return err; 177 172 } 178 173 EXPORT_SYMBOL(mlx5_core_destroy_cq); 179 174
+4 -4
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
··· 475 475 esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n"); 476 476 goto err_alloc_wq; 477 477 } 478 - INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work); 479 - queue_delayed_work(br_offloads->wq, &br_offloads->update_work, 480 - msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL)); 481 478 482 479 br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event; 483 480 err = register_switchdev_notifier(&br_offloads->nb); ··· 497 500 err); 498 501 goto err_register_netdev; 499 502 } 503 + INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work); 504 + queue_delayed_work(br_offloads->wq, &br_offloads->update_work, 505 + msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL)); 500 506 return; 501 507 502 508 err_register_netdev: ··· 523 523 if (!br_offloads) 524 524 return; 525 525 526 + cancel_delayed_work_sync(&br_offloads->update_work); 526 527 unregister_netdevice_notifier(&br_offloads->netdev_nb); 527 528 unregister_switchdev_blocking_notifier(&br_offloads->nb_blk); 528 529 unregister_switchdev_notifier(&br_offloads->nb); 529 - cancel_delayed_work(&br_offloads->update_work); 530 530 destroy_workqueue(br_offloads->wq); 531 531 rtnl_lock(); 532 532 mlx5_esw_bridge_cleanup(esw);
+54 -7
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 2981 2981 agg_count += mqprio->qopt.count[i]; 2982 2982 } 2983 2983 2984 - if (priv->channels.params.num_channels < agg_count) { 2985 - netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n", 2984 + if (priv->channels.params.num_channels != agg_count) { 2985 + netdev_err(netdev, "Num of queues (%d) does not match available (%d)\n", 2986 2986 agg_count, priv->channels.params.num_channels); 2987 2987 return -EINVAL; 2988 2988 } ··· 3325 3325 return mlx5_set_port_fcs(mdev, !enable); 3326 3326 } 3327 3327 3328 + static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable) 3329 + { 3330 + u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {}; 3331 + bool supported, curr_state; 3332 + int err; 3333 + 3334 + if (!MLX5_CAP_GEN(mdev, ports_check)) 3335 + return 0; 3336 + 3337 + err = mlx5_query_ports_check(mdev, in, sizeof(in)); 3338 + if (err) 3339 + return err; 3340 + 3341 + supported = MLX5_GET(pcmr_reg, in, rx_ts_over_crc_cap); 3342 + curr_state = MLX5_GET(pcmr_reg, in, rx_ts_over_crc); 3343 + 3344 + if (!supported || enable == curr_state) 3345 + return 0; 3346 + 3347 + MLX5_SET(pcmr_reg, in, local_port, 1); 3348 + MLX5_SET(pcmr_reg, in, rx_ts_over_crc, enable); 3349 + 3350 + return mlx5_set_ports_check(mdev, in, sizeof(in)); 3351 + } 3352 + 3328 3353 static int set_feature_rx_fcs(struct net_device *netdev, bool enable) 3329 3354 { 3330 3355 struct mlx5e_priv *priv = netdev_priv(netdev); 3356 + struct mlx5e_channels *chs = &priv->channels; 3357 + struct mlx5_core_dev *mdev = priv->mdev; 3331 3358 int err; 3332 3359 3333 3360 mutex_lock(&priv->state_lock); 3334 3361 3335 - priv->channels.params.scatter_fcs_en = enable; 3336 - err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable); 3337 - if (err) 3338 - priv->channels.params.scatter_fcs_en = !enable; 3362 + if (enable) { 3363 + err = mlx5e_set_rx_port_ts(mdev, false); 3364 + if (err) 3365 + goto out; 3339 3366 3367 + chs->params.scatter_fcs_en = true; 3368 + err = mlx5e_modify_channels_scatter_fcs(chs, true); 3369 + if (err) { 3370 + chs->params.scatter_fcs_en = false; 3371 + mlx5e_set_rx_port_ts(mdev, true); 3372 + } 3373 + } else { 3374 + chs->params.scatter_fcs_en = false; 3375 + err = mlx5e_modify_channels_scatter_fcs(chs, false); 3376 + if (err) { 3377 + chs->params.scatter_fcs_en = true; 3378 + goto out; 3379 + } 3380 + err = mlx5e_set_rx_port_ts(mdev, true); 3381 + if (err) { 3382 + mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err); 3383 + err = 0; 3384 + } 3385 + } 3386 + 3387 + out: 3340 3388 mutex_unlock(&priv->state_lock); 3341 - 3342 3389 return err; 3343 3390 } 3344 3391
+5 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 618 618 params->mqprio.num_tc = 1; 619 619 params->tunneled_offload_en = false; 620 620 621 + /* Set an initial non-zero value, so that mlx5e_select_queue won't 622 + * divide by zero if called before first activating channels. 623 + */ 624 + priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc; 625 + 621 626 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode); 622 627 } 623 628 ··· 648 643 netdev->hw_features |= NETIF_F_RXCSUM; 649 644 650 645 netdev->features |= netdev->hw_features; 651 - netdev->features |= NETIF_F_VLAN_CHALLENGED; 652 646 netdev->features |= NETIF_F_NETNS_LOCAL; 653 647 } 654 648
+8 -2
include/linux/mlx5/mlx5_ifc.h
··· 9475 9475 u8 reserved_at_0[0x8]; 9476 9476 u8 local_port[0x8]; 9477 9477 u8 reserved_at_10[0x10]; 9478 + 9478 9479 u8 entropy_force_cap[0x1]; 9479 9480 u8 entropy_calc_cap[0x1]; 9480 9481 u8 entropy_gre_calc_cap[0x1]; 9481 - u8 reserved_at_23[0x1b]; 9482 + u8 reserved_at_23[0xf]; 9483 + u8 rx_ts_over_crc_cap[0x1]; 9484 + u8 reserved_at_33[0xb]; 9482 9485 u8 fcs_cap[0x1]; 9483 9486 u8 reserved_at_3f[0x1]; 9487 + 9484 9488 u8 entropy_force[0x1]; 9485 9489 u8 entropy_calc[0x1]; 9486 9490 u8 entropy_gre_calc[0x1]; 9487 - u8 reserved_at_43[0x1b]; 9491 + u8 reserved_at_43[0xf]; 9492 + u8 rx_ts_over_crc[0x1]; 9493 + u8 reserved_at_53[0xb]; 9488 9494 u8 fcs_chk[0x1]; 9489 9495 u8 reserved_at_5f[0x1]; 9490 9496 };