Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx5e: Rewrite IPsec vs. TC block interface

In commit 366e46242b8e ("net/mlx5e: Make IPsec offload work together
with eswitch and TC"), a new API to block IPsec vs. TC creation was introduced.

Internally, that API used the devlink lock to avoid races with userspace, but it is
not really needed, as dev->priv.eswitch is stable and can't be changed. So remove
the dependency on the devlink lock and move the block-encap code back to its original place.

Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Link: https://lore.kernel.org/r/20230825062836.103744-5-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Leon Romanovsky and committed by
Jakub Kicinski
e2537341 c46fb773

+38 -93
+24 -39
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 254 254 mlx5_del_flow_rules(rx->sa.rule); 255 255 mlx5_destroy_flow_group(rx->sa.group); 256 256 mlx5_destroy_flow_table(rx->ft.sa); 257 + if (rx->allow_tunnel_mode) 258 + mlx5_eswitch_unblock_encap(mdev); 257 259 if (rx == ipsec->rx_esw) { 258 260 mlx5_esw_ipsec_rx_status_destroy(ipsec, rx); 259 261 } else { ··· 359 357 goto err_add; 360 358 361 359 /* Create FT */ 360 + if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL) 361 + rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev); 362 362 if (rx->allow_tunnel_mode) 363 363 flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 364 364 ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags); ··· 415 411 err_fs: 416 412 mlx5_destroy_flow_table(rx->ft.sa); 417 413 err_fs_ft: 414 + if (rx->allow_tunnel_mode) 415 + mlx5_eswitch_unblock_encap(mdev); 418 416 mlx5_del_flow_rules(rx->status.rule); 419 417 mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr); 420 418 err_add: ··· 434 428 if (rx->ft.refcnt) 435 429 goto skip; 436 430 437 - if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL) 438 - rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev); 439 - 440 - err = mlx5_eswitch_block_mode_trylock(mdev); 431 + err = mlx5_eswitch_block_mode(mdev); 441 432 if (err) 442 - goto err_out; 433 + return err; 443 434 444 435 err = rx_create(mdev, ipsec, rx, family); 445 - mlx5_eswitch_block_mode_unlock(mdev, err); 446 - if (err) 447 - goto err_out; 436 + if (err) { 437 + mlx5_eswitch_unblock_mode(mdev); 438 + return err; 439 + } 448 440 449 441 skip: 450 442 rx->ft.refcnt++; 451 443 return 0; 452 - 453 - err_out: 454 - if (rx->allow_tunnel_mode) 455 - mlx5_eswitch_unblock_encap(mdev); 456 - return err; 457 444 } 458 445 459 446 static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx, ··· 455 456 if (--rx->ft.refcnt) 456 457 return; 457 458 458 - mlx5_eswitch_unblock_mode_lock(ipsec->mdev); 459 459 rx_destroy(ipsec->mdev, ipsec, rx, family); 460 - mlx5_eswitch_unblock_mode_unlock(ipsec->mdev); 
461 - 462 - if (rx->allow_tunnel_mode) 463 - mlx5_eswitch_unblock_encap(ipsec->mdev); 460 + mlx5_eswitch_unblock_mode(ipsec->mdev); 464 461 } 465 462 466 463 static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev, ··· 576 581 mlx5_destroy_flow_group(tx->sa.group); 577 582 } 578 583 mlx5_destroy_flow_table(tx->ft.sa); 584 + if (tx->allow_tunnel_mode) 585 + mlx5_eswitch_unblock_encap(ipsec->mdev); 579 586 mlx5_del_flow_rules(tx->status.rule); 580 587 mlx5_destroy_flow_table(tx->ft.status); 581 588 } ··· 618 621 if (err) 619 622 goto err_status_rule; 620 623 624 + if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL) 625 + tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev); 621 626 if (tx->allow_tunnel_mode) 622 627 flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; 623 628 ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags); ··· 686 687 err_sa_miss: 687 688 mlx5_destroy_flow_table(tx->ft.sa); 688 689 err_sa_ft: 690 + if (tx->allow_tunnel_mode) 691 + mlx5_eswitch_unblock_encap(mdev); 689 692 mlx5_del_flow_rules(tx->status.rule); 690 693 err_status_rule: 691 694 mlx5_destroy_flow_table(tx->ft.status); ··· 721 720 if (tx->ft.refcnt) 722 721 goto skip; 723 722 724 - if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL) 725 - tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev); 726 - 727 - err = mlx5_eswitch_block_mode_trylock(mdev); 723 + err = mlx5_eswitch_block_mode(mdev); 728 724 if (err) 729 - goto err_out; 725 + return err; 730 726 731 727 err = tx_create(ipsec, tx, ipsec->roce); 732 728 if (err) { 733 - mlx5_eswitch_block_mode_unlock(mdev, err); 734 - goto err_out; 729 + mlx5_eswitch_unblock_mode(mdev); 730 + return err; 735 731 } 736 732 737 733 if (tx == ipsec->tx_esw) 738 734 ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol); 739 735 740 - mlx5_eswitch_block_mode_unlock(mdev, err); 741 - 742 736 skip: 743 737 tx->ft.refcnt++; 744 738 return 0; 745 - 746 - err_out: 747 - if (tx->allow_tunnel_mode) 748 - 
mlx5_eswitch_unblock_encap(mdev); 749 - return err; 750 739 } 751 740 752 741 static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx) ··· 744 753 if (--tx->ft.refcnt) 745 754 return; 746 755 747 - mlx5_eswitch_unblock_mode_lock(ipsec->mdev); 748 - 749 756 if (tx == ipsec->tx_esw) { 750 757 mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev); 751 758 ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL); 752 759 } 753 760 754 761 tx_destroy(ipsec, tx, ipsec->roce); 755 - 756 - mlx5_eswitch_unblock_mode_unlock(ipsec->mdev); 757 - 758 - if (tx->allow_tunnel_mode) 759 - mlx5_eswitch_unblock_encap(ipsec->mdev); 762 + mlx5_eswitch_unblock_mode(ipsec->mdev); 760 763 } 761 764 762 765 static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
+4 -11
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 829 829 bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev); 830 830 void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev); 831 831 832 - int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev); 833 - void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err); 834 - void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev); 835 - void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev); 832 + int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev); 833 + void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev); 836 834 837 835 static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw) 838 836 { ··· 914 916 { 915 917 } 916 918 917 - static inline int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev) { return 0; } 918 - 919 - static inline void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err) {} 920 - 921 - static inline void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev) {} 922 - 923 - static inline void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev) {} 919 + static inline int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) { return 0; } 920 + static inline void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) {} 924 921 #endif /* CONFIG_MLX5_ESWITCH */ 925 922 926 923 #endif /* __MLX5_ESWITCH_H__ */
+10 -43
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 3641 3641 return net_eq(devl_net, netdev_net); 3642 3642 } 3643 3643 3644 - int mlx5_eswitch_block_mode_trylock(struct mlx5_core_dev *dev) 3644 + int mlx5_eswitch_block_mode(struct mlx5_core_dev *dev) 3645 3645 { 3646 - struct devlink *devlink = priv_to_devlink(dev); 3647 - struct mlx5_eswitch *esw; 3646 + struct mlx5_eswitch *esw = dev->priv.eswitch; 3648 3647 int err; 3649 3648 3650 - devl_lock(devlink); 3651 - esw = mlx5_devlink_eswitch_get(devlink); 3652 - if (IS_ERR(esw)) { 3653 - /* Failure means no eswitch => not possible to change eswitch mode */ 3654 - devl_unlock(devlink); 3649 + if (!mlx5_esw_allowed(esw)) 3655 3650 return 0; 3656 - } 3657 3651 3652 + /* Take TC into account */ 3658 3653 err = mlx5_esw_try_lock(esw); 3659 - if (err < 0) { 3660 - devl_unlock(devlink); 3654 + if (err < 0) 3661 3655 return err; 3662 - } 3663 3656 3657 + esw->offloads.num_block_mode++; 3658 + mlx5_esw_unlock(esw); 3664 3659 return 0; 3665 3660 } 3666 3661 3667 - void mlx5_eswitch_block_mode_unlock(struct mlx5_core_dev *dev, int err) 3662 + void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev) 3668 3663 { 3669 - struct devlink *devlink = priv_to_devlink(dev); 3670 - struct mlx5_eswitch *esw; 3664 + struct mlx5_eswitch *esw = dev->priv.eswitch; 3671 3665 3672 - esw = mlx5_devlink_eswitch_get(devlink); 3673 - if (IS_ERR(esw)) 3674 - return; 3675 - 3676 - if (!err) 3677 - esw->offloads.num_block_mode++; 3678 - mlx5_esw_unlock(esw); 3679 - devl_unlock(devlink); 3680 - } 3681 - 3682 - void mlx5_eswitch_unblock_mode_lock(struct mlx5_core_dev *dev) 3683 - { 3684 - struct devlink *devlink = priv_to_devlink(dev); 3685 - struct mlx5_eswitch *esw; 3686 - 3687 - esw = mlx5_devlink_eswitch_get(devlink); 3688 - if (IS_ERR(esw)) 3666 + if (!mlx5_esw_allowed(esw)) 3689 3667 return; 3690 3668 3691 3669 down_write(&esw->mode_lock); 3692 - } 3693 - 3694 - void mlx5_eswitch_unblock_mode_unlock(struct mlx5_core_dev *dev) 3695 - { 3696 - struct devlink *devlink = priv_to_devlink(dev); 3697 
- struct mlx5_eswitch *esw; 3698 - 3699 - esw = mlx5_devlink_eswitch_get(devlink); 3700 - if (IS_ERR(esw)) 3701 - return; 3702 - 3703 3670 esw->offloads.num_block_mode--; 3704 3671 up_write(&esw->mode_lock); 3705 3672 }