Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2020-11-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2020-11-17

This series introduces some fixes to mlx5 driver.

* tag 'mlx5-fixes-2020-11-17' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
net/mlx5: fix error return code in mlx5e_tc_nic_init()
net/mlx5: E-Switch, Fail mlx5_esw_modify_vport_rate if qos disabled
net/mlx5: Disable QoS when min_rates on all VFs are zero
net/mlx5: Clear bw_share upon VF disable
net/mlx5: Add handling of port type in rule deletion
net/mlx5e: Fix check if netdev is bond slave
net/mlx5e: Fix IPsec packet drop by mlx5e_tc_update_skb
net/mlx5e: Set IPsec WAs only in IP's non checksum partial case.
net/mlx5e: Fix refcount leak on kTLS RX resync
====================

Link: https://lore.kernel.org/r/20201117195702.386113-1-saeedm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+54 -34
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
··· 187 187 struct mlx5e_priv *priv; 188 188 189 189 /* A given netdev is not a representor or not a slave of LAG configuration */ 190 - if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev)) 190 + if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev)) 191 191 return false; 192 192 193 193 priv = netdev_priv(netdev);
+7 -7
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
··· 64 64 if (!spec) 65 65 return -ENOMEM; 66 66 67 - /* Action to copy 7 bit ipsec_syndrome to regB[0:6] */ 67 + /* Action to copy 7 bit ipsec_syndrome to regB[24:30] */ 68 68 MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY); 69 69 MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME); 70 70 MLX5_SET(copy_action_in, action, src_offset, 0); 71 71 MLX5_SET(copy_action_in, action, length, 7); 72 72 MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); 73 - MLX5_SET(copy_action_in, action, dst_offset, 0); 73 + MLX5_SET(copy_action_in, action, dst_offset, 24); 74 74 75 75 modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL, 76 76 1, action); ··· 488 488 489 489 setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act); 490 490 491 - /* Set 1 bit ipsec marker */ 492 - /* Set 24 bit ipsec_obj_id */ 491 + /* Set bit[31] ipsec marker */ 492 + /* Set bit[23-0] ipsec_obj_id */ 493 493 MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); 494 494 MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B); 495 - MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1); 496 - MLX5_SET(set_action_in, action, offset, 7); 497 - MLX5_SET(set_action_in, action, length, 25); 495 + MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31))); 496 + MLX5_SET(set_action_in, action, offset, 0); 497 + MLX5_SET(set_action_in, action, length, 32); 498 498 499 499 modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL, 500 500 1, action);
+1 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
··· 453 453 struct mlx5_cqe64 *cqe) 454 454 { 455 455 u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata); 456 - u8 ipsec_syndrome = ipsec_meta_data & 0xFF; 457 456 struct mlx5e_priv *priv; 458 457 struct xfrm_offload *xo; 459 458 struct xfrm_state *xs; ··· 480 481 xo = xfrm_offload(skb); 481 482 xo->flags = CRYPTO_DONE; 482 483 483 - switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) { 484 + switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) { 484 485 case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED: 485 486 xo->status = CRYPTO_SUCCESS; 486 487 if (WARN_ON_ONCE(priv->ipsec->no_trailer))
+5 -4
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
··· 39 39 #include "en.h" 40 40 #include "en/txrx.h" 41 41 42 - #define MLX5_IPSEC_METADATA_MARKER_MASK (0x80) 43 - #define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F) 44 - #define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF) 42 + /* Bit31: IPsec marker, Bit30-24: IPsec syndrome, Bit23-0: IPsec obj id */ 43 + #define MLX5_IPSEC_METADATA_MARKER(metadata) (((metadata) >> 31) & 0x1) 44 + #define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(6, 0)) 45 + #define MLX5_IPSEC_METADATA_HANDLE(metadata) ((metadata) & GENMASK(23, 0)) 45 46 46 47 struct mlx5e_accel_tx_ipsec_state { 47 48 struct xfrm_offload *xo; ··· 79 78 80 79 static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) 81 80 { 82 - return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata)); 81 + return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); 83 82 } 84 83 85 84 static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+8 -5
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
··· 476 476 477 477 depth += sizeof(struct tcphdr); 478 478 479 - if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT)) 479 + if (unlikely(!sk)) 480 480 return; 481 + 482 + if (unlikely(sk->sk_state == TCP_TIME_WAIT)) 483 + goto unref; 481 484 482 485 if (unlikely(!resync_queue_get_psv(sk))) 483 - return; 484 - 485 - skb->sk = sk; 486 - skb->destructor = sock_edemux; 486 + goto unref; 487 487 488 488 seq = th->seq; 489 489 datalen = skb->len - depth; 490 490 tls_offload_rx_resync_async_request_start(sk, seq, datalen); 491 491 rq->stats->tls_resync_req_start++; 492 + 493 + unref: 494 + sock_gen_put(sk); 492 495 } 493 496 494 497 void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 5229 5229 5230 5230 tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr, 5231 5231 MLX5_FLOW_NAMESPACE_KERNEL); 5232 - if (IS_ERR(tc->ct)) 5232 + if (IS_ERR(tc->ct)) { 5233 + err = PTR_ERR(tc->ct); 5233 5234 goto err_ct; 5235 + } 5234 5236 5235 5237 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; 5236 5238 err = register_netdevice_notifier_dev_net(priv->netdev,
+3
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
··· 283 283 284 284 reg_b = be32_to_cpu(cqe->ft_metadata); 285 285 286 + if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS)) 287 + return false; 288 + 286 289 chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK; 287 290 if (chain) 288 291 return true;
+6 -7
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 144 144 memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); 145 145 } 146 146 147 - /* RM 2311217: no L4 inner checksum for IPsec tunnel type packet */ 147 + /* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet), 148 + * need to set L3 checksum flag for IPsec 149 + */ 148 150 static void 149 151 ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, 150 152 struct mlx5_wqe_eth_seg *eseg) ··· 156 154 eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM; 157 155 sq->stats->csum_partial_inner++; 158 156 } else { 159 - eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; 160 157 sq->stats->csum_partial++; 161 158 } 162 159 } ··· 163 162 static inline void 164 163 mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) 165 164 { 166 - if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) { 167 - ipsec_txwqe_build_eseg_csum(sq, skb, eseg); 168 - return; 169 - } 170 - 171 165 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 172 166 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; 173 167 if (skb->encapsulation) { ··· 173 177 eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; 174 178 sq->stats->csum_partial++; 175 179 } 180 + } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) { 181 + ipsec_txwqe_build_eseg_csum(sq, skb, eseg); 182 + 176 183 } else 177 184 sq->stats->csum_none++; 178 185 }
+13 -7
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1142 1142 struct mlx5_vport *vport; 1143 1143 1144 1144 vport = mlx5_eswitch_get_vport(esw, vport_num); 1145 + 1146 + if (!vport->qos.enabled) 1147 + return -EOPNOTSUPP; 1148 + 1145 1149 MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); 1146 1150 1147 1151 return mlx5_modify_scheduling_element_cmd(esw->dev, ··· 1412 1408 int i; 1413 1409 1414 1410 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { 1411 + memset(&vport->qos, 0, sizeof(vport->qos)); 1415 1412 memset(&vport->info, 0, sizeof(vport->info)); 1416 1413 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 1417 1414 } ··· 2226 2221 max_guarantee = evport->info.min_rate; 2227 2222 } 2228 2223 2229 - return max_t(u32, max_guarantee / fw_max_bw_share, 1); 2224 + if (max_guarantee) 2225 + return max_t(u32, max_guarantee / fw_max_bw_share, 1); 2226 + return 0; 2230 2227 } 2231 2228 2232 - static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) 2229 + static int normalize_vports_min_rate(struct mlx5_eswitch *esw) 2233 2230 { 2234 2231 u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); 2232 + u32 divider = calculate_vports_min_rate_divider(esw); 2235 2233 struct mlx5_vport *evport; 2236 2234 u32 vport_max_rate; 2237 2235 u32 vport_min_rate; ··· 2247 2239 continue; 2248 2240 vport_min_rate = evport->info.min_rate; 2249 2241 vport_max_rate = evport->info.max_rate; 2250 - bw_share = MLX5_MIN_BW_SHARE; 2242 + bw_share = 0; 2251 2243 2252 - if (vport_min_rate) 2244 + if (divider) 2253 2245 bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, 2254 2246 divider, 2255 2247 fw_max_bw_share); ··· 2274 2266 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2275 2267 u32 fw_max_bw_share; 2276 2268 u32 previous_min_rate; 2277 - u32 divider; 2278 2269 bool min_rate_supported; 2279 2270 bool max_rate_supported; 2280 2271 int err = 0; ··· 2298 2291 2299 2292 previous_min_rate = evport->info.min_rate; 2300 2293 evport->info.min_rate = min_rate; 2301 - divider = calculate_vports_min_rate_divider(esw); 2302 - err = normalize_vports_min_rate(esw, divider); 2294 + err = normalize_vports_min_rate(esw); 2303 2295 if (err) { 2304 2296 evport->info.min_rate = previous_min_rate; 2305 2297 goto unlock;
+7
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 534 534 goto out; 535 535 } 536 536 537 + if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT && 538 + --fte->dests_size) { 539 + fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION); 540 + fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW; 541 + goto out; 542 + } 543 + 537 544 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && 538 545 --fte->dests_size) { 539 546 fte->modify_mask |=