Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2022-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-02-01

This series provides bug fixes to the mlx5 driver.
Please pull and let me know if there is any problem.

Sorry about the long series, but I had to move the top two patches from
net-next to net to help avoid a build break when the kspp branch is merged
into linux-next during the next merge window.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+102 -55
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 224 224 struct mlx5e_tx_wqe { 225 225 struct mlx5_wqe_ctrl_seg ctrl; 226 226 struct mlx5_wqe_eth_seg eth; 227 - struct mlx5_wqe_data_seg data[0]; 227 + struct mlx5_wqe_data_seg data[]; 228 228 }; 229 229 230 230 struct mlx5e_rx_wqe_ll { ··· 241 241 struct mlx5_wqe_umr_ctrl_seg uctrl; 242 242 struct mlx5_mkey_seg mkc; 243 243 union { 244 - struct mlx5_mtt inline_mtts[0]; 245 - struct mlx5_klm inline_klms[0]; 244 + DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts); 245 + DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms); 246 246 }; 247 247 }; 248 248
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
··· 570 570 571 571 static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw) 572 572 { 573 - *max_average_bw = div_u64(ceil, BYTES_IN_MBIT); 573 + /* Hardware treats 0 as "unlimited", set at least 1. */ 574 + *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1); 574 575 575 576 qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n", 576 577 ceil, *max_average_bw);
+14 -18
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
··· 183 183 184 184 static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) 185 185 { 186 - struct mlx5e_rep_priv *rpriv; 187 - struct mlx5e_priv *priv; 188 - 189 - /* A given netdev is not a representor or not a slave of LAG configuration */ 190 - if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev)) 191 - return false; 192 - 193 - priv = netdev_priv(netdev); 194 - rpriv = priv->ppriv; 195 - 196 - /* Egress acl forward to vport is supported only non-uplink representor */ 197 - return rpriv->rep->vport != MLX5_VPORT_UPLINK; 186 + return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev); 198 187 } 199 188 200 189 static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr) ··· 198 209 u16 acl_vport_num; 199 210 u16 fwd_vport_num; 200 211 int err; 201 - 202 - if (!mlx5e_rep_is_lag_netdev(netdev)) 203 - return; 204 212 205 213 info = ptr; 206 214 lag_info = info->lower_state_info; ··· 252 266 struct net_device *lag_dev; 253 267 struct mlx5e_priv *priv; 254 268 255 - if (!mlx5e_rep_is_lag_netdev(netdev)) 256 - return; 257 - 258 269 priv = netdev_priv(netdev); 259 270 rpriv = priv->ppriv; 260 271 lag_dev = info->upper_dev; ··· 276 293 unsigned long event, void *ptr) 277 294 { 278 295 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); 296 + struct mlx5e_rep_priv *rpriv; 297 + struct mlx5e_rep_bond *bond; 298 + struct mlx5e_priv *priv; 299 + 300 + if (!mlx5e_rep_is_lag_netdev(netdev)) 301 + return NOTIFY_DONE; 302 + 303 + bond = container_of(nb, struct mlx5e_rep_bond, nb); 304 + priv = netdev_priv(netdev); 305 + rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH); 306 + /* Verify VF representor is on the same device of the bond handling the netevent. */ 307 + if (rpriv->uplink_priv.bond != bond) 308 + return NOTIFY_DONE; 279 309 280 310 switch (event) { 281 311 case NETDEV_CHANGELOWERSTATE:
+4 -2
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
··· 491 491 } 492 492 493 493 br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event; 494 - err = register_netdevice_notifier(&br_offloads->netdev_nb); 494 + err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb); 495 495 if (err) { 496 496 esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n", 497 497 err); ··· 509 509 err_register_swdev: 510 510 destroy_workqueue(br_offloads->wq); 511 511 err_alloc_wq: 512 + rtnl_lock(); 512 513 mlx5_esw_bridge_cleanup(esw); 514 + rtnl_unlock(); 513 515 } 514 516 515 517 void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv) ··· 526 524 return; 527 525 528 526 cancel_delayed_work_sync(&br_offloads->update_work); 529 - unregister_netdevice_notifier(&br_offloads->netdev_nb); 527 + unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb); 530 528 unregister_switchdev_blocking_notifier(&br_offloads->nb_blk); 531 529 unregister_switchdev_notifier(&br_offloads->nb); 532 530 destroy_workqueue(br_offloads->wq);
+5
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
··· 167 167 return pi; 168 168 } 169 169 170 + static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 171 + { 172 + return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1); 173 + } 174 + 170 175 struct mlx5e_shampo_umr { 171 176 u16 len; 172 177 };
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
··· 341 341 342 342 /* copy the inline part if required */ 343 343 if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { 344 - memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE); 344 + memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start)); 345 345 eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); 346 + memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start), 347 + MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start)); 346 348 dma_len -= MLX5E_XDP_MIN_INLINE; 347 349 dma_addr += MLX5E_XDP_MIN_INLINE; 348 350 dseg++;
+11 -2
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
··· 157 157 /* Tunnel mode */ 158 158 if (mode == XFRM_MODE_TUNNEL) { 159 159 eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; 160 - eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; 161 160 if (xo->proto == IPPROTO_IPV6) 162 161 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; 163 - if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP) 162 + 163 + switch (xo->inner_ipproto) { 164 + case IPPROTO_UDP: 164 165 eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; 166 + fallthrough; 167 + case IPPROTO_TCP: 168 + /* IP | ESP | IP | [TCP | UDP] */ 169 + eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; 170 + break; 171 + default: 172 + break; 173 + } 165 174 return; 166 175 } 167 176
+6 -3
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
··· 131 131 mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, 132 132 struct mlx5_wqe_eth_seg *eseg) 133 133 { 134 - struct xfrm_offload *xo = xfrm_offload(skb); 134 + u8 inner_ipproto; 135 135 136 136 if (!mlx5e_ipsec_eseg_meta(eseg)) 137 137 return false; 138 138 139 139 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; 140 - if (xo->inner_ipproto) { 141 - eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM; 140 + inner_ipproto = xfrm_offload(skb)->inner_ipproto; 141 + if (inner_ipproto) { 142 + eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM; 143 + if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) 144 + eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; 142 145 } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 143 146 eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; 144 147 sq->stats->csum_partial_inner++;
+19 -11
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 1117 1117 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, 1118 1118 struct tcphdr *skb_tcp_hd) 1119 1119 { 1120 - u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); 1120 + u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); 1121 1121 struct tcphdr *last_tcp_hd; 1122 1122 void *last_hd_addr; 1123 1123 ··· 1871 1871 return skb; 1872 1872 } 1873 1873 1874 - static void 1874 + static struct sk_buff * 1875 1875 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 1876 1876 struct mlx5_cqe64 *cqe, u16 header_index) 1877 1877 { ··· 1895 1895 skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size); 1896 1896 1897 1897 if (unlikely(!skb)) 1898 - return; 1898 + return NULL; 1899 1899 1900 1900 /* queue up for recycling/reuse */ 1901 1901 page_ref_inc(head->page); ··· 1907 1907 ALIGN(head_size, sizeof(long))); 1908 1908 if (unlikely(!skb)) { 1909 1909 rq->stats->buff_alloc_err++; 1910 - return; 1910 + return NULL; 1911 1911 } 1912 1912 1913 1913 prefetchw(skb->data); ··· 1918 1918 skb->tail += head_size; 1919 1919 skb->len += head_size; 1920 1920 } 1921 - rq->hw_gro_data->skb = skb; 1922 - NAPI_GRO_CB(skb)->count = 1; 1923 - skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size; 1921 + return skb; 1924 1922 } 1925 1923 1926 1924 static void ··· 1971 1973 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) 1972 1974 { 1973 1975 u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; 1974 - u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); 1976 + u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); 1975 1977 u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset); 1976 1978 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); 1977 1979 u32 data_offset = wqe_offset & (PAGE_SIZE - 1); 1978 1980 u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); 1979 1981 u16 wqe_id = be16_to_cpu(cqe->wqe_id); 
1980 1982 u32 page_idx = wqe_offset >> PAGE_SHIFT; 1983 + u16 head_size = cqe->shampo.header_size; 1981 1984 struct sk_buff **skb = &rq->hw_gro_data->skb; 1982 1985 bool flush = cqe->shampo.flush; 1983 1986 bool match = cqe->shampo.match; ··· 2010 2011 } 2011 2012 2012 2013 if (!*skb) { 2013 - mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); 2014 + if (likely(head_size)) 2015 + *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); 2016 + else 2017 + *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset, 2018 + page_idx); 2014 2019 if (unlikely(!*skb)) 2015 2020 goto free_hd_entry; 2021 + 2022 + NAPI_GRO_CB(*skb)->count = 1; 2023 + skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size; 2016 2024 } else { 2017 2025 NAPI_GRO_CB(*skb)->count++; 2018 2026 if (NAPI_GRO_CB(*skb)->count == 2 && ··· 2033 2027 } 2034 2028 } 2035 2029 2036 - di = &wi->umr.dma_info[page_idx]; 2037 - mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset); 2030 + if (likely(head_size)) { 2031 + di = &wi->umr.dma_info[page_idx]; 2032 + mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset); 2033 + } 2038 2034 2039 2035 mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); 2040 2036 if (flush)
+14 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1414 1414 if (err) 1415 1415 goto err_out; 1416 1416 1417 - if (!attr->chain && esw_attr->int_port) { 1417 + if (!attr->chain && esw_attr->int_port && 1418 + attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { 1418 1419 /* If decap route device is internal port, change the 1419 1420 * source vport value in reg_c0 back to uplink just in 1420 1421 * case the rule performs goto chain > 0. If we have a miss ··· 3189 3188 if (!(actions & 3190 3189 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { 3191 3190 NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action"); 3191 + return false; 3192 + } 3193 + 3194 + if (!(~actions & 3195 + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { 3196 + NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action"); 3197 + return false; 3198 + } 3199 + 3200 + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && 3201 + actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { 3202 + NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); 3192 3203 return false; 3193 3204 } 3194 3205
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 208 208 int cpy1_sz = 2 * ETH_ALEN; 209 209 int cpy2_sz = ihs - cpy1_sz; 210 210 211 - memcpy(vhdr, skb->data, cpy1_sz); 211 + memcpy(&vhdr->addrs, skb->data, cpy1_sz); 212 212 vhdr->h_vlan_proto = skb->vlan_proto; 213 213 vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); 214 214 memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
+4
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
··· 1574 1574 { 1575 1575 struct mlx5_esw_bridge_offloads *br_offloads; 1576 1576 1577 + ASSERT_RTNL(); 1578 + 1577 1579 br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL); 1578 1580 if (!br_offloads) 1579 1581 return ERR_PTR(-ENOMEM); ··· 1591 1589 void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw) 1592 1590 { 1593 1591 struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads; 1592 + 1593 + ASSERT_RTNL(); 1594 1594 1595 1595 if (!br_offloads) 1596 1596 return;
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
··· 21 21 __field(unsigned int, used) 22 22 ), 23 23 TP_fast_assign( 24 - strncpy(__entry->dev_name, 24 + strscpy(__entry->dev_name, 25 25 netdev_name(fdb->dev), 26 26 IFNAMSIZ); 27 27 memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
··· 132 132 { 133 133 struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; 134 134 135 - del_timer(&fw_reset->timer); 135 + del_timer_sync(&fw_reset->timer); 136 136 } 137 137 138 138 static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+5 -4
drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
··· 121 121 122 122 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains) 123 123 { 124 - if (!mlx5_chains_prios_supported(chains)) 125 - return 1; 126 - 127 124 if (mlx5_chains_ignore_flow_level_supported(chains)) 128 125 return UINT_MAX; 126 + 127 + if (!chains->dev->priv.eswitch || 128 + chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS) 129 + return 1; 129 130 130 131 /* We should get here only for eswitch case */ 131 132 return FDB_TC_MAX_PRIO; ··· 212 211 create_chain_restore(struct fs_chain *chain) 213 212 { 214 213 struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch; 215 - char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)]; 214 + u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; 216 215 struct mlx5_fs_chains *chains = chain->chains; 217 216 enum mlx5e_tc_attr_to_reg chain_to_reg; 218 217 struct mlx5_modify_hdr *mod_hdr;
+5 -4
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 406 406 407 407 switch (module_id) { 408 408 case MLX5_MODULE_ID_SFP: 409 - mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset); 409 + mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); 410 410 break; 411 411 case MLX5_MODULE_ID_QSFP: 412 412 case MLX5_MODULE_ID_QSFP_PLUS: 413 413 case MLX5_MODULE_ID_QSFP28: 414 - mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset); 414 + mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); 415 415 break; 416 416 default: 417 417 mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); 418 418 return -EINVAL; 419 419 } 420 420 421 - if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH) 421 + if (offset + size > MLX5_EEPROM_PAGE_LENGTH) 422 422 /* Cross pages read, read until offset 256 in low page */ 423 - size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; 423 + size = MLX5_EEPROM_PAGE_LENGTH - offset; 424 424 425 425 query.size = size; 426 + query.offset = offset; 426 427 427 428 return mlx5_query_mcia(dev, &query, data); 428 429 }
+4 -2
include/linux/if_vlan.h
··· 46 46 * @h_vlan_encapsulated_proto: packet type ID or len 47 47 */ 48 48 struct vlan_ethhdr { 49 - unsigned char h_dest[ETH_ALEN]; 50 - unsigned char h_source[ETH_ALEN]; 49 + struct_group(addrs, 50 + unsigned char h_dest[ETH_ALEN]; 51 + unsigned char h_source[ETH_ALEN]; 52 + ); 51 53 __be16 h_vlan_proto; 52 54 __be16 h_vlan_TCI; 53 55 __be16 h_vlan_encapsulated_proto;