Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2023-01-09' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-fixes-2023-01-09

+104 -49
+2 -11
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 2176 2176 return -EINVAL; 2177 2177 } 2178 2178 2179 - cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL); 2180 - if (!cmd->stats) 2181 - return -ENOMEM; 2182 - 2183 2179 cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0); 2184 - if (!cmd->pool) { 2185 - err = -ENOMEM; 2186 - goto dma_pool_err; 2187 - } 2180 + if (!cmd->pool) 2181 + return -ENOMEM; 2188 2182 2189 2183 err = alloc_cmd_page(dev, cmd); 2190 2184 if (err) ··· 2262 2268 2263 2269 err_free_pool: 2264 2270 dma_pool_destroy(cmd->pool); 2265 - dma_pool_err: 2266 - kvfree(cmd->stats); 2267 2271 return err; 2268 2272 } 2269 2273 ··· 2274 2282 destroy_msg_cache(dev); 2275 2283 free_cmd_page(dev, cmd); 2276 2284 dma_pool_destroy(cmd->pool); 2277 - kvfree(cmd->stats); 2278 2285 } 2279 2286 2280 2287 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
-6
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
··· 34 34 return -EOPNOTSUPP; 35 35 } 36 36 37 - if (act->police.rate_pkt_ps) { 38 - NL_SET_ERR_MSG_MOD(extack, 39 - "QoS offload not support packets per second"); 40 - return -EOPNOTSUPP; 41 - } 42 - 43 37 return 0; 44 38 } 45 39
+1
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
··· 127 127 attr->counter = act_counter; 128 128 129 129 attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT; 130 + attr->inner_match_level = MLX5_MATCH_NONE; 130 131 attr->outer_match_level = MLX5_MATCH_NONE; 131 132 attr->chain = 0; 132 133 attr->prio = 0;
+2
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
··· 88 88 struct udphdr *udp = (struct udphdr *)(buf); 89 89 struct vxlanhdr *vxh; 90 90 91 + if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) 92 + return -EOPNOTSUPP; 91 93 vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); 92 94 *ip_proto = IPPROTO_UDP; 93 95
+9 -10
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
··· 62 62 u32 enc_key_id; 63 63 u32 next_pn; 64 64 sci_t sci; 65 + ssci_t ssci; 65 66 salt_t salt; 66 67 67 68 struct rhash_head hash; ··· 359 358 struct mlx5_core_dev *mdev = priv->mdev; 360 359 struct mlx5_macsec_obj_attrs obj_attrs; 361 360 union mlx5e_macsec_rule *macsec_rule; 362 - struct macsec_key *key; 363 361 int err; 364 362 365 363 obj_attrs.next_pn = sa->next_pn; ··· 368 368 obj_attrs.aso_pdn = macsec->aso.pdn; 369 369 obj_attrs.epn_state = sa->epn_state; 370 370 371 - key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key; 372 - 373 371 if (sa->epn_state.epn_enabled) { 374 - obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) : 375 - cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci); 376 - 377 - memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt)); 372 + obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci); 373 + memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt)); 378 374 } 379 375 380 376 obj_attrs.replay_window = ctx->secy->replay_window; ··· 495 499 } 496 500 497 501 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key, 498 - const pn_t *next_pn_halves) 502 + const pn_t *next_pn_halves, ssci_t ssci) 499 503 { 500 504 struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state; 501 505 506 + sa->ssci = ssci; 502 507 sa->salt = key->salt; 503 508 epn_state->epn_enabled = 1; 504 509 epn_state->epn_msb = next_pn_halves->upper; ··· 547 550 tx_sa->assoc_num = assoc_num; 548 551 549 552 if (secy->xpn) 550 - update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves); 553 + update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves, 554 + ctx_tx_sa->ssci); 551 555 552 556 err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len, 553 557 MLX5_ACCEL_OBJ_MACSEC_KEY, ··· 943 945 rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id; 944 946 945 947 if (ctx->secy->xpn) 946 - update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves); 948 + update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves, 949 + ctx_rx_sa->ssci); 947 950 948 951 err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len, 949 952 MLX5_ACCEL_OBJ_MACSEC_KEY,
+3
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4084 4084 struct mlx5e_vlan_table *vlan; 4085 4085 struct mlx5e_params *params; 4086 4086 4087 + if (!netif_device_present(netdev)) 4088 + return features; 4089 + 4087 4090 vlan = mlx5e_fs_get_vlan(priv->fs); 4088 4091 mutex_lock(&priv->state_lock); 4089 4092 params = &priv->channels.params;
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 191 191 if (err) { 192 192 netdev_warn(priv->netdev, "vport %d error %d reading stats\n", 193 193 rep->vport, err); 194 - return; 194 + goto out; 195 195 } 196 196 197 197 #define MLX5_GET_CTR(p, x) \ ··· 241 241 rep_stats->tx_vport_rdma_multicast_bytes = 242 242 MLX5_GET_CTR(out, received_ib_multicast.octets); 243 243 244 + out: 244 245 kvfree(out); 245 246 } 246 247
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 2419 2419 2420 2420 priv = mlx5i_epriv(netdev); 2421 2421 tstamp = &priv->tstamp; 2422 - stats = rq->stats; 2422 + stats = &priv->channel_stats[rq->ix]->rq; 2423 2423 2424 2424 flags_rqpn = be32_to_cpu(cqe->flags_rqpn); 2425 2425 g = (flags_rqpn >> 28) & 3;
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1301 1301 1302 1302 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 1303 1303 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); 1304 - mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 1305 1304 if (err) 1306 1305 return err; 1307 1306 } ··· 1358 1359 } 1359 1360 mutex_unlock(&tc->t_lock); 1360 1361 1361 - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 1362 + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 1363 + mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); 1362 1364 mlx5e_detach_mod_hdr(priv, flow); 1365 + } 1363 1366 1364 1367 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 1365 1368 mlx5_fc_destroy(priv->mdev, attr->counter);
+1 -5
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 143 143 if (mlx5_esw_indir_table_decap_vport(attr)) 144 144 vport = mlx5_esw_indir_table_decap_vport(attr); 145 145 146 - if (attr && !attr->chain && esw_attr->int_port) 146 + if (!attr->chain && esw_attr && esw_attr->int_port) 147 147 metadata = 148 148 mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port); 149 149 else ··· 4143 4143 } 4144 4144 4145 4145 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4146 - memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability), 4147 - MLX5_UN_SZ_BYTES(hca_cap_union)); 4148 4146 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1); 4149 4147 4150 4148 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport, ··· 4234 4236 } 4235 4237 4236 4238 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4237 - memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability), 4238 - MLX5_UN_SZ_BYTES(hca_cap_union)); 4239 4239 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable); 4240 4240 4241 4241 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
+14 -2
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
··· 90 90 static int mlx5i_set_channels(struct net_device *dev, 91 91 struct ethtool_channels *ch) 92 92 { 93 - struct mlx5e_priv *priv = mlx5i_epriv(dev); 93 + struct mlx5i_priv *ipriv = netdev_priv(dev); 94 + struct mlx5e_priv *epriv = mlx5i_epriv(dev); 94 95 95 - return mlx5e_ethtool_set_channels(priv, ch); 96 + /* rtnl lock protects from race between this ethtool op and sub 97 + * interface ndo_init/uninit. 98 + */ 99 + ASSERT_RTNL(); 100 + if (ipriv->num_sub_interfaces > 0) { 101 + mlx5_core_warn(epriv->mdev, 102 + "can't change number of channels for interfaces with sub interfaces (%u)\n", 103 + ipriv->num_sub_interfaces); 104 + return -EINVAL; 105 + } 106 + 107 + return mlx5e_ethtool_set_channels(epriv, ch); 96 108 } 97 109 98 110 static void mlx5i_get_channels(struct net_device *dev,
+38
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 160 160 stats->tx_dropped = sstats->tx_queue_dropped; 161 161 } 162 162 163 + struct net_device *mlx5i_parent_get(struct net_device *netdev) 164 + { 165 + struct mlx5e_priv *priv = mlx5i_epriv(netdev); 166 + struct mlx5i_priv *ipriv, *parent_ipriv; 167 + struct net_device *parent_dev; 168 + int parent_ifindex; 169 + 170 + ipriv = priv->ppriv; 171 + 172 + parent_ifindex = netdev->netdev_ops->ndo_get_iflink(netdev); 173 + parent_dev = dev_get_by_index(dev_net(netdev), parent_ifindex); 174 + if (!parent_dev) 175 + return NULL; 176 + 177 + parent_ipriv = netdev_priv(parent_dev); 178 + 179 + ASSERT_RTNL(); 180 + parent_ipriv->num_sub_interfaces++; 181 + 182 + ipriv->parent_dev = parent_dev; 183 + 184 + return parent_dev; 185 + } 186 + 187 + void mlx5i_parent_put(struct net_device *netdev) 188 + { 189 + struct mlx5e_priv *priv = mlx5i_epriv(netdev); 190 + struct mlx5i_priv *ipriv, *parent_ipriv; 191 + 192 + ipriv = priv->ppriv; 193 + parent_ipriv = netdev_priv(ipriv->parent_dev); 194 + 195 + ASSERT_RTNL(); 196 + parent_ipriv->num_sub_interfaces--; 197 + 198 + dev_put(ipriv->parent_dev); 199 + } 200 + 163 201 int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) 164 202 { 165 203 struct mlx5_core_dev *mdev = priv->mdev;
+6
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
··· 54 54 struct rdma_netdev rn; /* keep this first */ 55 55 u32 qpn; 56 56 bool sub_interface; 57 + u32 num_sub_interfaces; 57 58 u32 qkey; 58 59 u16 pkey_index; 59 60 struct mlx5i_pkey_qpn_ht *qpn_htbl; 61 + struct net_device *parent_dev; 60 62 char *mlx5e_priv[]; 61 63 }; 62 64 ··· 118 116 void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, 119 117 struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more); 120 118 void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); 119 + 120 + /* Reference management for child to parent interfaces. */ 121 + struct net_device *mlx5i_parent_get(struct net_device *netdev); 122 + void mlx5i_parent_put(struct net_device *netdev); 121 123 122 124 #endif /* CONFIG_MLX5_CORE_IPOIB */ 123 125 #endif /* __MLX5E_IPOB_H__ */
+13 -5
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
··· 158 158 struct mlx5e_priv *priv = mlx5i_epriv(dev); 159 159 struct mlx5i_priv *ipriv, *parent_ipriv; 160 160 struct net_device *parent_dev; 161 - int parent_ifindex; 162 161 163 162 ipriv = priv->ppriv; 164 163 165 - /* Get QPN to netdevice hash table from parent */ 166 - parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev); 167 - parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex); 164 + /* Link to parent */ 165 + parent_dev = mlx5i_parent_get(dev); 168 166 if (!parent_dev) { 169 167 mlx5_core_warn(priv->mdev, "failed to get parent device\n"); 170 168 return -EINVAL; 171 169 } 172 170 171 + if (dev->num_rx_queues < parent_dev->real_num_rx_queues) { 172 + mlx5_core_warn(priv->mdev, 173 + "failed to create child device with rx queues [%d] less than parent's [%d]\n", 174 + dev->num_rx_queues, 175 + parent_dev->real_num_rx_queues); 176 + mlx5i_parent_put(dev); 177 + return -EINVAL; 178 + } 179 + 180 + /* Get QPN to netdevice hash table from parent */ 173 181 parent_ipriv = netdev_priv(parent_dev); 174 182 ipriv->qpn_htbl = parent_ipriv->qpn_htbl; 175 - dev_put(parent_dev); 176 183 177 184 return mlx5i_dev_init(dev); 178 185 } ··· 191 184 192 185 static void mlx5i_pkey_dev_cleanup(struct net_device *netdev) 193 186 { 187 + mlx5i_parent_put(netdev); 194 188 return mlx5i_dev_cleanup(netdev); 195 189 } 196 190
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 681 681 static const struct ptp_clock_info mlx5_ptp_clock_info = { 682 682 .owner = THIS_MODULE, 683 683 .name = "mlx5_ptp", 684 - .max_adj = 100000000, 684 + .max_adj = 50000000, 685 685 .n_alarm = 0, 686 686 .n_ext_ts = 0, 687 687 .n_per_out = 0,
+7 -4
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
··· 3 3 4 4 #include "dr_types.h" 5 5 6 + #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048) 7 + /* don't try to optimize STE allocation if the stack is too constraining */ 8 + #define DR_RULE_MAX_STES_OPTIMIZED 0 9 + #else 6 10 #define DR_RULE_MAX_STES_OPTIMIZED 5 11 + #endif 7 12 #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES) 8 13 9 14 static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn, ··· 1223 1218 1224 1219 mlx5dr_domain_nic_unlock(nic_dmn); 1225 1220 1226 - if (unlikely(!hw_ste_arr_is_opt)) 1227 - kfree(hw_ste_arr); 1228 - 1229 - return 0; 1221 + goto out; 1230 1222 1231 1223 free_rule: 1232 1224 dr_rule_clean_rule_members(rule, nic_rule); ··· 1240 1238 free_hw_ste: 1241 1239 mlx5dr_domain_nic_unlock(nic_dmn); 1242 1240 1241 + out: 1243 1242 if (unlikely(!hw_ste_arr_is_opt)) 1244 1243 kfree(hw_ste_arr); 1245 1244
+1 -1
include/linux/mlx5/driver.h
··· 315 315 struct mlx5_cmd_debug dbg; 316 316 struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; 317 317 int checksum_disabled; 318 - struct mlx5_cmd_stats *stats; 318 + struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; 319 319 }; 320 320 321 321 struct mlx5_cmd_mailbox {