Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-fixes-2020-02-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2020-02-18

This series introduces some fixes to mlx5 driver.

Please pull and let me know if there is any problem.

For -stable v5.3
('net/mlx5: Fix sleep while atomic in mlx5_eswitch_get_vepa')

For -stable v5.4
('net/mlx5: DR, Fix matching on vport gvmi')
('net/mlx5e: Fix crash in recovery flow without devlink reporter')

For -stable v5.5
('net/mlx5e: Reset RQ doorbell counter before moving RQ state from RST to RDY')
('net/mlx5e: Don't clear the whole vf config when switching modes')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+70 -29
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/health.c
··· 200 200 netdev_err(priv->netdev, err_str); 201 201 202 202 if (!reporter) 203 - return err_ctx->recover(&err_ctx->ctx); 203 + return err_ctx->recover(err_ctx->ctx); 204 204 205 205 return devlink_health_report(reporter, err_str, err_ctx); 206 206 }
+8
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
··· 179 179 } 180 180 } 181 181 182 + static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq) 183 + { 184 + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) 185 + mlx5_wq_ll_reset(&rq->mpwqe.wq); 186 + else 187 + mlx5_wq_cyc_reset(&rq->wqe.wq); 188 + } 189 + 182 190 /* SW parser related functions */ 183 191 184 192 struct mlx5e_swp_spec {
+3
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 712 712 if (!in) 713 713 return -ENOMEM; 714 714 715 + if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY) 716 + mlx5e_rqwq_reset(rq); 717 + 715 718 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); 716 719 717 720 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+8 -12
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 459 459 460 460 static int esw_legacy_enable(struct mlx5_eswitch *esw) 461 461 { 462 - int ret; 462 + struct mlx5_vport *vport; 463 + int ret, i; 463 464 464 465 ret = esw_create_legacy_table(esw); 465 466 if (ret) 466 467 return ret; 468 + 469 + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) 470 + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; 467 471 468 472 ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS); 469 473 if (ret) ··· 2456 2452 2457 2453 int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting) 2458 2454 { 2459 - int err = 0; 2460 - 2461 2455 if (!esw) 2462 2456 return -EOPNOTSUPP; 2463 2457 2464 2458 if (!ESW_ALLOWED(esw)) 2465 2459 return -EPERM; 2466 2460 2467 - mutex_lock(&esw->state_lock); 2468 - if (esw->mode != MLX5_ESWITCH_LEGACY) { 2469 - err = -EOPNOTSUPP; 2470 - goto out; 2471 - } 2461 + if (esw->mode != MLX5_ESWITCH_LEGACY) 2462 + return -EOPNOTSUPP; 2472 2463 2473 2464 *setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0; 2474 - 2475 - out: 2476 - mutex_unlock(&esw->state_lock); 2477 - return err; 2465 + return 0; 2478 2466 } 2479 2467 2480 2468 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 1172 1172 return -EINVAL; 1173 1173 } 1174 1174 1175 - mlx5_eswitch_disable(esw, true); 1175 + mlx5_eswitch_disable(esw, false); 1176 1176 mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs); 1177 1177 err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); 1178 1178 if (err) { ··· 2065 2065 { 2066 2066 int err, err1; 2067 2067 2068 - mlx5_eswitch_disable(esw, true); 2068 + mlx5_eswitch_disable(esw, false); 2069 2069 err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); 2070 2070 if (err) { 2071 2071 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
··· 35 35 static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024, 36 36 1 * 1024 * 1024, 37 37 64 * 1024, 38 - 4 * 1024, }; 38 + 128 }; 39 39 40 40 struct mlx5_esw_chains_priv { 41 41 struct rhashtable chains_ht;
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
··· 2307 2307 struct mlx5dr_cmd_vport_cap *vport_cap; 2308 2308 struct mlx5dr_domain *dmn = sb->dmn; 2309 2309 struct mlx5dr_cmd_caps *caps; 2310 + u8 *bit_mask = sb->bit_mask; 2310 2311 u8 *tag = hw_ste->tag; 2312 + bool source_gvmi_set; 2311 2313 2312 2314 DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn); 2313 2315 ··· 2330 2328 if (!vport_cap) 2331 2329 return -EINVAL; 2332 2330 2333 - if (vport_cap->vport_gvmi) 2331 + source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi); 2332 + if (vport_cap->vport_gvmi && source_gvmi_set) 2334 2333 MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi); 2335 2334 2336 2335 misc->source_eswitch_owner_vhca_id = 0;
+7 -2
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
··· 66 66 struct mlx5_flow_table *next_ft) 67 67 { 68 68 struct mlx5dr_table *tbl; 69 + u32 flags; 69 70 int err; 70 71 71 72 if (mlx5_dr_is_fw_table(ft->flags)) 72 73 return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft, 73 74 log_size, 74 75 next_ft); 76 + flags = ft->flags; 77 + /* turn off encap/decap if not supported for sw-str by fw */ 78 + if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported)) 79 + flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | 80 + MLX5_FLOW_TABLE_TUNNEL_EN_DECAP); 75 81 76 - tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, 77 - ft->level, ft->flags); 82 + tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags); 78 83 if (!tbl) { 79 84 mlx5_core_err(ns->dev, "Failed creating dr flow_table\n"); 80 85 return -EINVAL;
+30 -9
drivers/net/ethernet/mellanox/mlx5/core/wq.c
··· 94 94 print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false); 95 95 } 96 96 97 + void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq) 98 + { 99 + wq->wqe_ctr = 0; 100 + wq->cur_sz = 0; 101 + mlx5_wq_cyc_update_db_record(wq); 102 + } 103 + 97 104 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 98 105 void *qpc, struct mlx5_wq_qp *wq, 99 106 struct mlx5_wq_ctrl *wq_ctrl) ··· 199 192 return err; 200 193 } 201 194 195 + static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq) 196 + { 197 + struct mlx5_wqe_srq_next_seg *next_seg; 198 + int i; 199 + 200 + for (i = 0; i < wq->fbc.sz_m1; i++) { 201 + next_seg = mlx5_wq_ll_get_wqe(wq, i); 202 + next_seg->next_wqe_index = cpu_to_be16(i + 1); 203 + } 204 + next_seg = mlx5_wq_ll_get_wqe(wq, i); 205 + wq->tail_next = &next_seg->next_wqe_index; 206 + } 207 + 202 208 int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 203 209 void *wqc, struct mlx5_wq_ll *wq, 204 210 struct mlx5_wq_ctrl *wq_ctrl) ··· 219 199 u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); 220 200 u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); 221 201 struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; 222 - struct mlx5_wqe_srq_next_seg *next_seg; 223 202 int err; 224 - int i; 225 203 226 204 err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node); 227 205 if (err) { ··· 238 220 239 221 mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc); 240 222 241 - for (i = 0; i < fbc->sz_m1; i++) { 242 - next_seg = mlx5_wq_ll_get_wqe(wq, i); 243 - next_seg->next_wqe_index = cpu_to_be16(i + 1); 244 - } 245 - next_seg = mlx5_wq_ll_get_wqe(wq, i); 246 - wq->tail_next = &next_seg->next_wqe_index; 247 - 223 + mlx5_wq_ll_init_list(wq); 248 224 wq_ctrl->mdev = mdev; 249 225 250 226 return 0; ··· 247 235 mlx5_db_free(mdev, &wq_ctrl->db); 248 236 249 237 return err; 238 + } 239 + 240 + void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq) 241 + { 242 + wq->head = 0; 243 + wq->wqe_ctr = 0; 244 + wq->cur_sz = 0; 245 + mlx5_wq_ll_init_list(wq); 246 + mlx5_wq_ll_update_db_record(wq); 250 247 } 251 248 252 249 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+2
drivers/net/ethernet/mellanox/mlx5/core/wq.h
··· 80 80 void *wqc, struct mlx5_wq_cyc *wq, 81 81 struct mlx5_wq_ctrl *wq_ctrl); 82 82 void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides); 83 + void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq); 83 84 84 85 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 85 86 void *qpc, struct mlx5_wq_qp *wq, ··· 93 92 int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, 94 93 void *wqc, struct mlx5_wq_ll *wq, 95 94 struct mlx5_wq_ctrl *wq_ctrl); 95 + void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq); 96 96 97 97 void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl); 98 98
+4 -1
include/linux/mlx5/mlx5_ifc.h
··· 688 688 u8 nic_rx_multi_path_tirs[0x1]; 689 689 u8 nic_rx_multi_path_tirs_fts[0x1]; 690 690 u8 allow_sniffer_and_nic_rx_shared_tir[0x1]; 691 - u8 reserved_at_3[0x1d]; 691 + u8 reserved_at_3[0x4]; 692 + u8 sw_owner_reformat_supported[0x1]; 693 + u8 reserved_at_8[0x18]; 694 + 692 695 u8 encap_general_header[0x1]; 693 696 u8 reserved_at_21[0xa]; 694 697 u8 log_max_packet_reformat_context[0x5];