Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/mlx5: Support set qp counter

Support binding a QP to a counter. If the counter is null then bind the QP to the
default counter. Different QP states require different operations:

- RESET: Set the counter field so that it will take effect during the
RST2INIT transition;
- RTS: Issue an RTS2RTS change to update the QP counter;
- Other: Set the counter field and mark the counter_pending flag; when the QP
is moved to the RTS state and this flag is set, issue an RTS2RTS
modification to update the counter.

Signed-off-by: Mark Zhang <markz@mellanox.com>
Reviewed-by: Majd Dibbiny <majd@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

authored by

Mark Zhang and committed by
Jason Gunthorpe
d14133dd 99fa331d

+81 -2
+6
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 439 439 u32 flags_en; 440 440 /* storage for qp sub type when core qp type is IB_QPT_DRIVER */ 441 441 enum ib_qp_type qp_sub_type; 442 + /* A flag to indicate if there's a new counter is configured 443 + * but not take effective 444 + */ 445 + u32 counter_pending; 442 446 }; 443 447 444 448 struct mlx5_ib_cq_buf { ··· 1472 1468 int bfregn_to_uar_index(struct mlx5_ib_dev *dev, 1473 1469 struct mlx5_bfreg_info *bfregi, u32 bfregn, 1474 1470 bool dyn_bfreg); 1471 + 1472 + int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter); 1475 1473 #endif /* MLX5_IB_H */
+74 -2
drivers/infiniband/hw/mlx5/qp.c
··· 34 34 #include <rdma/ib_umem.h> 35 35 #include <rdma/ib_cache.h> 36 36 #include <rdma/ib_user_verbs.h> 37 + #include <rdma/rdma_counter.h> 37 38 #include <linux/mlx5/fs.h> 38 39 #include "mlx5_ib.h" 39 40 #include "ib_rep.h" ··· 3381 3380 return tx_port_affinity; 3382 3381 } 3383 3382 3383 + static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, 3384 + struct rdma_counter *counter) 3385 + { 3386 + struct mlx5_ib_dev *dev = to_mdev(qp->device); 3387 + struct mlx5_ib_qp *mqp = to_mqp(qp); 3388 + struct mlx5_qp_context context = {}; 3389 + struct mlx5_ib_port *mibport = NULL; 3390 + struct mlx5_ib_qp_base *base; 3391 + u32 set_id; 3392 + 3393 + if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) 3394 + return 0; 3395 + 3396 + if (counter) { 3397 + set_id = counter->id; 3398 + } else { 3399 + mibport = &dev->port[mqp->port - 1]; 3400 + set_id = mibport->cnts.set_id; 3401 + } 3402 + 3403 + base = &mqp->trans_qp.base; 3404 + context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff); 3405 + context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24); 3406 + return mlx5_core_qp_modify(dev->mdev, 3407 + MLX5_CMD_OP_RTS2RTS_QP, 3408 + MLX5_QP_OPTPAR_COUNTER_SET_ID, 3409 + &context, &base->mqp); 3410 + } 3411 + 3384 3412 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, 3385 3413 const struct ib_qp_attr *attr, int attr_mask, 3386 3414 enum ib_qp_state cur_state, ··· 3463 3433 struct mlx5_ib_port *mibport = NULL; 3464 3434 enum mlx5_qp_state mlx5_cur, mlx5_new; 3465 3435 enum mlx5_qp_optpar optpar; 3436 + u32 set_id = 0; 3466 3437 int mlx5_st; 3467 3438 int err; 3468 3439 u16 op; ··· 3626 3595 port_num = 0; 3627 3596 3628 3597 mibport = &dev->port[port_num]; 3598 + if (ibqp->counter) 3599 + set_id = ibqp->counter->id; 3600 + else 3601 + set_id = mibport->cnts.set_id; 3629 3602 context->qp_counter_set_usr_page |= 3630 - cpu_to_be32((u32)(mibport->cnts.set_id) << 24); 3603 + cpu_to_be32(set_id << 24); 3631 3604 } 3632 3605 3633 3606 if (!ibqp->uobject && cur_state == 
IB_QPS_RESET && new_state == IB_QPS_INIT) ··· 3659 3624 3660 3625 raw_qp_param.operation = op; 3661 3626 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { 3662 - raw_qp_param.rq_q_ctr_id = mibport->cnts.set_id; 3627 + raw_qp_param.rq_q_ctr_id = set_id; 3663 3628 raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID; 3664 3629 } 3665 3630 ··· 3734 3699 qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); 3735 3700 qp->db.db[MLX5_RCV_DBR] = 0; 3736 3701 qp->db.db[MLX5_SND_DBR] = 0; 3702 + } 3703 + 3704 + if ((new_state == IB_QPS_RTS) && qp->counter_pending) { 3705 + err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter); 3706 + if (!err) 3707 + qp->counter_pending = 0; 3737 3708 } 3738 3709 3739 3710 out: ··· 6475 6434 } 6476 6435 6477 6436 handle_drain_completion(cq, &rdrain, dev); 6437 + } 6438 + 6439 + /** 6440 + * Bind a qp to a counter. If @counter is NULL then bind the qp to 6441 + * the default counter 6442 + */ 6443 + int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter) 6444 + { 6445 + struct mlx5_ib_qp *mqp = to_mqp(qp); 6446 + int err = 0; 6447 + 6448 + mutex_lock(&mqp->mutex); 6449 + if (mqp->state == IB_QPS_RESET) { 6450 + qp->counter = counter; 6451 + goto out; 6452 + } 6453 + 6454 + if (mqp->state == IB_QPS_RTS) { 6455 + err = __mlx5_ib_qp_set_counter(qp, counter); 6456 + if (!err) 6457 + qp->counter = counter; 6458 + 6459 + goto out; 6460 + } 6461 + 6462 + mqp->counter_pending = 1; 6463 + qp->counter = counter; 6464 + 6465 + out: 6466 + mutex_unlock(&mqp->mutex); 6467 + return err; 6478 6468 }
+1
include/linux/mlx5/qp.h
··· 71 71 MLX5_QP_OPTPAR_CQN_RCV = 1 << 19, 72 72 MLX5_QP_OPTPAR_DC_HS = 1 << 20, 73 73 MLX5_QP_OPTPAR_DC_KEY = 1 << 21, 74 + MLX5_QP_OPTPAR_COUNTER_SET_ID = 1 << 25, 74 75 }; 75 76 76 77 enum mlx5_qp_state {