Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA: Check attr_mask during modify_qp

Each driver should check that it can support the provided attr_mask during
modify_qp. IB_USER_VERBS_EX_CMD_MODIFY_QP was being used to block
modify_qp_ex because the driver didn't check RATE_LIMIT.

Link: https://lore.kernel.org/r/6-v1-caa70ba3d1ab+1436e-ucmd_mask_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

+53 -22
+1
drivers/infiniband/core/device.c
···
 639  639 	BIT_ULL(IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL) |
 640  640 	BIT_ULL(IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
 641  641 	BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_CQ) |
      642 +	BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_QP) |
 642  643 	BIT_ULL(IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
 643  644 	BIT_ULL(IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 644  645 
+2 -6
drivers/infiniband/core/uverbs_cmd.c
···
1906 1906 	if (ret)
1907 1907 		return ret;
1908 1908 
1909      -	if (cmd.base.attr_mask &
1910      -	    ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
     1909 +	if (cmd.base.attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
1911 1910 		return -EOPNOTSUPP;
1912 1911 
1913 1912 	return modify_qp(attrs, &cmd);
···
1928 1929 	 * Last bit is reserved for extending the attr_mask by
1929 1930 	 * using another field.
1930 1931 	 */
1931      -	BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1ULL << 31));
1932      -
1933      -	if (cmd.base.attr_mask &
1934      -	    ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
     1932 +	if (cmd.base.attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
1935 1933 		return -EOPNOTSUPP;
1936 1934 
1937 1935 	ret = modify_qp(attrs, &cmd);
+3
drivers/infiniband/hw/bnxt_re/ib_verbs.c
···
1829 1829 	unsigned int flags;
1830 1830 	u8 nw_type;
1831 1831 
     1832 +	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
     1833 +		return -EOPNOTSUPP;
     1834 +
1832 1835 	qp->qplib_qp.modify_flags = 0;
1833 1836 	if (qp_attr_mask & IB_QP_STATE) {
1834 1837 		curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
+3
drivers/infiniband/hw/cxgb4/qp.c
···
2374 2374 
2375 2375 	pr_debug("ib_qp %p\n", ibqp);
2376 2376 
     2377 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
     2378 +		return -EOPNOTSUPP;
     2379 +
2377 2380 	/* iwarp does not support the RTR state */
2378 2381 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
2379 2382 		attr_mask &= ~IB_QP_STATE;
+3
drivers/infiniband/hw/efa/efa_verbs.c
···
 917  917 	enum ib_qp_state new_state;
 918  918 	int err;
 919  919 
      920 +	if (qp_attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
      921 +		return -EOPNOTSUPP;
      922 +
 920  923 	if (udata->inlen &&
 921  924 	    !ib_is_udata_cleared(udata, 0, udata->inlen)) {
 922  925 		ibdev_dbg(&dev->ibdev,
+2
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
···
3256 3256 				 enum ib_qp_state cur_state,
3257 3257 				 enum ib_qp_state new_state)
3258 3258 {
     3259 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
     3260 +		return -EOPNOTSUPP;
3259 3261 
3260 3262 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3261 3263 		return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
+3
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
···
4757 4757 	unsigned long rq_flag = 0;
4758 4758 	int ret;
4759 4759 
     4760 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
     4761 +		return -EOPNOTSUPP;
     4762 +
4760 4763 	/*
4761 4764 	 * In v2 engine, software pass context and context mask to hardware
4762 4765 	 * when modifying qp. If software need modify some fields in context,
+3
drivers/infiniband/hw/i40iw/i40iw_verbs.c
···
 855  855 	u32 err;
 856  856 	unsigned long flags;
 857  857 
      858 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
      859 +		return -EOPNOTSUPP;
      860 +
 858  861 	memset(&info, 0, sizeof(info));
 859  862 	ctx_info = &iwqp->ctx_info;
 860  863 	iwarp_info = &iwqp->iwarp_info;
+3
drivers/infiniband/hw/mlx4/qp.c
···
2787 2787 	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
2788 2788 	int ret;
2789 2789 
     2790 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
     2791 +		return -EOPNOTSUPP;
     2792 +
2790 2793 	ret = _mlx4_ib_modify_qp(ibqp, attr, attr_mask, udata);
2791 2794 
2792 2795 	if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
+1 -2
drivers/infiniband/hw/mlx5/main.c
···
4144 4144 			(1ull << IB_USER_VERBS_CMD_DESTROY_AH);
4145 4145 	dev->ib_dev.uverbs_ex_cmd_mask |=
4146 4146 			(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
4147      -			(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP) |
4148      -			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
     4147 +			(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
4149 4148 
4150 4149 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
4151 4150 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
+3
drivers/infiniband/hw/mlx5/qp.c
···
4247 4247 	int err = -EINVAL;
4248 4248 	int port;
4249 4249 
     4250 +	if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
     4251 +		return -EOPNOTSUPP;
     4252 +
4250 4253 	if (ibqp->rwq_ind_tbl)
4251 4254 		return -ENOSYS;
4252 4255 
+3
drivers/infiniband/hw/mthca/mthca_qp.c
···
 863  863 	enum ib_qp_state cur_state, new_state;
 864  864 	int err = -EINVAL;
 865  865 
      866 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
      867 +		return -EOPNOTSUPP;
      868 +
 866  869 	mutex_lock(&qp->mutex);
 867  870 	if (attr_mask & IB_QP_CUR_STATE) {
 868  871 		cur_state = attr->cur_qp_state;
+3
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
···
1391 1391 	struct ocrdma_dev *dev;
1392 1392 	enum ib_qp_state old_qps, new_qps;
1393 1393 
     1394 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
     1395 +		return -EOPNOTSUPP;
     1396 +
1394 1397 	qp = get_ocrdma_qp(ibqp);
1395 1398 	dev = get_ocrdma_dev(ibqp->device);
1396 1399 
+3
drivers/infiniband/hw/qedr/verbs.c
···
2472 2472 		   "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask,
2473 2473 		   attr->qp_state);
2474 2474 
     2475 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
     2476 +		return -EOPNOTSUPP;
     2477 +
2475 2478 	old_qp_state = qedr_get_ibqp_state(qp->state);
2476 2479 	if (attr_mask & IB_QP_STATE)
2477 2480 		new_qp_state = attr->qp_state;
+3
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
···
 557  557 	int status;
 558  558 	usnic_dbg("\n");
 559  559 
      560 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
      561 +		return -EOPNOTSUPP;
      562 +
 560  563 	qp_grp = to_uqp_grp(ibqp);
 561  564 
 562  565 	mutex_lock(&qp_grp->vf->pf->usdev_lock);
+3
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
···
 544  544 	enum ib_qp_state cur_state, next_state;
 545  545 	int ret;
 546  546 
      547 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
      548 +		return -EOPNOTSUPP;
      549 +
 547  550 	/* Sanity checking. Should need lock here */
 548  551 	mutex_lock(&qp->mutex);
 549  552 	cur_state = (attr_mask & IB_QP_CUR_STATE) ? attr->cur_qp_state :
+3
drivers/infiniband/sw/rdmavt/qp.c
···
1469 1469 	int pmtu = 0; /* for gcc warning only */
1470 1470 	int opa_ah;
1471 1471 
     1472 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
     1473 +		return -EOPNOTSUPP;
     1474 +
1472 1475 	spin_lock_irq(&qp->r_lock);
1473 1476 	spin_lock(&qp->s_hlock);
1474 1477 	spin_lock(&qp->s_lock);
+3
drivers/infiniband/sw/rxe/rxe_verbs.c
···
 436  436 	struct rxe_dev *rxe = to_rdev(ibqp->device);
 437  437 	struct rxe_qp *qp = to_rqp(ibqp);
 438  438 
      439 +	if (mask & ~IB_QP_ATTR_STANDARD_BITS)
      440 +		return -EOPNOTSUPP;
      441 +
 439  442 	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
 440  443 	if (err)
 441  444 		goto err1;
+3
drivers/infiniband/sw/siw/siw_verbs.c
···
 544  544 	if (!attr_mask)
 545  545 		return 0;
 546  546 
      547 +	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
      548 +		return -EOPNOTSUPP;
      549 +
 547  550 	memset(&new_attrs, 0, sizeof(new_attrs));
 548  551 
 549  552 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
+2
include/rdma/ib_verbs.h
···
1234 1234 	IB_QP_RESERVED3			= (1<<23),
1235 1235 	IB_QP_RESERVED4			= (1<<24),
1236 1236 	IB_QP_RATE_LIMIT		= (1<<25),
     1237 +
     1238 +	IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1237 1239 };
1238 1240 
1239 1241 enum ib_qp_state {
-14
include/uapi/rdma/ib_user_verbs.h
···
 596  596 	IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
 597  597 };
 598  598 
 599      -enum {
 600      -	/*
 601      -	 * This value is equal to IB_QP_DEST_QPN.
 602      -	 */
 603      -	IB_USER_LEGACY_LAST_QP_ATTR_MASK = 1ULL << 20,
 604      -};
 605      -
 606      -enum {
 607      -	/*
 608      -	 * This value is equal to IB_QP_RATE_LIMIT.
 609      -	 */
 610      -	IB_USER_LAST_QP_ATTR_MASK = 1ULL << 25,
 611      -};
 612      -
 613  599 struct ib_uverbs_ex_create_qp {
 614  600 	__aligned_u64 user_handle;
 615  601 	__u32 pd_handle;