Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

qedr: Add support for QP verbs

Add support for Queue Pair verbs which adds, deletes,
modifies and queries Queue Pairs.

Signed-off-by: Rajesh Borundia <rajesh.borundia@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>

Authored by Ram Amrani and committed by Doug Ledford
cecbcddf a7efd777

+1320 -1
+14 -1
drivers/infiniband/hw/qedr/main.c
··· 48 48 MODULE_LICENSE("Dual BSD/GPL"); 49 49 MODULE_VERSION(QEDR_MODULE_VERSION); 50 50 51 + #define QEDR_WQ_MULTIPLIER_DFT (3) 52 + 51 53 void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num, 52 54 enum ib_event_type type) 53 55 { ··· 96 94 QEDR_UVERBS(CREATE_CQ) | 97 95 QEDR_UVERBS(RESIZE_CQ) | 98 96 QEDR_UVERBS(DESTROY_CQ) | 99 - QEDR_UVERBS(REQ_NOTIFY_CQ); 97 + QEDR_UVERBS(REQ_NOTIFY_CQ) | 98 + QEDR_UVERBS(CREATE_QP) | 99 + QEDR_UVERBS(MODIFY_QP) | 100 + QEDR_UVERBS(QUERY_QP) | 101 + QEDR_UVERBS(DESTROY_QP); 100 102 101 103 dev->ibdev.phys_port_cnt = 1; 102 104 dev->ibdev.num_comp_vectors = dev->num_cnq; ··· 125 119 dev->ibdev.destroy_cq = qedr_destroy_cq; 126 120 dev->ibdev.resize_cq = qedr_resize_cq; 127 121 dev->ibdev.req_notify_cq = qedr_arm_cq; 122 + 123 + dev->ibdev.create_qp = qedr_create_qp; 124 + dev->ibdev.modify_qp = qedr_modify_qp; 125 + dev->ibdev.query_qp = qedr_query_qp; 126 + dev->ibdev.destroy_qp = qedr_destroy_qp; 128 127 129 128 dev->ibdev.query_pkey = qedr_query_pkey; 130 129 ··· 640 629 DP_ERR(dev, "not enough CNQ resources.\n"); 641 630 goto init_err; 642 631 } 632 + 633 + dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT; 643 634 644 635 qedr_pci_set_atomic(dev, pdev); 645 636
+125
drivers/infiniband/hw/qedr/qedr.h
··· 52 52 #define QEDR_MSG_MISC "MISC" 53 53 #define QEDR_MSG_CQ " CQ" 54 54 #define QEDR_MSG_MR " MR" 55 + #define QEDR_MSG_RQ " RQ" 56 + #define QEDR_MSG_SQ " SQ" 57 + #define QEDR_MSG_QP " QP" 55 58 56 59 #define QEDR_CQ_MAGIC_NUMBER (0x11223344) 57 60 ··· 146 143 u32 dp_module; 147 144 u8 dp_level; 148 145 u8 num_hwfns; 146 + uint wq_multiplier; 147 + 149 148 }; 150 149 151 150 #define QEDR_MAX_SQ_PBL (0x8000) ··· 277 272 struct list_head entry; 278 273 }; 279 274 275 + union db_prod32 { 276 + struct rdma_pwm_val16_data data; 277 + u32 raw; 278 + }; 279 + 280 + struct qedr_qp_hwq_info { 281 + /* WQE Elements */ 282 + struct qed_chain pbl; 283 + u64 p_phys_addr_tbl; 284 + u32 max_sges; 285 + 286 + /* WQE */ 287 + u16 prod; 288 + u16 cons; 289 + u16 wqe_cons; 290 + u16 max_wr; 291 + 292 + /* DB */ 293 + void __iomem *db; 294 + union db_prod32 db_data; 295 + }; 296 + 297 + #define QEDR_INC_SW_IDX(p_info, index) \ 298 + do { \ 299 + p_info->index = (p_info->index + 1) & \ 300 + qed_chain_get_capacity(p_info->pbl) \ 301 + } while (0) 302 + 303 + enum qedr_qp_err_bitmap { 304 + QEDR_QP_ERR_SQ_FULL = 1, 305 + QEDR_QP_ERR_RQ_FULL = 2, 306 + QEDR_QP_ERR_BAD_SR = 4, 307 + QEDR_QP_ERR_BAD_RR = 8, 308 + QEDR_QP_ERR_SQ_PBL_FULL = 16, 309 + QEDR_QP_ERR_RQ_PBL_FULL = 32, 310 + }; 311 + 312 + struct qedr_qp { 313 + struct ib_qp ibqp; /* must be first */ 314 + struct qedr_dev *dev; 315 + 316 + struct qedr_qp_hwq_info sq; 317 + struct qedr_qp_hwq_info rq; 318 + 319 + u32 max_inline_data; 320 + 321 + /* Lock for QP's */ 322 + spinlock_t q_lock; 323 + struct qedr_cq *sq_cq; 324 + struct qedr_cq *rq_cq; 325 + struct qedr_srq *srq; 326 + enum qed_roce_qp_state state; 327 + u32 id; 328 + struct qedr_pd *pd; 329 + enum ib_qp_type qp_type; 330 + struct qed_rdma_qp *qed_qp; 331 + u32 qp_id; 332 + u16 icid; 333 + u16 mtu; 334 + int sgid_idx; 335 + u32 rq_psn; 336 + u32 sq_psn; 337 + u32 qkey; 338 + u32 dest_qp_num; 339 + 340 + /* Relevant to qps created from kernel space only (ULPs) */ 
341 + u8 prev_wqe_size; 342 + u16 wqe_cons; 343 + u32 err_bitmap; 344 + bool signaled; 345 + 346 + /* SQ shadow */ 347 + struct { 348 + u64 wr_id; 349 + enum ib_wc_opcode opcode; 350 + u32 bytes_len; 351 + u8 wqe_size; 352 + bool signaled; 353 + dma_addr_t icrc_mapping; 354 + u32 *icrc; 355 + struct qedr_mr *mr; 356 + } *wqe_wr_id; 357 + 358 + /* RQ shadow */ 359 + struct { 360 + u64 wr_id; 361 + struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE]; 362 + u8 wqe_size; 363 + 364 + u16 vlan_id; 365 + int rc; 366 + } *rqe_wr_id; 367 + 368 + /* Relevant to qps created from user space only (applications) */ 369 + struct qedr_userq usq; 370 + struct qedr_userq urq; 371 + }; 372 + 373 + static inline int qedr_get_dmac(struct qedr_dev *dev, 374 + struct ib_ah_attr *ah_attr, u8 *mac_addr) 375 + { 376 + union ib_gid zero_sgid = { { 0 } }; 377 + struct in6_addr in6; 378 + 379 + if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) { 380 + DP_ERR(dev, "Local port GID not supported\n"); 381 + eth_zero_addr(mac_addr); 382 + return -EINVAL; 383 + } 384 + 385 + memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); 386 + ether_addr_copy(mac_addr, ah_attr->dmac); 387 + 388 + return 0; 389 + } 390 + 280 391 static inline 281 392 struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext) 282 393 { ··· 414 293 return container_of(ibcq, struct qedr_cq, ibcq); 415 294 } 416 295 296 + static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp) 297 + { 298 + return container_of(ibqp, struct qedr_qp, ibqp); 299 + } 417 300 #endif
+40
drivers/infiniband/hw/qedr/qedr_cm.h
··· 1 + /* QLogic qedr NIC Driver 2 + * Copyright (c) 2015-2016 QLogic Corporation 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and /or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 31 + */ 32 + #ifndef LINUX_QEDR_CM_H_ 33 + #define LINUX_QEDR_CM_H_ 34 + 35 + static inline u32 qedr_get_ipv4_from_gid(u8 *gid) 36 + { 37 + return *(u32 *)(void *)&gid[12]; 38 + } 39 + 40 + #endif
+11
drivers/infiniband/hw/qedr/qedr_hsi_rdma.h
··· 158 158 __le32 l_key; 159 159 }; 160 160 161 + /* Rdma doorbell data for SQ and RQ */ 162 + struct rdma_pwm_val16_data { 163 + __le16 icid; 164 + __le16 value; 165 + }; 166 + 167 + union rdma_pwm_val16_data_union { 168 + struct rdma_pwm_val16_data as_struct; 169 + __le32 as_dword; 170 + }; 171 + 161 172 /* Rdma doorbell data for CQ */ 162 173 struct rdma_pwm_val32_data { 163 174 __le16 icid;
+1089
drivers/infiniband/hw/qedr/verbs.c
··· 48 48 #include "qedr.h" 49 49 #include "verbs.h" 50 50 #include <rdma/qedr-abi.h> 51 + #include "qedr_cm.h" 51 52 52 53 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) 53 54 ··· 993 992 kfree(cq); 994 993 995 994 return 0; 995 + } 996 + 997 + static inline int get_gid_info_from_table(struct ib_qp *ibqp, 998 + struct ib_qp_attr *attr, 999 + int attr_mask, 1000 + struct qed_rdma_modify_qp_in_params 1001 + *qp_params) 1002 + { 1003 + enum rdma_network_type nw_type; 1004 + struct ib_gid_attr gid_attr; 1005 + union ib_gid gid; 1006 + u32 ipv4_addr; 1007 + int rc = 0; 1008 + int i; 1009 + 1010 + rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num, 1011 + attr->ah_attr.grh.sgid_index, &gid, &gid_attr); 1012 + if (rc) 1013 + return rc; 1014 + 1015 + if (!memcmp(&gid, &zgid, sizeof(gid))) 1016 + return -ENOENT; 1017 + 1018 + if (gid_attr.ndev) { 1019 + qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev); 1020 + 1021 + dev_put(gid_attr.ndev); 1022 + nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid); 1023 + switch (nw_type) { 1024 + case RDMA_NETWORK_IPV6: 1025 + memcpy(&qp_params->sgid.bytes[0], &gid.raw[0], 1026 + sizeof(qp_params->sgid)); 1027 + memcpy(&qp_params->dgid.bytes[0], 1028 + &attr->ah_attr.grh.dgid, 1029 + sizeof(qp_params->dgid)); 1030 + qp_params->roce_mode = ROCE_V2_IPV6; 1031 + SET_FIELD(qp_params->modify_flags, 1032 + QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); 1033 + break; 1034 + case RDMA_NETWORK_IB: 1035 + memcpy(&qp_params->sgid.bytes[0], &gid.raw[0], 1036 + sizeof(qp_params->sgid)); 1037 + memcpy(&qp_params->dgid.bytes[0], 1038 + &attr->ah_attr.grh.dgid, 1039 + sizeof(qp_params->dgid)); 1040 + qp_params->roce_mode = ROCE_V1; 1041 + break; 1042 + case RDMA_NETWORK_IPV4: 1043 + memset(&qp_params->sgid, 0, sizeof(qp_params->sgid)); 1044 + memset(&qp_params->dgid, 0, sizeof(qp_params->dgid)); 1045 + ipv4_addr = qedr_get_ipv4_from_gid(gid.raw); 1046 + qp_params->sgid.ipv4_addr = ipv4_addr; 1047 + ipv4_addr = 1048 + 
qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw); 1049 + qp_params->dgid.ipv4_addr = ipv4_addr; 1050 + SET_FIELD(qp_params->modify_flags, 1051 + QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1); 1052 + qp_params->roce_mode = ROCE_V2_IPV4; 1053 + break; 1054 + } 1055 + } 1056 + 1057 + for (i = 0; i < 4; i++) { 1058 + qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]); 1059 + qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]); 1060 + } 1061 + 1062 + if (qp_params->vlan_id >= VLAN_CFI_MASK) 1063 + qp_params->vlan_id = 0; 1064 + 1065 + return 0; 1066 + } 1067 + 1068 + static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp) 1069 + { 1070 + qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl); 1071 + ib_umem_release(qp->usq.umem); 1072 + } 1073 + 1074 + static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp) 1075 + { 1076 + qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl); 1077 + ib_umem_release(qp->urq.umem); 1078 + } 1079 + 1080 + static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp) 1081 + { 1082 + dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); 1083 + kfree(qp->wqe_wr_id); 1084 + } 1085 + 1086 + static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp) 1087 + { 1088 + dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); 1089 + kfree(qp->rqe_wr_id); 1090 + } 1091 + 1092 + static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev, 1093 + struct ib_qp_init_attr *attrs) 1094 + { 1095 + struct qedr_device_attr *qattr = &dev->attr; 1096 + 1097 + /* QP0... 
attrs->qp_type == IB_QPT_GSI */ 1098 + if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) { 1099 + DP_DEBUG(dev, QEDR_MSG_QP, 1100 + "create qp: unsupported qp type=0x%x requested\n", 1101 + attrs->qp_type); 1102 + return -EINVAL; 1103 + } 1104 + 1105 + if (attrs->cap.max_send_wr > qattr->max_sqe) { 1106 + DP_ERR(dev, 1107 + "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n", 1108 + attrs->cap.max_send_wr, qattr->max_sqe); 1109 + return -EINVAL; 1110 + } 1111 + 1112 + if (attrs->cap.max_inline_data > qattr->max_inline) { 1113 + DP_ERR(dev, 1114 + "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n", 1115 + attrs->cap.max_inline_data, qattr->max_inline); 1116 + return -EINVAL; 1117 + } 1118 + 1119 + if (attrs->cap.max_send_sge > qattr->max_sge) { 1120 + DP_ERR(dev, 1121 + "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n", 1122 + attrs->cap.max_send_sge, qattr->max_sge); 1123 + return -EINVAL; 1124 + } 1125 + 1126 + if (attrs->cap.max_recv_sge > qattr->max_sge) { 1127 + DP_ERR(dev, 1128 + "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n", 1129 + attrs->cap.max_recv_sge, qattr->max_sge); 1130 + return -EINVAL; 1131 + } 1132 + 1133 + /* Unprivileged user space cannot create special QP */ 1134 + if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) { 1135 + DP_ERR(dev, 1136 + "create qp: userspace can't create special QPs of type=0x%x\n", 1137 + attrs->qp_type); 1138 + return -EINVAL; 1139 + } 1140 + 1141 + return 0; 1142 + } 1143 + 1144 + static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp, 1145 + struct qedr_qp *qp) 1146 + { 1147 + uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); 1148 + uresp->rq_icid = qp->icid; 1149 + } 1150 + 1151 + static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp, 1152 + struct qedr_qp *qp) 1153 + { 1154 + uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); 1155 + uresp->sq_icid = 
qp->icid + 1; 1156 + } 1157 + 1158 + static int qedr_copy_qp_uresp(struct qedr_dev *dev, 1159 + struct qedr_qp *qp, struct ib_udata *udata) 1160 + { 1161 + struct qedr_create_qp_uresp uresp; 1162 + int rc; 1163 + 1164 + memset(&uresp, 0, sizeof(uresp)); 1165 + qedr_copy_sq_uresp(&uresp, qp); 1166 + qedr_copy_rq_uresp(&uresp, qp); 1167 + 1168 + uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE; 1169 + uresp.qp_id = qp->qp_id; 1170 + 1171 + rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); 1172 + if (rc) 1173 + DP_ERR(dev, 1174 + "create qp: failed a copy to user space with qp icid=0x%x.\n", 1175 + qp->icid); 1176 + 1177 + return rc; 1178 + } 1179 + 1180 + static void qedr_set_qp_init_params(struct qedr_dev *dev, 1181 + struct qedr_qp *qp, 1182 + struct qedr_pd *pd, 1183 + struct ib_qp_init_attr *attrs) 1184 + { 1185 + qp->pd = pd; 1186 + 1187 + spin_lock_init(&qp->q_lock); 1188 + 1189 + qp->qp_type = attrs->qp_type; 1190 + qp->max_inline_data = attrs->cap.max_inline_data; 1191 + qp->sq.max_sges = attrs->cap.max_send_sge; 1192 + qp->state = QED_ROCE_QP_STATE_RESET; 1193 + qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false; 1194 + qp->sq_cq = get_qedr_cq(attrs->send_cq); 1195 + qp->rq_cq = get_qedr_cq(attrs->recv_cq); 1196 + qp->dev = dev; 1197 + 1198 + DP_DEBUG(dev, QEDR_MSG_QP, 1199 + "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n", 1200 + pd->pd_id, qp->qp_type, qp->max_inline_data, 1201 + qp->state, qp->signaled, (attrs->srq) ? 
1 : 0); 1202 + DP_DEBUG(dev, QEDR_MSG_QP, 1203 + "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n", 1204 + qp->sq.max_sges, qp->sq_cq->icid); 1205 + qp->rq.max_sges = attrs->cap.max_recv_sge; 1206 + DP_DEBUG(dev, QEDR_MSG_QP, 1207 + "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n", 1208 + qp->rq.max_sges, qp->rq_cq->icid); 1209 + } 1210 + 1211 + static inline void 1212 + qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params, 1213 + struct qedr_create_qp_ureq *ureq) 1214 + { 1215 + /* QP handle to be written in CQE */ 1216 + params->qp_handle_lo = ureq->qp_handle_lo; 1217 + params->qp_handle_hi = ureq->qp_handle_hi; 1218 + } 1219 + 1220 + static inline void 1221 + qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp) 1222 + { 1223 + qp->sq.db = dev->db_addr + 1224 + DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD); 1225 + qp->sq.db_data.data.icid = qp->icid + 1; 1226 + } 1227 + 1228 + static inline void 1229 + qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp) 1230 + { 1231 + qp->rq.db = dev->db_addr + 1232 + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD); 1233 + qp->rq.db_data.data.icid = qp->icid; 1234 + } 1235 + 1236 + static inline int 1237 + qedr_init_qp_kernel_params_rq(struct qedr_dev *dev, 1238 + struct qedr_qp *qp, struct ib_qp_init_attr *attrs) 1239 + { 1240 + /* Allocate driver internal RQ array */ 1241 + qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id), 1242 + GFP_KERNEL); 1243 + if (!qp->rqe_wr_id) 1244 + return -ENOMEM; 1245 + 1246 + DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr); 1247 + 1248 + return 0; 1249 + } 1250 + 1251 + static inline int 1252 + qedr_init_qp_kernel_params_sq(struct qedr_dev *dev, 1253 + struct qedr_qp *qp, 1254 + struct ib_qp_init_attr *attrs, 1255 + struct qed_rdma_create_qp_in_params *params) 1256 + { 1257 + u32 temp_max_wr; 1258 + 1259 + /* Allocate driver internal SQ array */ 1260 + temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier; 
1261 + temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe); 1262 + 1263 + /* temp_max_wr < attr->max_sqe < u16 so the casting is safe */ 1264 + qp->sq.max_wr = (u16)temp_max_wr; 1265 + qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), 1266 + GFP_KERNEL); 1267 + if (!qp->wqe_wr_id) 1268 + return -ENOMEM; 1269 + 1270 + DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr); 1271 + 1272 + /* QP handle to be written in CQE */ 1273 + params->qp_handle_lo = lower_32_bits((uintptr_t)qp); 1274 + params->qp_handle_hi = upper_32_bits((uintptr_t)qp); 1275 + 1276 + return 0; 1277 + } 1278 + 1279 + static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev, 1280 + struct qedr_qp *qp, 1281 + struct ib_qp_init_attr *attrs) 1282 + { 1283 + u32 n_sq_elems, n_sq_entries; 1284 + int rc; 1285 + 1286 + /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in 1287 + * the ring. The ring should allow at least a single WR, even if the 1288 + * user requested none, due to allocation issues. 
1289 + */ 1290 + n_sq_entries = attrs->cap.max_send_wr; 1291 + n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe); 1292 + n_sq_entries = max_t(u32, n_sq_entries, 1); 1293 + n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE; 1294 + rc = dev->ops->common->chain_alloc(dev->cdev, 1295 + QED_CHAIN_USE_TO_PRODUCE, 1296 + QED_CHAIN_MODE_PBL, 1297 + QED_CHAIN_CNT_TYPE_U32, 1298 + n_sq_elems, 1299 + QEDR_SQE_ELEMENT_SIZE, 1300 + &qp->sq.pbl); 1301 + if (rc) { 1302 + DP_ERR(dev, "failed to allocate QP %p SQ\n", qp); 1303 + return rc; 1304 + } 1305 + 1306 + DP_DEBUG(dev, QEDR_MSG_SQ, 1307 + "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n", 1308 + qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr, 1309 + n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc); 1310 + return 0; 1311 + } 1312 + 1313 + static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev, 1314 + struct qedr_qp *qp, 1315 + struct ib_qp_init_attr *attrs) 1316 + { 1317 + u32 n_rq_elems, n_rq_entries; 1318 + int rc; 1319 + 1320 + /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in 1321 + * the ring. The ring should allow at least a single WR, even if the 1322 + * user requested none, due to allocation issues. 
1323 + */ 1324 + n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1); 1325 + n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE; 1326 + rc = dev->ops->common->chain_alloc(dev->cdev, 1327 + QED_CHAIN_USE_TO_CONSUME_PRODUCE, 1328 + QED_CHAIN_MODE_PBL, 1329 + QED_CHAIN_CNT_TYPE_U32, 1330 + n_rq_elems, 1331 + QEDR_RQE_ELEMENT_SIZE, 1332 + &qp->rq.pbl); 1333 + 1334 + if (rc) { 1335 + DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp); 1336 + return -ENOMEM; 1337 + } 1338 + 1339 + DP_DEBUG(dev, QEDR_MSG_RQ, 1340 + "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n", 1341 + qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr, 1342 + n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc); 1343 + 1344 + /* n_rq_entries < u16 so the casting is safe */ 1345 + qp->rq.max_wr = (u16)n_rq_entries; 1346 + 1347 + return 0; 1348 + } 1349 + 1350 + static inline void 1351 + qedr_init_qp_in_params_sq(struct qedr_dev *dev, 1352 + struct qedr_pd *pd, 1353 + struct qedr_qp *qp, 1354 + struct ib_qp_init_attr *attrs, 1355 + struct ib_udata *udata, 1356 + struct qed_rdma_create_qp_in_params *params) 1357 + { 1358 + /* QP handle to be written in an async event */ 1359 + params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp); 1360 + params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp); 1361 + 1362 + params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR); 1363 + params->fmr_and_reserved_lkey = !udata; 1364 + params->pd = pd->pd_id; 1365 + params->dpi = pd->uctx ? 
pd->uctx->dpi : dev->dpi; 1366 + params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid; 1367 + params->max_sq_sges = 0; 1368 + params->stats_queue = 0; 1369 + 1370 + if (udata) { 1371 + params->sq_num_pages = qp->usq.pbl_info.num_pbes; 1372 + params->sq_pbl_ptr = qp->usq.pbl_tbl->pa; 1373 + } else { 1374 + params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl); 1375 + params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl); 1376 + } 1377 + } 1378 + 1379 + static inline void 1380 + qedr_init_qp_in_params_rq(struct qedr_qp *qp, 1381 + struct ib_qp_init_attr *attrs, 1382 + struct ib_udata *udata, 1383 + struct qed_rdma_create_qp_in_params *params) 1384 + { 1385 + params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid; 1386 + params->srq_id = 0; 1387 + params->use_srq = false; 1388 + 1389 + if (udata) { 1390 + params->rq_num_pages = qp->urq.pbl_info.num_pbes; 1391 + params->rq_pbl_ptr = qp->urq.pbl_tbl->pa; 1392 + } else { 1393 + params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl); 1394 + params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl); 1395 + } 1396 + } 1397 + 1398 + static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp) 1399 + { 1400 + DP_DEBUG(dev, QEDR_MSG_QP, 1401 + "create qp: successfully created user QP. 
qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n", 1402 + qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr, 1403 + qp->urq.buf_len); 1404 + } 1405 + 1406 + static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx, 1407 + struct qedr_dev *dev, 1408 + struct qedr_qp *qp, 1409 + struct qedr_create_qp_ureq *ureq) 1410 + { 1411 + int rc; 1412 + 1413 + /* SQ - read access only (0), dma sync not required (0) */ 1414 + rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr, 1415 + ureq->sq_len, 0, 0); 1416 + if (rc) 1417 + return rc; 1418 + 1419 + /* RQ - read access only (0), dma sync not required (0) */ 1420 + rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr, 1421 + ureq->rq_len, 0, 0); 1422 + 1423 + if (rc) 1424 + qedr_cleanup_user_sq(dev, qp); 1425 + return rc; 1426 + } 1427 + 1428 + static inline int 1429 + qedr_init_kernel_qp(struct qedr_dev *dev, 1430 + struct qedr_qp *qp, 1431 + struct ib_qp_init_attr *attrs, 1432 + struct qed_rdma_create_qp_in_params *params) 1433 + { 1434 + int rc; 1435 + 1436 + rc = qedr_init_qp_kernel_sq(dev, qp, attrs); 1437 + if (rc) { 1438 + DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp); 1439 + return rc; 1440 + } 1441 + 1442 + rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params); 1443 + if (rc) { 1444 + dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); 1445 + DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp); 1446 + return rc; 1447 + } 1448 + 1449 + rc = qedr_init_qp_kernel_rq(dev, qp, attrs); 1450 + if (rc) { 1451 + qedr_cleanup_kernel_sq(dev, qp); 1452 + DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp); 1453 + return rc; 1454 + } 1455 + 1456 + rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs); 1457 + if (rc) { 1458 + DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp); 1459 + qedr_cleanup_kernel_sq(dev, qp); 1460 + dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl); 1461 + return rc; 1462 + } 1463 + 1464 + return rc; 1465 + } 1466 + 1467 + struct ib_qp 
*qedr_create_qp(struct ib_pd *ibpd, 1468 + struct ib_qp_init_attr *attrs, 1469 + struct ib_udata *udata) 1470 + { 1471 + struct qedr_dev *dev = get_qedr_dev(ibpd->device); 1472 + struct qed_rdma_create_qp_out_params out_params; 1473 + struct qed_rdma_create_qp_in_params in_params; 1474 + struct qedr_pd *pd = get_qedr_pd(ibpd); 1475 + struct ib_ucontext *ib_ctx = NULL; 1476 + struct qedr_ucontext *ctx = NULL; 1477 + struct qedr_create_qp_ureq ureq; 1478 + struct qedr_qp *qp; 1479 + int rc = 0; 1480 + 1481 + DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", 1482 + udata ? "user library" : "kernel", pd); 1483 + 1484 + rc = qedr_check_qp_attrs(ibpd, dev, attrs); 1485 + if (rc) 1486 + return ERR_PTR(rc); 1487 + 1488 + qp = kzalloc(sizeof(*qp), GFP_KERNEL); 1489 + if (!qp) 1490 + return ERR_PTR(-ENOMEM); 1491 + 1492 + if (attrs->srq) 1493 + return ERR_PTR(-EINVAL); 1494 + 1495 + DP_DEBUG(dev, QEDR_MSG_QP, 1496 + "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n", 1497 + get_qedr_cq(attrs->send_cq), 1498 + get_qedr_cq(attrs->send_cq)->icid, 1499 + get_qedr_cq(attrs->recv_cq), 1500 + get_qedr_cq(attrs->recv_cq)->icid); 1501 + 1502 + qedr_set_qp_init_params(dev, qp, pd, attrs); 1503 + 1504 + memset(&in_params, 0, sizeof(in_params)); 1505 + 1506 + if (udata) { 1507 + if (!(udata && ibpd->uobject && ibpd->uobject->context)) 1508 + goto err0; 1509 + 1510 + ib_ctx = ibpd->uobject->context; 1511 + ctx = get_qedr_ucontext(ib_ctx); 1512 + 1513 + memset(&ureq, 0, sizeof(ureq)); 1514 + if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) { 1515 + DP_ERR(dev, 1516 + "create qp: problem copying data from user space\n"); 1517 + goto err0; 1518 + } 1519 + 1520 + rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq); 1521 + if (rc) 1522 + goto err0; 1523 + 1524 + qedr_init_qp_user_params(&in_params, &ureq); 1525 + } else { 1526 + rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params); 1527 + if (rc) 1528 + goto err0; 1529 + } 1530 + 1531 + qedr_init_qp_in_params_sq(dev, pd, 
qp, attrs, udata, &in_params); 1532 + qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params); 1533 + 1534 + qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx, 1535 + &in_params, &out_params); 1536 + 1537 + if (!qp->qed_qp) 1538 + goto err1; 1539 + 1540 + qp->qp_id = out_params.qp_id; 1541 + qp->icid = out_params.icid; 1542 + qp->ibqp.qp_num = qp->qp_id; 1543 + 1544 + if (udata) { 1545 + rc = qedr_copy_qp_uresp(dev, qp, udata); 1546 + if (rc) 1547 + goto err2; 1548 + 1549 + qedr_qp_user_print(dev, qp); 1550 + } else { 1551 + qedr_init_qp_kernel_doorbell_sq(dev, qp); 1552 + qedr_init_qp_kernel_doorbell_rq(dev, qp); 1553 + } 1554 + 1555 + DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n", 1556 + udata ? "user" : "kernel", qp); 1557 + 1558 + return &qp->ibqp; 1559 + 1560 + err2: 1561 + rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); 1562 + if (rc) 1563 + DP_ERR(dev, "create qp: fatal fault. rc=%d", rc); 1564 + err1: 1565 + if (udata) { 1566 + qedr_cleanup_user_sq(dev, qp); 1567 + qedr_cleanup_user_rq(dev, qp); 1568 + } else { 1569 + qedr_cleanup_kernel_sq(dev, qp); 1570 + qedr_cleanup_kernel_rq(dev, qp); 1571 + } 1572 + 1573 + err0: 1574 + kfree(qp); 1575 + 1576 + return ERR_PTR(-EFAULT); 1577 + } 1578 + 1579 + enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state) 1580 + { 1581 + switch (qp_state) { 1582 + case QED_ROCE_QP_STATE_RESET: 1583 + return IB_QPS_RESET; 1584 + case QED_ROCE_QP_STATE_INIT: 1585 + return IB_QPS_INIT; 1586 + case QED_ROCE_QP_STATE_RTR: 1587 + return IB_QPS_RTR; 1588 + case QED_ROCE_QP_STATE_RTS: 1589 + return IB_QPS_RTS; 1590 + case QED_ROCE_QP_STATE_SQD: 1591 + return IB_QPS_SQD; 1592 + case QED_ROCE_QP_STATE_ERR: 1593 + return IB_QPS_ERR; 1594 + case QED_ROCE_QP_STATE_SQE: 1595 + return IB_QPS_SQE; 1596 + } 1597 + return IB_QPS_ERR; 1598 + } 1599 + 1600 + enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state) 1601 + { 1602 + switch (qp_state) { 1603 + case IB_QPS_RESET: 1604 + return 
QED_ROCE_QP_STATE_RESET; 1605 + case IB_QPS_INIT: 1606 + return QED_ROCE_QP_STATE_INIT; 1607 + case IB_QPS_RTR: 1608 + return QED_ROCE_QP_STATE_RTR; 1609 + case IB_QPS_RTS: 1610 + return QED_ROCE_QP_STATE_RTS; 1611 + case IB_QPS_SQD: 1612 + return QED_ROCE_QP_STATE_SQD; 1613 + case IB_QPS_ERR: 1614 + return QED_ROCE_QP_STATE_ERR; 1615 + default: 1616 + return QED_ROCE_QP_STATE_ERR; 1617 + } 1618 + } 1619 + 1620 + static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph) 1621 + { 1622 + qed_chain_reset(&qph->pbl); 1623 + qph->prod = 0; 1624 + qph->cons = 0; 1625 + qph->wqe_cons = 0; 1626 + qph->db_data.data.value = cpu_to_le16(0); 1627 + } 1628 + 1629 + static int qedr_update_qp_state(struct qedr_dev *dev, 1630 + struct qedr_qp *qp, 1631 + enum qed_roce_qp_state new_state) 1632 + { 1633 + int status = 0; 1634 + 1635 + if (new_state == qp->state) 1636 + return 1; 1637 + 1638 + switch (qp->state) { 1639 + case QED_ROCE_QP_STATE_RESET: 1640 + switch (new_state) { 1641 + case QED_ROCE_QP_STATE_INIT: 1642 + qp->prev_wqe_size = 0; 1643 + qedr_reset_qp_hwq_info(&qp->sq); 1644 + qedr_reset_qp_hwq_info(&qp->rq); 1645 + break; 1646 + default: 1647 + status = -EINVAL; 1648 + break; 1649 + }; 1650 + break; 1651 + case QED_ROCE_QP_STATE_INIT: 1652 + switch (new_state) { 1653 + case QED_ROCE_QP_STATE_RTR: 1654 + /* Update doorbell (in case post_recv was 1655 + * done before move to RTR) 1656 + */ 1657 + wmb(); 1658 + writel(qp->rq.db_data.raw, qp->rq.db); 1659 + /* Make sure write takes effect */ 1660 + mmiowb(); 1661 + break; 1662 + case QED_ROCE_QP_STATE_ERR: 1663 + break; 1664 + default: 1665 + /* Invalid state change. */ 1666 + status = -EINVAL; 1667 + break; 1668 + }; 1669 + break; 1670 + case QED_ROCE_QP_STATE_RTR: 1671 + /* RTR->XXX */ 1672 + switch (new_state) { 1673 + case QED_ROCE_QP_STATE_RTS: 1674 + break; 1675 + case QED_ROCE_QP_STATE_ERR: 1676 + break; 1677 + default: 1678 + /* Invalid state change. 
*/ 1679 + status = -EINVAL; 1680 + break; 1681 + }; 1682 + break; 1683 + case QED_ROCE_QP_STATE_RTS: 1684 + /* RTS->XXX */ 1685 + switch (new_state) { 1686 + case QED_ROCE_QP_STATE_SQD: 1687 + break; 1688 + case QED_ROCE_QP_STATE_ERR: 1689 + break; 1690 + default: 1691 + /* Invalid state change. */ 1692 + status = -EINVAL; 1693 + break; 1694 + }; 1695 + break; 1696 + case QED_ROCE_QP_STATE_SQD: 1697 + /* SQD->XXX */ 1698 + switch (new_state) { 1699 + case QED_ROCE_QP_STATE_RTS: 1700 + case QED_ROCE_QP_STATE_ERR: 1701 + break; 1702 + default: 1703 + /* Invalid state change. */ 1704 + status = -EINVAL; 1705 + break; 1706 + }; 1707 + break; 1708 + case QED_ROCE_QP_STATE_ERR: 1709 + /* ERR->XXX */ 1710 + switch (new_state) { 1711 + case QED_ROCE_QP_STATE_RESET: 1712 + break; 1713 + default: 1714 + status = -EINVAL; 1715 + break; 1716 + }; 1717 + break; 1718 + default: 1719 + status = -EINVAL; 1720 + break; 1721 + }; 1722 + 1723 + return status; 1724 + } 1725 + 1726 + int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 1727 + int attr_mask, struct ib_udata *udata) 1728 + { 1729 + struct qedr_qp *qp = get_qedr_qp(ibqp); 1730 + struct qed_rdma_modify_qp_in_params qp_params = { 0 }; 1731 + struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev); 1732 + enum ib_qp_state old_qp_state, new_qp_state; 1733 + int rc = 0; 1734 + 1735 + DP_DEBUG(dev, QEDR_MSG_QP, 1736 + "modify qp: qp %p attr_mask=0x%x, state=%d", qp, attr_mask, 1737 + attr->qp_state); 1738 + 1739 + old_qp_state = qedr_get_ibqp_state(qp->state); 1740 + if (attr_mask & IB_QP_STATE) 1741 + new_qp_state = attr->qp_state; 1742 + else 1743 + new_qp_state = old_qp_state; 1744 + 1745 + if (!ib_modify_qp_is_ok 1746 + (old_qp_state, new_qp_state, ibqp->qp_type, attr_mask, 1747 + IB_LINK_LAYER_ETHERNET)) { 1748 + DP_ERR(dev, 1749 + "modify qp: invalid attribute mask=0x%x specified for\n" 1750 + "qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n", 1751 + attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state, 
1752 + new_qp_state); 1753 + rc = -EINVAL; 1754 + goto err; 1755 + } 1756 + 1757 + /* Translate the masks... */ 1758 + if (attr_mask & IB_QP_STATE) { 1759 + SET_FIELD(qp_params.modify_flags, 1760 + QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1); 1761 + qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state); 1762 + } 1763 + 1764 + if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) 1765 + qp_params.sqd_async = true; 1766 + 1767 + if (attr_mask & IB_QP_PKEY_INDEX) { 1768 + SET_FIELD(qp_params.modify_flags, 1769 + QED_ROCE_MODIFY_QP_VALID_PKEY, 1); 1770 + if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) { 1771 + rc = -EINVAL; 1772 + goto err; 1773 + } 1774 + 1775 + qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT; 1776 + } 1777 + 1778 + if (attr_mask & IB_QP_QKEY) 1779 + qp->qkey = attr->qkey; 1780 + 1781 + if (attr_mask & IB_QP_ACCESS_FLAGS) { 1782 + SET_FIELD(qp_params.modify_flags, 1783 + QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1); 1784 + qp_params.incoming_rdma_read_en = attr->qp_access_flags & 1785 + IB_ACCESS_REMOTE_READ; 1786 + qp_params.incoming_rdma_write_en = attr->qp_access_flags & 1787 + IB_ACCESS_REMOTE_WRITE; 1788 + qp_params.incoming_atomic_en = attr->qp_access_flags & 1789 + IB_ACCESS_REMOTE_ATOMIC; 1790 + } 1791 + 1792 + if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) { 1793 + if (attr_mask & IB_QP_PATH_MTU) { 1794 + if (attr->path_mtu < IB_MTU_256 || 1795 + attr->path_mtu > IB_MTU_4096) { 1796 + pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n"); 1797 + rc = -EINVAL; 1798 + goto err; 1799 + } 1800 + qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu), 1801 + ib_mtu_enum_to_int(iboe_get_mtu 1802 + (dev->ndev->mtu))); 1803 + } 1804 + 1805 + if (!qp->mtu) { 1806 + qp->mtu = 1807 + ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); 1808 + pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu); 1809 + } 1810 + 1811 + SET_FIELD(qp_params.modify_flags, 1812 + QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1); 1813 + 1814 + qp_params.traffic_class_tos 
= attr->ah_attr.grh.traffic_class; 1815 + qp_params.flow_label = attr->ah_attr.grh.flow_label; 1816 + qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit; 1817 + 1818 + qp->sgid_idx = attr->ah_attr.grh.sgid_index; 1819 + 1820 + rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params); 1821 + if (rc) { 1822 + DP_ERR(dev, 1823 + "modify qp: problems with GID index %d (rc=%d)\n", 1824 + attr->ah_attr.grh.sgid_index, rc); 1825 + return rc; 1826 + } 1827 + 1828 + rc = qedr_get_dmac(dev, &attr->ah_attr, 1829 + qp_params.remote_mac_addr); 1830 + if (rc) 1831 + return rc; 1832 + 1833 + qp_params.use_local_mac = true; 1834 + ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr); 1835 + 1836 + DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n", 1837 + qp_params.dgid.dwords[0], qp_params.dgid.dwords[1], 1838 + qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]); 1839 + DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n", 1840 + qp_params.sgid.dwords[0], qp_params.sgid.dwords[1], 1841 + qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]); 1842 + DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n", 1843 + qp_params.remote_mac_addr); 1844 + ; 1845 + 1846 + qp_params.mtu = qp->mtu; 1847 + qp_params.lb_indication = false; 1848 + } 1849 + 1850 + if (!qp_params.mtu) { 1851 + /* Stay with current MTU */ 1852 + if (qp->mtu) 1853 + qp_params.mtu = qp->mtu; 1854 + else 1855 + qp_params.mtu = 1856 + ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu)); 1857 + } 1858 + 1859 + if (attr_mask & IB_QP_TIMEOUT) { 1860 + SET_FIELD(qp_params.modify_flags, 1861 + QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1); 1862 + 1863 + qp_params.ack_timeout = attr->timeout; 1864 + if (attr->timeout) { 1865 + u32 temp; 1866 + 1867 + temp = 4096 * (1UL << attr->timeout) / 1000 / 1000; 1868 + /* FW requires [msec] */ 1869 + qp_params.ack_timeout = temp; 1870 + } else { 1871 + /* Infinite */ 1872 + qp_params.ack_timeout = 0; 1873 + } 1874 + } 1875 + if (attr_mask & IB_QP_RETRY_CNT) { 1876 + 
SET_FIELD(qp_params.modify_flags, 1877 + QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1); 1878 + qp_params.retry_cnt = attr->retry_cnt; 1879 + } 1880 + 1881 + if (attr_mask & IB_QP_RNR_RETRY) { 1882 + SET_FIELD(qp_params.modify_flags, 1883 + QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1); 1884 + qp_params.rnr_retry_cnt = attr->rnr_retry; 1885 + } 1886 + 1887 + if (attr_mask & IB_QP_RQ_PSN) { 1888 + SET_FIELD(qp_params.modify_flags, 1889 + QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1); 1890 + qp_params.rq_psn = attr->rq_psn; 1891 + qp->rq_psn = attr->rq_psn; 1892 + } 1893 + 1894 + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 1895 + if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) { 1896 + rc = -EINVAL; 1897 + DP_ERR(dev, 1898 + "unsupported max_rd_atomic=%d, supported=%d\n", 1899 + attr->max_rd_atomic, 1900 + dev->attr.max_qp_req_rd_atomic_resc); 1901 + goto err; 1902 + } 1903 + 1904 + SET_FIELD(qp_params.modify_flags, 1905 + QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1); 1906 + qp_params.max_rd_atomic_req = attr->max_rd_atomic; 1907 + } 1908 + 1909 + if (attr_mask & IB_QP_MIN_RNR_TIMER) { 1910 + SET_FIELD(qp_params.modify_flags, 1911 + QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1); 1912 + qp_params.min_rnr_nak_timer = attr->min_rnr_timer; 1913 + } 1914 + 1915 + if (attr_mask & IB_QP_SQ_PSN) { 1916 + SET_FIELD(qp_params.modify_flags, 1917 + QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1); 1918 + qp_params.sq_psn = attr->sq_psn; 1919 + qp->sq_psn = attr->sq_psn; 1920 + } 1921 + 1922 + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 1923 + if (attr->max_dest_rd_atomic > 1924 + dev->attr.max_qp_resp_rd_atomic_resc) { 1925 + DP_ERR(dev, 1926 + "unsupported max_dest_rd_atomic=%d, supported=%d\n", 1927 + attr->max_dest_rd_atomic, 1928 + dev->attr.max_qp_resp_rd_atomic_resc); 1929 + 1930 + rc = -EINVAL; 1931 + goto err; 1932 + } 1933 + 1934 + SET_FIELD(qp_params.modify_flags, 1935 + QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1); 1936 + qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic; 1937 
+ } 1938 + 1939 + if (attr_mask & IB_QP_DEST_QPN) { 1940 + SET_FIELD(qp_params.modify_flags, 1941 + QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1); 1942 + 1943 + qp_params.dest_qp = attr->dest_qp_num; 1944 + qp->dest_qp_num = attr->dest_qp_num; 1945 + } 1946 + 1947 + if (qp->qp_type != IB_QPT_GSI) 1948 + rc = dev->ops->rdma_modify_qp(dev->rdma_ctx, 1949 + qp->qed_qp, &qp_params); 1950 + 1951 + if (attr_mask & IB_QP_STATE) { 1952 + if ((qp->qp_type != IB_QPT_GSI) && (!udata)) 1953 + qedr_update_qp_state(dev, qp, qp_params.new_state); 1954 + qp->state = qp_params.new_state; 1955 + } 1956 + 1957 + err: 1958 + return rc; 1959 + } 1960 + 1961 + static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params) 1962 + { 1963 + int ib_qp_acc_flags = 0; 1964 + 1965 + if (params->incoming_rdma_write_en) 1966 + ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE; 1967 + if (params->incoming_rdma_read_en) 1968 + ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ; 1969 + if (params->incoming_atomic_en) 1970 + ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC; 1971 + ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE; 1972 + return ib_qp_acc_flags; 1973 + } 1974 + 1975 + int qedr_query_qp(struct ib_qp *ibqp, 1976 + struct ib_qp_attr *qp_attr, 1977 + int attr_mask, struct ib_qp_init_attr *qp_init_attr) 1978 + { 1979 + struct qed_rdma_query_qp_out_params params; 1980 + struct qedr_qp *qp = get_qedr_qp(ibqp); 1981 + struct qedr_dev *dev = qp->dev; 1982 + int rc = 0; 1983 + 1984 + memset(&params, 0, sizeof(params)); 1985 + 1986 + rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params); 1987 + if (rc) 1988 + goto err; 1989 + 1990 + memset(qp_attr, 0, sizeof(*qp_attr)); 1991 + memset(qp_init_attr, 0, sizeof(*qp_init_attr)); 1992 + 1993 + qp_attr->qp_state = qedr_get_ibqp_state(params.state); 1994 + qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); 1995 + qp_attr->path_mtu = iboe_get_mtu(params.mtu); 1996 + qp_attr->path_mig_state = IB_MIG_MIGRATED; 1997 + qp_attr->rq_psn = params.rq_psn; 1998 + 
qp_attr->sq_psn = params.sq_psn; 1999 + qp_attr->dest_qp_num = params.dest_qp; 2000 + 2001 + qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params); 2002 + 2003 + qp_attr->cap.max_send_wr = qp->sq.max_wr; 2004 + qp_attr->cap.max_recv_wr = qp->rq.max_wr; 2005 + qp_attr->cap.max_send_sge = qp->sq.max_sges; 2006 + qp_attr->cap.max_recv_sge = qp->rq.max_sges; 2007 + qp_attr->cap.max_inline_data = qp->max_inline_data; 2008 + qp_init_attr->cap = qp_attr->cap; 2009 + 2010 + memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0], 2011 + sizeof(qp_attr->ah_attr.grh.dgid.raw)); 2012 + 2013 + qp_attr->ah_attr.grh.flow_label = params.flow_label; 2014 + qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx; 2015 + qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl; 2016 + qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos; 2017 + 2018 + qp_attr->ah_attr.ah_flags = IB_AH_GRH; 2019 + qp_attr->ah_attr.port_num = 1; 2020 + qp_attr->ah_attr.sl = 0; 2021 + qp_attr->timeout = params.timeout; 2022 + qp_attr->rnr_retry = params.rnr_retry; 2023 + qp_attr->retry_cnt = params.retry_cnt; 2024 + qp_attr->min_rnr_timer = params.min_rnr_nak_timer; 2025 + qp_attr->pkey_index = params.pkey_index; 2026 + qp_attr->port_num = 1; 2027 + qp_attr->ah_attr.src_path_bits = 0; 2028 + qp_attr->ah_attr.static_rate = 0; 2029 + qp_attr->alt_pkey_index = 0; 2030 + qp_attr->alt_port_num = 0; 2031 + qp_attr->alt_timeout = 0; 2032 + memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); 2033 + 2034 + qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0; 2035 + qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic; 2036 + qp_attr->max_rd_atomic = params.max_rd_atomic; 2037 + qp_attr->en_sqd_async_notify = (params.sqd_async) ? 
1 : 0; 2038 + 2039 + DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n", 2040 + qp_attr->cap.max_inline_data); 2041 + 2042 + err: 2043 + return rc; 2044 + } 2045 + 2046 + int qedr_destroy_qp(struct ib_qp *ibqp) 2047 + { 2048 + struct qedr_qp *qp = get_qedr_qp(ibqp); 2049 + struct qedr_dev *dev = qp->dev; 2050 + struct ib_qp_attr attr; 2051 + int attr_mask = 0; 2052 + int rc = 0; 2053 + 2054 + DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n", 2055 + qp, qp->qp_type); 2056 + 2057 + if (qp->state != (QED_ROCE_QP_STATE_RESET | QED_ROCE_QP_STATE_ERR | 2058 + QED_ROCE_QP_STATE_INIT)) { 2059 + attr.qp_state = IB_QPS_ERR; 2060 + attr_mask |= IB_QP_STATE; 2061 + 2062 + /* Change the QP state to ERROR */ 2063 + qedr_modify_qp(ibqp, &attr, attr_mask, NULL); 2064 + } 2065 + 2066 + if (qp->qp_type != IB_QPT_GSI) { 2067 + rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp); 2068 + if (rc) 2069 + return rc; 2070 + } 2071 + 2072 + if (ibqp->uobject && ibqp->uobject->context) { 2073 + qedr_cleanup_user_sq(dev, qp); 2074 + qedr_cleanup_user_rq(dev, qp); 2075 + } else { 2076 + qedr_cleanup_kernel_sq(dev, qp); 2077 + qedr_cleanup_kernel_rq(dev, qp); 2078 + } 2079 + 2080 + kfree(qp); 2081 + 2082 + return rc; 996 2083 }
+7
drivers/infiniband/hw/qedr/verbs.h
··· 62 62 int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); 63 63 int qedr_destroy_cq(struct ib_cq *); 64 64 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); 65 + struct ib_qp *qedr_create_qp(struct ib_pd *, struct ib_qp_init_attr *attrs, 66 + struct ib_udata *); 67 + int qedr_modify_qp(struct ib_qp *, struct ib_qp_attr *attr, 68 + int attr_mask, struct ib_udata *udata); 69 + int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr, 70 + int qp_attr_mask, struct ib_qp_init_attr *); 71 + int qedr_destroy_qp(struct ib_qp *ibqp); 65 72 66 73 #endif
+34
include/uapi/rdma/qedr-abi.h
··· 69 69 __u16 icid; 70 70 }; 71 71 72 + struct qedr_create_qp_ureq { 73 + __u32 qp_handle_hi; 74 + __u32 qp_handle_lo; 75 + 76 + /* SQ */ 77 + /* user space virtual address of SQ buffer */ 78 + __u64 sq_addr; 79 + 80 + /* length of SQ buffer */ 81 + __u64 sq_len; 82 + 83 + /* RQ */ 84 + /* user space virtual address of RQ buffer */ 85 + __u64 rq_addr; 86 + 87 + /* length of RQ buffer */ 88 + __u64 rq_len; 89 + }; 90 + 91 + struct qedr_create_qp_uresp { 92 + __u32 qp_id; 93 + __u32 atomic_supported; 94 + 95 + /* SQ */ 96 + __u32 sq_db_offset; 97 + __u16 sq_icid; 98 + 99 + /* RQ */ 100 + __u32 rq_db_offset; 101 + __u16 rq_icid; 102 + 103 + __u32 rq_db2_offset; 104 + }; 105 + 72 106 #endif /* __QEDR_USER_H__ */